From c8e213d3427c92caa7d0f85bcf7ad9b54b90666f Mon Sep 17 00:00:00 2001 From: Yuanhao Ji Date: Mon, 9 Jun 2025 11:20:06 +0800 Subject: [PATCH 001/192] CANN: Enable labeler for Ascend NPU (#13914) --- .github/labeler.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/labeler.yml b/.github/labeler.yml index 278032ef2e1a4..3c2f67707b024 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -86,3 +86,10 @@ nix: embedding: - changed-files: - any-glob-to-any-file: examples/embedding/ + +Ascend NPU: + - changed-files: + - any-glob-to-any-file: + - ggml/include/ggml-cann.h + - ggml/src/ggml-cann/** + - docs/backend/CANN.md From bbd51ae7f60a67c4b76c5a064476aeead6ce806d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90inh=20Tr=E1=BB=8Dng=20Huy?= <77562200+huydt84@users.noreply.github.com> Date: Mon, 9 Jun 2025 13:15:31 +0900 Subject: [PATCH 002/192] add geglu activation function (#14074) Co-authored-by: dinhhuy --- src/llama-graph.cpp | 22 ++++++++++++++++++++++ src/llama-graph.h | 1 + 2 files changed, 23 insertions(+) diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index c4bdd66039277..55390d42e72ca 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -659,6 +659,28 @@ ggml_tensor * llm_graph_context::build_ffn( cur = ggml_mul(ctx0, x0, x1); cb(cur, "ffn_mul", il); } break; + case LLM_FFN_GEGLU: + { + // Split into two equal parts + int64_t split_point = cur->ne[0] / 2; + ggml_tensor * output_ffn_up = ggml_cont(ctx0, ggml_view_2d( + ctx0, cur, split_point, + cur->ne[1], cur->nb[1], 0 + )); + ggml_tensor * output_ffn_gate = ggml_cont(ctx0, ggml_view_2d( + ctx0, cur, split_point, + cur->ne[1], cur->nb[1], + split_point * ggml_element_size(cur) + )); + + // Apply GELU activation function to the first part + output_ffn_up = ggml_gelu(ctx0, output_ffn_up); + cb(output_ffn_up, "ffn_gelu", il); + + // Element-wise multiplication between the activated part and the gate part + cur = ggml_mul(ctx0, output_ffn_up, output_ffn_gate); + cb(cur, "ffn_geglu", il); + } break; } if (gate && type_gate == LLM_FFN_PAR) { diff --git a/src/llama-graph.h b/src/llama-graph.h index 2b1cfa5b7e2e7..28da6a5228bdc 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -36,6 +36,7 @@ enum llm_ffn_op_type { LLM_FFN_RELU, LLM_FFN_RELU_SQR, LLM_FFN_SWIGLU, + LLM_FFN_GEGLU, }; enum llm_ffn_gate_type { From 84aec15b9fa79824f057ae6975b961f385ba81a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Scipione?= Date: Mon, 9 Jun 2025 11:47:07 +0200 Subject: [PATCH 003/192] sycl: Add reorder to Q6_K mmvq implementation (#13885) * Add Reorder to Q6_K mmvq implementation * Address PR comments: clean up comments * Remove unused parameter after refactoring q4_k * Adding inline to function and removing unnecessary reference to int --------- Signed-off-by: nscipione --- ggml/src/ggml-sycl/convert.cpp | 23 ++++++++- ggml/src/ggml-sycl/dequantize.hpp | 32 ++++++++++++ ggml/src/ggml-sycl/ggml-sycl.cpp | 52 ++++++++++++++++++- ggml/src/ggml-sycl/mmvq.cpp | 36 +++++++++++--- ggml/src/ggml-sycl/quants.hpp | 48 ++++++++++++++---- ggml/src/ggml-sycl/vecdotq.hpp | 83 +++++++++++++++++++++++++++---- 6 files changed, 244 insertions(+), 30 deletions(-) diff --git a/ggml/src/ggml-sycl/convert.cpp b/ggml/src/ggml-sycl/convert.cpp index 75bac98e5fb64..96d2583b13b83 100644 --- a/ggml/src/ggml-sycl/convert.cpp +++ b/ggml/src/ggml-sycl/convert.cpp @@ -265,6 +265,17 @@ static void dequantize_row_q6_K_sycl(const void *vx, dst_t *y, const int64_t k, #endif } +template +static void dequantize_row_q6_K_sycl_reorder(const 
void * vx, dst_t * y, const int64_t k, dpct::queue_ptr stream) { + const int64_t nb = k / QK_K; + + dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); + + stream->parallel_for( + sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_q6_K_reorder(vx, y, item_ct1, nb); }); +} + template static void dequantize_row_iq1_s_sycl(const void *vx, dst_t *y, const int64_t k, dpct::queue_ptr stream) { @@ -530,7 +541,11 @@ to_fp16_sycl_t ggml_get_to_fp16_sycl(ggml_type type, ggml_tensor * dst) { case GGML_TYPE_Q5_K: return dequantize_row_q5_K_sycl; case GGML_TYPE_Q6_K: - return dequantize_row_q6_K_sycl; + if (dst->src[0]->extra && ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) { + return dequantize_row_q6_K_sycl_reorder; + } else { + return dequantize_row_q6_K_sycl; + } case GGML_TYPE_IQ1_S: return dequantize_row_iq1_s_sycl; case GGML_TYPE_IQ1_M: @@ -587,7 +602,11 @@ to_fp32_sycl_t ggml_get_to_fp32_sycl(ggml_type type, ggml_tensor *dst) { case GGML_TYPE_Q5_K: return dequantize_row_q5_K_sycl; case GGML_TYPE_Q6_K: - return dequantize_row_q6_K_sycl; + if (dst->src[0]->extra && ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) { + return dequantize_row_q6_K_sycl_reorder; + } else { + return dequantize_row_q6_K_sycl; + } case GGML_TYPE_IQ1_S: return dequantize_row_iq1_s_sycl; case GGML_TYPE_IQ1_M: diff --git a/ggml/src/ggml-sycl/dequantize.hpp b/ggml/src/ggml-sycl/dequantize.hpp index 64e92f73f26c8..540539bb22381 100644 --- a/ggml/src/ggml-sycl/dequantize.hpp +++ b/ggml/src/ggml-sycl/dequantize.hpp @@ -538,6 +538,38 @@ static void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restri #endif } +template +static void dequantize_block_q6_K_reorder(const void * __restrict__ vx, dst_t * __restrict__ yy, + const sycl::nd_item<3> & item_ct1, int64_t n_blocks) { + const int64_t ib = item_ct1.get_group(2); + + const int64_t tid = item_ct1.get_local_id(2); + const int64_t ip = tid / 32; // ip is 0 or 1 + const int64_t il = tid - 32 * ip; // 0...32 + const int64_t is = 8 * ip + il / 16; + + const uint8_t * base_ptr = static_cast(vx); + const auto ql_offset = ib * (QK_K / 2); + const auto qh_offset = (QK_K / 2) * n_blocks + (QK_K / 4) * ib; + const auto base_scales_offset = (QK_K / 2) * n_blocks + (QK_K / 4) * n_blocks + (QK_K / 16) * ib; + const auto base_d_offset = ((QK_K / 2) + (QK_K / 4) + (QK_K / 16)) * n_blocks; + const uint8_t * ql_ptr = base_ptr + ql_offset; + const uint8_t * qh_ptr = base_ptr + qh_offset; + const uint8_t * scales_ptr = base_ptr + base_scales_offset; + const ggml_half * d = (const ggml_half *) (base_ptr + base_d_offset) + ib; + + dst_t * y = yy + ib * QK_K + 128 * ip + il; + + const uint8_t * ql = ql_ptr + 64 * ip + il; + const uint8_t qh = *(qh_ptr + 32 * ip + il); + const int8_t * sc = reinterpret_cast(scales_ptr + is); + + y[0] = *d * sc[0] * ((int8_t) ((ql[0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32); + y[32] = *d * sc[2] * ((int8_t) ((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32); + y[64] = *d * sc[4] * ((int8_t) ((ql[0] >> 4) | (((qh >> 4) & 3) << 4)) - 32); + y[96] = *d * sc[6] * ((int8_t) ((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32); +} + template static void dequantize_block_iq2_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy, const sycl::nd_item<3> &item_ct1, diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 3936f1eaf5ef6..3693b0a4337a5 100644 --- 
a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -354,7 +354,8 @@ ggml_backend_sycl_buffer_init_tensor(ggml_backend_buffer_t buffer, assert(tensor->view_src->buffer->buft == buffer->buft); return GGML_STATUS_SUCCESS; } - if ((tensor->type == GGML_TYPE_Q4_0 || tensor->type == GGML_TYPE_Q4_K) && !g_ggml_sycl_disable_optimize) { + if ((tensor->type == GGML_TYPE_Q4_0 || tensor->type == GGML_TYPE_Q4_K || tensor->type == GGML_TYPE_Q6_K) && + !g_ggml_sycl_disable_optimize) { ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu{}; tensor->extra = extra; ctx->tensor_extras.push_back(extra); //used to release it when destroy ctx. @@ -2989,6 +2990,7 @@ inline bool ggml_sycl_supports_reorder_mul_mat_sycl(enum ggml_type type) { case GGML_TYPE_Q4_0: return true; case GGML_TYPE_Q4_K: + case GGML_TYPE_Q6_K: return !g_ggml_sycl_prioritize_dmmv; default: return false; @@ -3008,6 +3010,7 @@ inline bool ggml_sycl_supports_reorder_mmvq(enum ggml_type type) { switch (type) { case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_K: + case GGML_TYPE_Q6_K: return true; default: return false; @@ -3092,6 +3095,50 @@ static void reorder_qw_q4_k(uint8_t * data_device, size_t size, size_t offset, d sycl::free(tmp_buf, *stream); } +static void reorder_qw_q6_k(uint8_t * data_device, size_t size, size_t offset, dpct::queue_ptr stream) { + GGML_ASSERT(size % sizeof(block_q6_K) == 0); + GGML_ASSERT(offset % sizeof(block_q6_K) == 0); + + const int nblocks = size / sizeof(block_q6_K); + + auto * tmp_buf = sycl::malloc_shared(size, *stream); + SYCL_CHECK(CHECK_TRY_ERROR((*stream).memcpy(tmp_buf, data_device, size).wait())); + + auto * ql_ptr = data_device; + auto * qh_ptr = ql_ptr + (QK_K / 2) * nblocks; + auto * scales_ptr = qh_ptr + (QK_K / 4) * nblocks; + sycl::half * dm_ptr = (sycl::half *) (scales_ptr + (QK_K / 16) * nblocks); + + stream + ->parallel_for(nblocks, + [=](auto i) { + const block_q6_K * x = (const block_q6_K *) tmp_buf; + const int ib = i; + + const uint8_t * ql = x[ib].ql; + const uint8_t * qh = x[ib].qh; + uint8_t * base_ql_ptr = ql_ptr + (QK_K / 2) * ib; + uint8_t * base_qh_ptr = qh_ptr + (QK_K / 4) * ib; + uint8_t * base_scales_ptr = scales_ptr + (QK_K / 16) * ib; + + for (int j = 0; j < QK_K / 2; ++j) { + base_ql_ptr[j] = ql[j]; + } + for (int j = 0; j < QK_K / 4; ++j) { + base_qh_ptr[j] = qh[j]; + } + + for (int j = 0; j < QK_K / 16; ++j) { + base_scales_ptr[j] = x[ib].scales[j]; + } + + dm_ptr[ib] = x[ib].d; + }) + .wait_and_throw(); + + sycl::free(tmp_buf, *stream); +} + static void reorder_qw(const ggml_tensor * src0, dpct::queue_ptr stream) { uint8_t * data_device = (uint8_t *) src0->data; size_t ncols = src0->ne[0]; @@ -3105,6 +3152,9 @@ static void reorder_qw(const ggml_tensor * src0, dpct::queue_ptr stream) { case GGML_TYPE_Q4_K: reorder_qw_q4_k(data_device, size, 0, stream); break; + case GGML_TYPE_Q6_K: + reorder_qw_q6_k(data_device, size, 0, stream); + break; default: GGML_ABORT("reorder_qw() called with unsupported type"); break; diff --git a/ggml/src/ggml-sycl/mmvq.cpp b/ggml/src/ggml-sycl/mmvq.cpp index 80c780b209998..5b7f064074937 100644 --- a/ggml/src/ggml-sycl/mmvq.cpp +++ b/ggml/src/ggml-sycl/mmvq.cpp @@ -31,11 +31,10 @@ static void mul_mat_vec_q_reorder(const void * __restrict__ vx, const void * __r float partial_sum = 0.0f; for (int i = sg.get_local_linear_id() / block_elements_per_subgroup; i < blocks_per_row; i += blocks_per_subgroup) { - const int ibx = row * blocks_per_row + i; // x block index - // TODO: Generalize offsets, right now only works for quantizations 
that don't split high and low bits - const int bx_offset = block_type::get_block_offset(ibx); - const int d_offset = block_type::get_d_offset(nrows, ncols, ibx); + const int ibx = row * blocks_per_row + i; // x block index + const auto bx_offset = block_type::get_block_offset(ibx, nblocks); + const auto d_offset = block_type::get_d_offset(nrows, ncols, ibx); // Y block index that aligns with ibx const int iby = i * block_type::block_to_q8_1_ratio(); const int8_t* q8_1_quant_ptr = (const int8_t*)vy + iby * QK8_1; @@ -46,7 +45,7 @@ static void mul_mat_vec_q_reorder(const void * __restrict__ vx, const void * __r // x block quant index when casting the quants to int const int iqs = elem + block_traits::vdr_mmvq * (sg.get_local_linear_id() % block_elements_per_subgroup); - partial_sum += reorder_vec_dot_q_sycl()(vx, bx_offset, d_offset, q8_1_quant_ptr, q8_1_ds_ptr, iqs, nblocks); + partial_sum += reorder_vec_dot_q_sycl()(vx, bx_offset, d_offset, q8_1_quant_ptr, q8_1_ds_ptr, iqs); } } @@ -785,6 +784,24 @@ static void mul_mat_vec_q5_K_q8_1_sycl(const void *vx, const void *vy, } } +static void reorder_mul_mat_vec_q6_k_q8_1_sycl(const void * vx, const void * vy, float * dst, const int ncols, + const int nrows, dpct::queue_ptr stream) { + GGML_ASSERT(ncols % QK_K == 0); + const int block_num_y = ceil_div(nrows, GGML_SYCL_MMV_Y); + constexpr size_t num_subgroups = 16; + GGML_ASSERT(block_num_y % num_subgroups == 0); + + const sycl::range<3> global_size(1, GGML_SYCL_MMV_Y, block_num_y * WARP_SIZE); + const sycl::range<3> workgroup_size(1, GGML_SYCL_MMV_Y, num_subgroups * WARP_SIZE); + + stream->submit([&](sycl::handler & cgh) { + cgh.parallel_for(sycl::nd_range<3>(global_size, workgroup_size), + [=](sycl::nd_item<3> nd_item) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_reorder>(vx, vy, dst, ncols, nrows, + nd_item); + }); + }); +} static void mul_mat_vec_q6_K_q8_1_sycl(const void *vx, const void *vy, float *dst, const int ncols, const int nrows, @@ -1070,7 +1087,14 @@ void ggml_sycl_op_mul_mat_vec_q(ggml_backend_sycl_context & ctx, const ggml_tens mul_mat_vec_q5_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream); break; case GGML_TYPE_Q6_K: - mul_mat_vec_q6_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream); + if ((ggml_tensor_extra_gpu *) dst->src[0]->extra && + ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) { + GGML_SYCL_DEBUG("Calling reorder_mul_mat_vec_q6_k_q8_1_sycl\n"); + reorder_mul_mat_vec_q6_k_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream); + } else { + GGML_SYCL_DEBUG("Calling mul_mat_vec_q6_k_q8_1_sycl\n"); + mul_mat_vec_q6_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream); + } break; case GGML_TYPE_IQ1_S: mul_mat_vec_iq1_s_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream); diff --git a/ggml/src/ggml-sycl/quants.hpp b/ggml/src/ggml-sycl/quants.hpp index 88ec13ea26999..8b952db43bfe2 100644 --- a/ggml/src/ggml-sycl/quants.hpp +++ b/ggml/src/ggml-sycl/quants.hpp @@ -14,12 +14,13 @@ #ifndef GGML_SYCL_QUANTS_HPP #define GGML_SYCL_QUANTS_HPP +#include + #include "ggml-common.h" #include "ggml.h" namespace ggml_sycl_reordered { - // The reordered block moves quants (qs) and scales(d) to two // uniform regions of memory that is contiguous in the same tensor. 
// What this means is that instead of having: @@ -32,7 +33,6 @@ namespace ggml_sycl_reordered { template struct block_q_t; - // qk number of weights / quants in a block // qr number of weights in a byte (described as 'before dequantization') // for quantization types that has low and high bits split, qr is calculated with @@ -47,10 +47,12 @@ template <> struct block_q_t { static constexpr uint32_t vdr_mmvq = 2; }; - static constexpr int get_block_offset(const int block_index) { return block_index * (traits::qk / traits::qr); } + static constexpr std::pair get_block_offset(const int block_index, const int /* nblocks */) { + return { block_index * (traits::qk / traits::qr), 0 }; + } - static constexpr int get_d_offset(int nrows, int ncols, const int block_index) { - return (ncols / traits::qr * nrows) + block_index * sizeof(ggml_half); + static constexpr std::pair get_d_offset(int nrows, int ncols, const int block_index) { + return { (ncols / traits::qr * nrows) + block_index * sizeof(ggml_half), 0 }; } static constexpr int block_to_q8_1_ratio() { return traits::qk / QK8_1; } @@ -64,20 +66,46 @@ template <> struct block_q_t { static constexpr uint32_t vdr_mmvq = 2; }; - static constexpr int get_block_offset(const int block_index) { return block_index * (traits::qk / traits::qr); } + static constexpr std::pair get_block_offset(const int block_index, const int /* nblocks */) { + return { block_index * (traits::qk / traits::qr), 0 }; + } - static constexpr int get_d_offset(int nrows, int ncols, const int block_index) { + static constexpr std::pair get_d_offset(int nrows, int ncols, const int block_index) { auto nblocks = (nrows * (ncols / traits::qk)); - return (nblocks * QK_K / 2) + (nblocks * K_SCALE_SIZE) + (block_index * sizeof(ggml_half2)); + return { nblocks * (QK_K / 2), + (nblocks * QK_K / 2) + (nblocks * K_SCALE_SIZE) + (block_index * sizeof(ggml_half2)) }; } static constexpr int block_to_q8_1_ratio() { return traits::qk / QK8_1; } constexpr size_t get_total_qs_bytes(int nblocks) { return nblocks * QK_K / 2; } - - constexpr size_t get_dm_offset(int nblocks) { return get_total_qs_bytes(nblocks) + nblocks * K_SCALE_SIZE; } }; +template <> struct block_q_t { + struct traits { + static constexpr uint32_t qk = QK_K; + static constexpr uint32_t qi = QI6_K; + static constexpr uint32_t qr = QR6_K; + static constexpr uint32_t vdr_mmvq = 1; + }; + + static constexpr std::pair get_block_offset(const int block_index, const int n_blocks) { + auto low_bits_index = block_index * (traits::qk / traits::qr); + // the index of high bits it's after all low bits + auto high_bits_index = n_blocks * (QK_K / 2) + (block_index * (QK_K / 4)); + return { low_bits_index, high_bits_index }; + } + + static constexpr std::pair get_d_offset(int nrows, int ncols, const int block_index) { + auto nblocks = (nrows * (ncols / traits::qk)); + auto total_qs_bytes = nblocks * (QK_K / 2) + nblocks * (QK_K / 4); + auto block_scales = total_qs_bytes + block_index * (QK_K / 16); + auto sb_scale = total_qs_bytes + nblocks * (QK_K / 16); + return { block_scales, sb_scale }; + } + + static constexpr int block_to_q8_1_ratio() { return traits::qk / QK8_1; } +}; } // namespace ggml_sycl_reordered #endif // GGML_SYCL_QUANTS_HPP diff --git a/ggml/src/ggml-sycl/vecdotq.hpp b/ggml/src/ggml-sycl/vecdotq.hpp index fa258e4d4d106..0a5d4999419c9 100644 --- a/ggml/src/ggml-sycl/vecdotq.hpp +++ b/ggml/src/ggml-sycl/vecdotq.hpp @@ -284,10 +284,11 @@ template <> struct reorder_vec_dot_q_sycl { return d4 * (sumi * ds8f.x() - (8 * 
q4_0_traits::vdr_mmvq / q4_0_traits::qi) * ds8f.y()); } - __dpct_inline__ float operator()(const void * __restrict__ vbq, const int ibx_offset, const int d_offset, - const int8_t* q8_1_quant_ptr, const sycl::half2* q8_1_ds, const int & iqs, int /* nblocks */) { - const uint8_t * bq4_0 = static_cast(vbq) + ibx_offset; - const ggml_half d = *(reinterpret_cast(static_cast(vbq) + d_offset)); + __dpct_inline__ float operator()(const void * __restrict__ vbq, const std::pair ibx_offset, + const std::pair d_offset, const int8_t * q8_1_quant_ptr, + const sycl::half2 * q8_1_ds, const int & iqs) { + const uint8_t * bq4_0 = static_cast(vbq) + ibx_offset.first; + const ggml_half d = *(reinterpret_cast(static_cast(vbq) + d_offset.first)); int v[q4_0_traits::vdr_mmvq]; int u[2 * q4_0_traits::vdr_mmvq]; @@ -346,15 +347,15 @@ template <> struct reorder_vec_dot_q_sycl { using q4_k_block = ggml_sycl_reordered::block_q_t; using q4_k_traits = typename q4_k_block::traits; - float operator()(const void * __restrict__ vbq, const int ibx_offset, const int d_offset, - const int8_t* q8_1_quant_ptr, const sycl::half2* q8_1_ds, const int & iqs, int nblocks) { - const int ib = ibx_offset / (QK_K / 2); + __dpct_inline__ float operator()(const void * __restrict__ vbq, const std::pair ibx_offset, + const std::pair d_offset, const int8_t * q8_1_quant_ptr, + const sycl::half2 * q8_1_ds, const int & iqs) { + const int ib = ibx_offset.first / (QK_K / 2); const uint8_t * base = static_cast(vbq); - const uint8_t * qs = base + ibx_offset; - const int total_qs_bytes = nblocks * (QK_K / 2); - const uint8_t * scs = base + total_qs_bytes + ib * K_SCALE_SIZE; - const ggml_half2 * dms = reinterpret_cast(base + d_offset); + const uint8_t * qs = base + ibx_offset.first; + const uint8_t * scs = base + d_offset.first + ib * K_SCALE_SIZE; + const ggml_half2 * dms = reinterpret_cast(base + d_offset.second); const int bq8_offset = QR4_K * ((iqs / 2) / (QI8_1 / 2)); const int * q4 = (const int *) (qs + 16 * bq8_offset + 4 * ((iqs / 2) % 4)); @@ -395,6 +396,66 @@ template <> struct reorder_vec_dot_q_sycl { } }; +template <> struct reorder_vec_dot_q_sycl { + static constexpr ggml_type gtype = GGML_TYPE_Q6_K; + + using q6_k_block = ggml_sycl_reordered::block_q_t; + using q6_k_traits = typename q6_k_block::traits; + + __dpct_inline__ float vec_dot_q6_K_q8_1_impl_mmvq(const int vl, const int vh, const int * __restrict__ u, + const int8_t * __restrict__ scales, const float d, + const float * __restrict__ d8) { + float sumf = 0.0f; + +#pragma unroll + for (int i = 0; i < QR6_K; ++i) { + const int sc = scales[4 * i]; + + const int vil = (vl >> (4 * i)) & 0x0F0F0F0F; + + const int vih = ((vh >> (4 * i)) << 4) & 0x30303030; + + const int vi = dpct::vectorized_binary((vil | vih), 0x20202020, + dpct::sub_sat()); // vi = (vil | vih) - 32 + + sumf += d8[i] * (dpct::dp4a(vi, u[i], 0) * sc); // SIMD dot product + } + + return d * sumf; + } + + __dpct_inline__ float operator()(const void * __restrict__ vbq, const std::pair ibx_offset, + const std::pair d_offset, const int8_t * q8_1_quant_ptr, const sycl::half2 * q8_1_ds, + const int iqs) { + const int ib = ibx_offset.first / (QK_K / 2); + + const uint8_t * base = static_cast(vbq); + const uint8_t * ql = base + ibx_offset.first; + const uint8_t * qh = base + ibx_offset.second; + const int8_t * scales = reinterpret_cast(base + d_offset.first); + const ggml_half * d = (const ggml_half *) (base + d_offset.second) + ib; + + const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K / 2)) + (iqs % (QI6_K / 2)) / (QI6_K / 4); 
+        const int scale_offset = (QI6_K / 4) * (iqs / (QI6_K / 2)) + (iqs % (QI6_K / 2)) / (QI6_K / 8);
+        const int vh_shift = 2 * ((iqs % (QI6_K / 2)) / (QI6_K / 4));
+
+        const int vl = get_int_from_uint8(ql, iqs);
+        const int vh = get_int_from_uint8(qh, (QI6_K / 4) * (iqs / (QI6_K / 2)) + iqs % (QI6_K / 4)) >> vh_shift;
+
+        const int8_t * scs = scales + scale_offset;
+
+        int u[QR6_K];
+        float d8[QR6_K];
+
+#pragma unroll
+        for (int i = 0; i < QR6_K; ++i) {
+            u[i] = get_int_from_int8_aligned(q8_1_quant_ptr + (bq8_offset + 2 * i) * QK8_1, iqs % QI8_1);
+            const sycl::half2 ds_values = *(q8_1_ds + bq8_offset + 2 * i);
+            d8[i] = ds_values[0];
+        }
+        return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scs, *d, d8);
+    }
+};
 
 #define VDR_Q4_0_Q8_1_MMVQ 2
 #define VDR_Q4_0_Q8_1_MMQ  4

From 0363bd9df99d71f5d09fa7c89c539e16306f1c08 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 9 Jun 2025 12:57:58 +0300
Subject: [PATCH 004/192] server : fix LRU check (#14079)

ggml-ci
---
 tools/server/server.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/tools/server/server.cpp b/tools/server/server.cpp
index 2e78dcd7bf1da..77dcbc11bf1f0 100644
--- a/tools/server/server.cpp
+++ b/tools/server/server.cpp
@@ -2142,7 +2142,8 @@ struct server_context {
 
         // find the slot that has been least recently used
         if (ret == nullptr) {
-            int64_t t_last = ggml_time_us();
+            int64_t t_last = -1;
+
             for (server_slot & slot : slots) {
                 // skip the slot if it is not available
                 if (slot.is_processing()) {
@@ -2150,7 +2151,7 @@
                 }
 
                 // select the current slot if the criteria match
-                if (slot.t_last_used < t_last) {
+                if (!ret || slot.t_last_used <= t_last) {
                     t_last = slot.t_last_used;
                     ret = &slot;
                 }

From cdaacee44dda693fee68255327942491778114a5 Mon Sep 17 00:00:00 2001
From: R0CKSTAR
Date: Mon, 9 Jun 2025 18:01:17 +0800
Subject: [PATCH 005/192] webui: fix sidebar being covered by main content (#14082)

* webui: fix sidebar being covered by main content

Signed-off-by: Xiaodong Ye

* webui: update index.html.gz

Signed-off-by: Xiaodong Ye

---------

Signed-off-by: Xiaodong Ye
---
 tools/server/public/index.html.gz | Bin 1913896 -> 1913882 bytes
 tools/server/webui/src/App.tsx    |   2 +-
 2 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/server/public/index.html.gz b/tools/server/public/index.html.gz
index f8e3043421d330d54f4f1184033bcbfc9b0518b4..35b9e702f698c154ceb60ac4fb8b5b3fd38c0e50 100644
Binary files a/tools/server/public/index.html.gz and b/tools/server/public/index.html.gz differ
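For reference alongside PATCH 002 above: the new LLM_FFN_GEGLU branch in llm_graph_context::build_ffn views the FFN-up result as two equal halves, applies GELU to the first half ("ffn_gelu") and multiplies it element-wise with the second half, the gate ("ffn_geglu"). The sketch below is not part of any patch in this series; it is a minimal scalar C++ reference of that computation for a single row, using the exact erf-based GELU (ggml_gelu may use a tanh approximation, so values can differ slightly from the ggml graph).

// Scalar sketch of GEGLU: given x = [up | gate] of even length 2*n,
// out[i] = GELU(up[i]) * gate[i], mirroring the split at ne[0] / 2 in the patch.
#include <cmath>
#include <cstddef>
#include <vector>

static std::vector<float> geglu_reference(const std::vector<float> & x) {
    const std::size_t n = x.size() / 2;          // split point, as in the patch
    std::vector<float> out(n);
    for (std::size_t i = 0; i < n; ++i) {
        const float up   = x[i];                 // first half: the part that gets activated
        const float gate = x[n + i];             // second half: the gate
        const float gelu = 0.5f * up * (1.0f + std::erf(up / std::sqrt(2.0f)));
        out[i] = gelu * gate;                    // element-wise product ("ffn_geglu")
    }
    return out;
}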
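For reference alongside PATCH 003 above: after reorder_qw_q6_k has run, a Q6_K tensor no longer stores an array of interleaved block_q6_K structs; all low-bit quants (ql), all high-bit quants (qh), all scales, and all d values are packed into four consecutive regions of the same buffer. The sketch below is not part of the patch; it only recomputes on the host the byte offsets that block_q_t<GGML_TYPE_Q6_K>::get_block_offset() and get_d_offset() in ggml/src/ggml-sycl/quants.hpp return, assuming QK_K = 256. The block count and block index are made-up example values.

// Reordered Q6_K layout: [ ql of all blocks | qh of all blocks | scales of all blocks | d of all blocks ]
#include <cstddef>
#include <cstdio>

int main() {
    const std::size_t QK_K    = 256; // weights per super-block (ggml-common.h)
    const std::size_t nblocks = 4;   // example value: number of Q6_K blocks in the tensor
    const std::size_t ib      = 2;   // example value: index of the block being read

    const std::size_t ql_bytes     = nblocks * (QK_K / 2);  // 128 bytes of low 4-bit quants per block
    const std::size_t qh_bytes     = nblocks * (QK_K / 4);  //  64 bytes of high 2-bit quants per block
    const std::size_t scales_bytes = nblocks * (QK_K / 16); //  16 int8 scales per block

    // get_block_offset(ib, nblocks): { offset of ql for block ib, offset of qh for block ib }
    const std::size_t ql_offset = ib * (QK_K / 2);
    const std::size_t qh_offset = ql_bytes + ib * (QK_K / 4);

    // get_d_offset(...): { offset of the scales for block ib, start of the d region }
    const std::size_t scales_offset = ql_bytes + qh_bytes + ib * (QK_K / 16);
    const std::size_t d_region      = ql_bytes + qh_bytes + scales_bytes; // d of block ib is the half at d_region + ib * 2

    std::printf("ql@%zu qh@%zu scales@%zu d region@%zu\n", ql_offset, qh_offset, scales_offset, d_region);
    return 0;
}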
zZ!A(-IFCMD_2-kDZKKvFA-pl)tk^Q_6T#IsXDGTCYW>;(L$oK7-lX#*yH7V6je;Df zc|i1JaXRVV24;D*0&?q#zhytmqSy};D(zOcUfnuvtm0P2~JOe5b?vt%W{?U*JOBDieHRJA+ z?A%xl&o<2HX_nBd)z}vX=JRR&PJU z*gg4!$RO~E52Qmc5yN9H+notBqda_BBWtDS2YI9UbN_a%1rG5&F}kn|on(6&25rvF zhM4VJ8TI!WLq~bvZwH~e$pYdQcaC~%{1Z0z1or?%MtrqbzUFm<72vadTFv_=gJa{r zgd&N5gyQ}+?hbcBOC1}9gq!7yx?;T-gx^iJXu~uX=DahN2Ep?P#hP_w;QR?GQXtaN zK)G}2eCGESp<|FEPn$MmE^5<+%9s5Y7uPE(wJ)M#HZ3VT2AbX+D>JV#sC8}@vTUZ3#Q z6D42}jeVHG#QY(()8>QU*{>Qff4`VcL#?@<55-~txH@Rx!``J?N6dekv@iYJ?YTeO zH(FF*nwaU0h?EnZ=e?Hi^;TWsfr{8xbib{cU0YSzefgXA<-xS(6Y^^s*;hncR}Hp* zk^chE`RbBgPLSUuIK#65Me`v9EpnTxw1yP?FNL1mo(LGU$>G_;F@cAbhZLV%N;p*4W#CPS>H@3Hb0be(34qK5_ibOw7sjzQRfR zZ~V4>IAkoIGNi|}OvnU5^E)Sh>$N9Ky7MV#LV!b>o$^WT?hYunM@sLZI&?n{8|TM6 zKRQ)CF$k@ji_PG#512n&kzdl|Bsjv9|K9Eq@X^C`=-y}<)%@}NUM|z_uTZfOkdB^y z!_yP)VfEun_ny|CsK);9`YnE?4SxJqec3%18KN-u_Q z-_J3kS$V8=En{0Ku&_z|0x|E*M9VrF+*lJ=Y;> zZB8||-yMj<;}A4e%s$$7!gOh}^`7`9Yb8~;(H{!I`4g^xM-UcqX>+FGEJOwc<7K9@ z12t%YF98Kq5Gwis&bTWbCFNkDmxa4ZS$KTGD z0^T=}DDuAED*N=BP8U8Q66^V5P|NHeeo3F|s@0G&uqCP(GnQ`GUURC?4NOe_DgKSq zyMFAnvO18lKF5Q%I}=+HG13OySpjiaE()77q3bKX$J-;%8lI>B>d*L@ituGdrcS<5Xjmxd_6@-F5=e(dRr1|{&7ul=nCFx2}cG0 z0$=%>zV3eZORibzrN2|kot=99Akz9+BXwCIq!pV1`Wj2ks;pR zhu20{^|g_WRx#aY-i>zkwI4I$PymbLEj4P6&11zjrh-a&Ie z275le20ve^ESss#_V&ike=I+HAg49&baoyH>$gY*OzW?sWNWG0bx82qFE{$9QVH7a zr$~FIkU$`j|8py(k<0b?0870zt}8u%KCXL8&{=G+Kh*I(@+t5d#rfEMAjL=hUqO=7 z4}1dyF#sAPj=LKQ@V~MrZ|?hWpkCl(*bA{ykqc35AV?RQ#FYduClN!epvaa;e+OW7 z*xlE-SwZ_~6C0s3=X~V3wrmBkr4QN-ip5H=3M6O$gxlsKidMXvqIl81H6KcUj;W?z z(_p{oRj?UF)dnWBh4T$?L$8jI})@2JMXkA73pKD&1w6!87A;stI zn+ChL_4VO<-#ysJ{tA`EfQI0I1E}dW-rHsu2lOKrO*;hvYdC@S*^kHD4zh~e=lB@W zgNltVxQ%qM56kJ=5FB+b+~1$mUa=`wsgEPVO&5B&cG3!dplnHS55N zJKru6zw;xe$Eh+U!kL+}Pn`D`S2G3o?>zdBHK1J1rf`<%_)+nW`MY?30m5@jxG)mP zD0#iv(=zt^ckS|+Uo%Jj1x4wcaEtBeL{Pl?)DEV2*+WW%z@(aUH{0<`EQZ|*iHv?Q zYR0>|&UEI+k$LyPMx$H=a;`#?O}8(BxBsG1;BEk4CI>wTNg;`bKj-PM%?pjk2iiyYM%BR2wjaF`R|FhG0d}6hP%k0E`TOtu z^o1@(c7AyMy~%zl7;4P>vba5pLb$r1UCm~WU7Lym5z=<%v)O!aq3}vnd5zrfL$;y< zK|0kVWWO5nKXf?T=bH~fqY(Dh>uNd4tGCigvVg6@lx^-#-pnn3+z!i*DPKwFX*Zu~ zMI$^4LkZzJw*{==5rBxJM4J2E?1Rgj{5*?-I zAS{&Zt=Zl{6J{^+8%vHN-p(g~0*a3hk6`Cg`@PS2e1DfYwG$BCZ3Pq}`Z(HW;80BE zXWH#1I!2Scj?Zs@VXd=Nr!H^)dF!!LsMhE^e;DWcqux}5PKobJ3Yn8uO+ZS3P%~z5 z*UMG7xk`-e$z{lg-Z$>=<8kX&=PlOBCg;q10R-oIY;M~mJ(qrq=|l81^~GY4uM_O3 z{I$)9qlac%dyxl#K>p6N{Tze>kGCC5A%>kY9?|7gc1MKa^w22spO?y0j=AGpnkVAz z5$tXcfV@?I@$8zV_nYZ6=2K6@+cb?kr44imS@gd>f>+!9-LlJlAyAa9^%ehkUb4?; z*q|f$J_&F~PypoLZ}5*t!Ma|@2ts&#>^K_+2sHg`#J@W$(|1e0gkMT^wSY~n?Pg^4y)tA|6EDTD}ef-+?FKn`D?uU2FW~ld+6_iMA@@2|<6b^_6t&h9QkGTSU zJEkN3tUt%6-7zb)$Gzu$&H`YmeI8DHwx z`2h+v$zA`Tc}F)v*yR)+uIi`WUD-9-6U%acD^Jfin_F4E$05x3@FY1l*#ObDx+UP5 z>}!GG^BvCq+&8O@Ty%9*gU({mzO2#~z{y+~o;&6{U8Krx+nNn9g; zIIT_)t9$Q@+i(RN&$Ws9Ih5A}kgC7oYGo^-aD==^symhW!fzkh^1B#FONRL2@#I5r zY509&Muzx)(t3di$*b+)a$%fu*bmINLNk#nY|NjpVZCOR2$Q{h5lf-!tZ`Fo*>LF_ z|Ay!D)?3WbwM5N42Y~>kbpFkfzNP(th||+gl#NS~<68mP#>WwfSi_w3Ki=1mNs_Dw z_Hb8Z#3>u52e{b6T(QM2O+X{k<%MIHqV;D$Un_1>;#Lz@?we>JHN~nk^{v|9uiAyWtjJ zU_n{I!?@h0f47v2tSYAvSddMB@h*#Y%-#Sy>hQza9`BeP7JbyprJk5r}UwR&Gy*n>E{3YOF>Kgo@`t499 zLZiF?Nc`iiz8+r-Cs#~=9S3u}QI_x2+G>VE0a%^ufCqAmrQ)UtFSXWQ`272Ez}HhE zUgL9~SU&!~W;4rMjmN{|0JX5vqBfg<-yci(yf&+E^*@id3G0ot&L)zM9{3&E=rgOf z;5;OB+a$j1AIh9Q@V0tc3Y1*}QJ8&IK>r4BTS!E<5JP-HG54H*-)_1nh}s1kXVM`R zw|FR`+b4$WvPchvia(6Jl^3t>@_NsEta`nIlvwv%JOsftk%uJ;vJ;?=EM9mQ6tY!( z^0O!l3xDAY^>UdX3U-aoWi`)!VP0Vf*fI9elmyNc=DrjE9uqP5+NDQvmlmYY{>*#v z{w_u~@5mLbH^Sn7{(h2!BZu(#>muUi-=*&P7On>V@47tr=X}m&d}qTzYBHg&a7@cO$xNFzX^ zjrv8K5v|r+ja3if*|+!K*NIE;;EUVeV{IC)zv%LR*Yz!bmA3$zfIxC^4e@am5wgM( 
z{EZC?;+?h2;*LtrAj(QfHY~Y;$ z*X;HK2h`So=2;N-C%HiMIzBJy*z@B$YJ36Pf&hr{Wp4-=PsX~x*dbExl#?O{z_K8f zubovv;8h2A(4_A{+M6BM+e}R^1KAe2E6RR&EEvgV@r=LOMUVI3d@M8As}y>^;d%c1 z{N5l?1Cz?1d*K}r)_%^p5N+P&b&-d7^$%z1{vzXlooVMnedd_S0=Ws=BEp?ImaL=xWat751uH@{W$XRp##)2CjZb@R!w1VrWsF+Frdp_+8eSp81FN*V> z*Khyk(_b@&B{hIdi)|*Jd+XiP|L#?^KmX=EJfHube*w?ke?MQiXa2+$|4;Ylzw3j4 ztRf263*+el!W#eg{Q=HgUv0g)A}{{ez4q@~zznMf(%;tb?|R~z3)|n^`gh&_b1%yC zwfmdrKqV0Vz5c(yjlcPSzWx97`TzArVq{*$5SI=f^26N2?|FqrODh~1&#@wuwEORS zOA!RThkCJ`3mRDr?yocEQ}OCofA_CBpau-)U?5AEzn_hlLNts$KVmb!A;k{BM-`cr zdWVloysIHN@|@i&C&n1S^})J;`-8`RpZCQ7d6%Ct4|0DmbMhNUu?x0S=iA;wUn_3s|y|E-h%rc?cchIxhMQL*W%;m_TO_bKKFM1^EFEl|63paYaIOd zJq{$-9%BV_dhl;7{r!IUevG&On)8w5tuFYvvq}Dqr+>f0Vp4x=48$sg;;1S=9pA)& zE~|4haX)|RaF%%gxy~v|<3FsCGv++?#}^NL&A)ru;EU3?-FdG&nA~P0N@L9wi(dR2 z2j@kN#B-uy?!;U^i-wN}C2tR4z&dv>E24mT|{XOP|4=Uzdnr^|*u8g%n z67*5!ETZ-u?W-y9YEo2#zxnfnu0Z8X zbZ!Q|#`!moIL`l@F@rr}!QzAeHD*HI5istZO8hpxcy^Flfc+xBTy|W3#0leXulvLN zBwiW+Z!BQ!cs)jqYGF)-ffFc_1A6#exPMEHHW4E~&gYpId|ozx`8#T$IC=2r8XT{$ zTOh~@$@yvrfWrTM-Tt?i|DSWl4UnEkglqXXZ~T4y?|T1@h5tWmXV#Xyx@_$q!~vkF zc$l650g@0PoFPpDK|oOh{QB>%Cjs@HbFRIbZ(sXe(*~71O*NxNjS`>#sDm?mavlu! z|D^W1I8%k)QD>%q$K-dh37WD|SO1Tlxo~z8OZ+~2J?H+-#qQkqE)oCaTv)`C{9KmN z57@AD_C)t1$MmDo_yX*j1`*zQV(1@?c4a1bsy&(hv2P@_Di0a&dX86ykHFWu(Py0y zo1XUlZJSXt^im(?`jYxt}BB82Tzk0B1n4eSfNTJu}7ykBVL~-d)?;_L+hKl z?g)MGJ5X1vYXt6~`d;l!{V0Il_3;$w*|7s=5$Z?s zOHZ>Ts=x+gzWauO6V=`m4R%O~AdmON-#mBFsAuowF3G;AI3Hhsx!&9IZc98TaUMAu zen(=pQ*P*~KPqxBWiQ4O!x2+l_w~&F7k7OlcbC2AuB&szz4xxZLI!)+08Ii4F{HDK8O>wcrB z8$okiOO-f1X|Sh7O?wXUf>Mj$3TTKlrXo@}hjr^h4+Cdg6?Je_q5PrT_)W zaj3UWjiu{$$sxri@E+uE)i@-|en5!r4cl*a&0q>CfBu4wFtJD27{6B9m{40yYLfvf zcxEE6`48;SDJSps{OT8?)Vd}g%Rg5O65SR)W%2pH_&y9)oR3C7i1?i-*TGrV_31X= z^LPI|Z)3fW*tiLrj6Y+@=m%s$K}2s zZB*uM1OF_q<5^_A7~?ay@G*mH4tbwgt(SbRexH8VWbgUCZ?d+G`oMy#pUWeMaVwR_ z>eCxBg~K`GEbzJj>`HvnHU5vYo<_4c6-!nOP4`!hGrq7w64R&Y; zX77yIPWf8$>ucBm;IyY$f8hFSrjM!B0e#-?M_XppNt}8qFo0ioJeOQR+4us#5&JCr zK>n_5^fd?cnc!07@w+y&^;KS>@;6W+?7hnC@!`zFAa028_49RU3ogVG_1Soe2z-UFYDU#@|*%paM&!N-$2 zQ+LM>K9ko^f9!!_oIIQPA7JG1M;8H7Dpx)!e74e9+e}MwjBTam$>YwUEQ|Nd*hl%;@_2jw$8EcCh~jK?;r8-Q_GjEe+#lu zj_<DreqWYuvNBZfZw!*UGcvz9uge7%gploc4o;ZiNYh58IXagx?Rm ztZ$_IL2co_;@?ej05hpst=CuV=@&C*f7)8Wj6~wC1-#8yi@HxHO)y&c%2 z@kdhY^EXVWrgCN-jV{rDZEM92~i(bmJ5QnSy>@gxf(_bOXX=Iv16^HLD zJ^Fp`Ue?5(#NpUxh0V6XL0MO^r?>m7{HVm@slsRq-4nf zdina_d0s^~RUCH5s|DkcSiRoUtTMKr?^G_2tpJn1>oFpVy=CqBfBPq&NS@cERw#Z! 
z3+JyY-#<}YR2W>#iJ7*dYtz~!z)1T?*E;jJ@sZ$(!~qg(^&XcDh$r8|7keEojT2pK zKd)GK!)M48cAnQVNus?T>e(N!a}Cp=!6J|K4`MQXyEiphFM6Ih`bK~1Cr1bnP0oY( z*k;uahylev2i@n9f0jsY&t2^t3m@I8bp6(Wsj2vQxu>y`3R(e%bB59@alns*HQp1mP)-(nS^R*Fn0UJoph5xDI z^EFyi^?ZXdNqS~$ShYU*7r;d(>b8bPlsZFri$I(ghYDCD0mFOzMPLY$=>m55+{+|WKM{rMN z&Glrms4IUSf6v@g*45yS;4_&VG<;jMvaY`5WCU)60-vlqm^Rp9((I~r_6HSIZSNOv zjhU6~iA8R4bkE9HC<;DN`{C~9k?!Q|!Du{wJRL(YB(c91V(z+k0Pp{LnvpH5lAo*c z=7gG(7HLMN^%LR3b6>Ogxwmd4c2c|ESJv%{g{T+`f4f@byX~;Zov(E(<|m4sRQ_M( zz?&c6-zXeJYG~qHix`32TYhf$Y9nCx@iyF@Yuqg0e*k|A>CPY4+#&WQ-EC}t5;?=_vDzQuo2}8#JdI*(3>@2qj-JVjv)#t8)Ok$Z zmh!6tr5Il{=Ui${%|8CX9jMFJAEv`#|9kz4JW()!e-#s9-~J(YJS&?v>?RVoi2owq zEbCA%{)qi4VqCFZDQm6phbs=Mez#z#*P2s-m$yC-8zlpm$4g7$-!3|WXlrVhmlBT_ z9H|9fEqEo@+`sER&D&jLpR*5__dX9hB72BXe`92yoT?UB^mOeFY^+!Pe5yQAk20`> z?a*7b8gIOpR6h?mC;x_UsshR9RBiVsCWPH-8YO;I)|P(Vp_W$SPXFuO;f@r3Ot5S| zm$N?)DgnNi-aii*0X~=aKMzGQxS!-b&G`M?o}`Ugm#)yg_P^ZI)$>_uAj5?t=hBk9 z#^er3=v5D&>@WGuI2SyL6PIm34;c~I=Y~=4TTv4%@<{G+8GFb+mzO{fF@KylIafu# zgfCR*?&NRyxef8{MV;03XnuOukUB&#)%JRq2_7A*{SNPe`>JAA%g*L+?Ub6TZ*SsI zFvy#YV0_ch+y@Lma-VaV`}9?fOumNCnm_dAS{HRRTk^O|oVb6!ofD^}2Tz^!nzJas^w4Vkc zJBXsLoPDqB+E)T$;eXRo~XWqsm`PnJ1wQCBVEAh9Wu z5BG+0Zq@6GI#}Tgir+&Hqio6rGI?=!iZdh8j=N0&3pW&f+VC2fZnCr#bkfVf}Pi&Pj{udGDwFGvnHPZOcsTbpF!Ovu$VXFY!I5p}B97 z;Z6G+ci6f>$6i|=7AZA=RoH(4SB#~~Ye|W7;VB%|{*UU@AdIFa_1a?p$!TxA7S7;p zzsOspcJY7WEPuqwW$df!?AJI>YUtP&un2~%>eL-_SM0*8bpea!?{)E&@HbY(=PWyU zDtnI&N9qOs%m^PF4435+C0s7J8HWWAIXIgLAe_)777bQOinM`dss?m72jYi4V=rv_)=E z$EF3iwLMNu(2Bp}i3LYN)$ndLWQoJd8npYV7kS7PA1-P}#a;Q%XZd%v`$c{G4^BgI zcd_x*CV$i&=5t=d>*fBDJNvbEI6qzAM~P9qXb*=cx5z)H?Hm(b)cHO=KaShR2<$;V z_Y;TMi`)&K2zvu>F|3hnYEjrS6vw2Uw5D%sU3|5?-ZS*GGo|KjZTXw<7y3S<$P>Rmzck*7K#=Hhb()K?LQNwW zXP>P|yL8h1LiwKrUIaeP$eY41BNi~Lk9jvz{4$?v2lozs&=h`dxSdpd)!^*Y1f1AsaDS$WWpZa3Iw1c0wr4(te1DGqnr{w{Ha0GK zflELMiJV@3ufUl;<3p-<6m;RBh0*eLI8U`O>ocNwfDPedM@Dtw^}At1?`rm4hcgMU zuxX`xv3qW{ax@i=PhZK>H$ttwS9WiE=sU~1`p%(7%pT~(KyWv1cdEFD3hQ+Z_i;K*UIZ-ot&ef+h@CzVr8XizfL{Z@53*(>}n zxE22ViZ|FQE-Ux~@H?YOaIJEmi#%EG?ojUMoOIM9hbNw@r7y*v?L#vUau$Gz^K-X`1;?EHA1kn6FhYuv3b98$k`nCf1K+^HAC>60AW zN*X%+LwKcat!>0$QgY2@|Mv@H^<-C%P;|KSM&2^-pKFdGHkt+wctvm29e*&;#Cv%Y zkrm>tBqG?RY4x>LdG@*Mq1MvIS{pq*V#hhPrvlfI*bG~LcSit@T|at3Q1z23vE6=J z{a96+7wro*dKw@xX?em~#Xcp+l>s-Y`UQNpiRR9!w$}XV3$X`$+1P5~k6LRNpCR=O zAy#*(XVELC=~c~bYTJTO5`V%e)t64Q?j+PRa;vB*6laDyzB&t%f05(K{&QxocU@}E ziWf3RMML=u;f@sirjl=^gw^CK+)? 
zjauvd^G=`7@KIAqCW#-wxrIMo#t**!B=N&?vN%lE)L4lhw0}_XhpsipZ>+oE5lPMB z%02iiR|$W@UW!ksm$vGg2Pg6W&DRhsP?Ijsr^FJmtc|q?6LSi`n6YB4BlQ7dLAY%V zhQ+G*WA4+4*?+==n_TX(=5H!Zt)?$NRDMY8;63y;T!i9Kt|%C|L)!9B0srEiYI_w@v}j^eGKXZab-REM}5OoAujo{SU zX?|wf^<6J?CEs_j0mch^RMwZC?=(L!sQoW}?;%J}I*LG?)7)WQu)X~Jih2Sucy&*` ze>5GN(SPB?$R`y)*}t^3NH}K!x|^sD9>7k<%=q&zQeY#NNXgq?f-jHy(uxYb)R$cL z+Bc?xQzxgk7x;sy?r|n$U*jI}K&WE6FV7;C8`*(mS^Uw^UDm=xERQ zb1)X}i(u7PTYAOfsIQgxMZWhPUljQjce9Ak{>-7ZlT~7_!qLEPkTdaGg}+$}qfznr zu*8D+Jt~$Tfz?r0fqH#{n=kcG_@b7N268g&6tO@?e8hb?W!NRSIM^idg>$iaIr(rx zeSgH-V!MxHnr+X~D09L189~q2>!T0WywWkmW{b@_%;09+V^_EXOtwm)VX z6(LxzV{GHTBxRhD${?99g)^XXwZQ9m=Y&~84^P8{DPRydRgDLeS?~3B)MQ6+$l>d& zd+HD$*jgQ|V?6yb0?sA;hccFT@b9{tyMM8r*n`#hp*hUqGDy4-%=IZXJTSR-m6osN zE4DrG!c{q&R)Ib`PSX@-s~n|XbSuC zm^UKlqO_w$Y^U(Gk`G>o%LnV+z4uOFk>ZPWFpRWmc+!udY~*}HYoNkeuCIZMQ-8#g z;NOx5!XYR=r)I4i!XLQtPT>j)Kcw`@C+;`(?-gUEjkEP+pqT{ERpM)EJc_SlYxfV& z<}mK1hU@WK%3?jrur~Nn(s!si8{xyGpAo2~?vA@2u}3k_4|3G_!oK1Y6Dr*?vEeiA z;=RSI2H!@DnTHlLEft(x_zH5iuz!b!-v~&rgp)hyo=_iJtfgHZz zyAzL5+iQ;L0l?NraK*XXPja8-wXVVIJG}o5jhzSMlIg=N=d3h9(ZSlz4ZH1!a9PFv zNL+ZDwQwD(-l+0WyA$R<=MSuNirWdiAxM8))u#r}f%f1*2Y__}Rc4bI8-Ha74tLR_ z9*c!h^%YIwSl76RJN@IHsu)T6N%k~V`!4ZNk-wFEzaRN~VG9&a${i2m@_dQ!g$oik zig@fof6s55Q1HO7eO=0)tC+6LN4{}0mE#Ndy!g9}At`!+gxl^4o+j^WSo9X-LBaf?lZGQ@m9)$=3p0(`58U^~87I*G#zz{Ld1zVaUf#0(BaAzYT z%$G_f`K%Dr^ey5xYzXIqpg;BcNux3DMvVyH?hX)e(V0$4e$fSZf$v966S4_hpcm>g z=Hw)ho7+XB0-ibC@e`&ff27+H0s|PEU;~`525)4FC=Brs{Wy&v9e|hFHE@SfnuCdi}jkm?*M2+I?_18rDoRuCEAk*=^*LUI9IL;_YzUqW|9=tnj zP2vowQ;TikK*fhva3leU)1KBOsEEFLk>^`$Mgk{M+X;_t;zr#BZkr^>H{qF#>%^v< zYl_ME?z!q~gKun1Y=7!?^t)NK2mu0AALX2XP(-HMTJZjkaB~&!PsQlmX*n}RUy%3= zU!A#K4-O!<;aV#Y-q`I7T+d?RW9F0VSyOxe3zt=AOE_gJ-cz`cI=_nFbnP<~`*o`s zMS0{+i6YuITmc^tTlN_q0(gnu30W$CYakh>Z;dSPWyxj$4u8$P3;*Lgfsxr zxJxR>2YV^jUH(a4COtw$-X(FdTJu|N4s1tpG|PCsxZ^4&zx9LBMWgnjN#0ptt^6Ih zB8oG(Yj5{{ggxfm3y%STCEA32ITZQ8!nE7E_dKNTpnv7rCsDQwSM@|p>zIKAuYBc{ z?HJPWMQ%hCo|<0H86aZLBf;*=H{oK3Vo2fW>ytk>6@<;Q-E`G*MNJm-wV30U~{rJDTIAp9rdDGjHog zn3=uv{6?EL;#5zZd0GrB`ye9qqV=*i3@te;oTi&*ne}a#hd_NmJgr{>&FTdXg!f6X6Wu1^y0BhN4U_hY;c987=vi$yqp5S_;a$Q3`m=Vug zT7Omf)IMBe0_eiiP2}u=t;u_Og!^#Dxqsc_aCJvbRf8=*7aT8gW|#I5Hl1k;P7@|| z$F~9&(l=i2n!?$dq4#DoH~eK>+cE0BLv4mU#zr%eTvBQWaFEGE$y;Xgt=M~s$%!ne z22DB|aSiX&TUDFdhp!(KD%X^23j3jY%)nT@t3AJdt*{@u@(pF(|H}K2{u6}(7Jv3c z^`n*cZY7H}FJxSEsQ9Sv`8&iAjcTnM^ryag1@n}nUnkS5;uA}@5 zE~5)J?ldOb+LU@h8{2BOPSFyM1DK*0(%(Zfuk$e=#wXV&-pB4TIB`jzkCOoMG2z5V z)5P#8$-%Jz=O7oc`qmtMsS;=Zw|!5@Y@7=E-Yzu4#Qp!%j!-K2E4=L=FV zp++S4jd)ZSQpf z&su6=mVam~0KEy8jXf3IWaV=Y6doB0m(0R%;uolW&%u|$H_p_5n@kCTkPT*Q7|&(^ z*FsOfXr4>qg%9Vx=082p(9r_xnGrjKfdP;+i{VM?dBOt57hzX^tUI}O5yx(By>POp z#J8$OCOiVEaiPIcV&z4Ty-#OB9-Tbm)JOOb+JEUQtOc-m;IUhpf@^BJt8sSQVN`T)T-_iR>t;<1^BEx}-;4sNs!+C-b#jt;B=voy{aN10*b;GB2N1aLfJ6-yK zRQ$fHR`x~!{H&SS6kYOP>;_sZs^`e=d?v#FOZx}uCd|-bT!gb8X1|t!v z9S83IV_|F>oe2V^^ltDhiR1vC&g+&!ePeB)v5B}G7#f_THC%Y1cmQfbB7D`D_bN)+ z@Cn2=iXXi30l{DQhPN#!41c5IgF$d zjHeUKZTf@fEyNI!-j9FG0|Bla9e)MN=m5Osp-hOG{;jdmf+852jLGsM8g^Fj$G?A`*2P~_z)|+GmotBy@=1eFuT_M(%Q}N? 
z-{JydFlu{e>$>KfD^^(AIsV;Zy!5wTi0b#CCf(0XCd%>b#FPe}_3{J;~Q1N)c2V@1q~{S#;vT;bl8 zdUSbrcqaOF=IM2R@WpCxTM2lk>{C(0kluRPTl6EY`;DzH^f&l9ICV4cu9G*UZj60) zi{~c?QqAZm@Bu$M#Blt1zqYY$zVUhA(dLUTJtjwfkKlixJd-|1pP;Bl?MhN>MG^dT ziR)_~_ynvelljWz5@5_--T^+>BWwZ0+1MLm6LLL&I9NBSbP8UL2JJTg!q3PBY-qMe z1&4EN5%tQn2IwI3V#EeRK_9r1UVm^1Hvo5X6eUw z{hkIY{rG=LzwM6v2n><9j=H|)4=Z&9!i{ha&eeDsYTIXOurI>d($1rumY_2~Rre0e z8AE>g2n2iVb_D0q35S{ zWZjq0TMrk1+QW5|IB=_KYLYL}16tygjoDdnOYNmYUvd3(+`^@w__bt1KTpd^R+WxF ze|Lyw?8HOmwe}oDS_7M@w@1&hjRvZJhixnN0cFflHyUjrZ;{8Wo~jbZV%%=~P_M<+ z{-((=cx&9!5rCq?Tz#IK@8}Y0Moq~4NXy0e(7)V&6A%&Q%cv>cR}eGoYk_?*G25Rd*uznZ7KR{3!aOzZ9Dvc+#!0T6wXoDdA?reU=lkj zUSnBsu zc@=;VY%BbgVSUKoeC&ue&I4Mw!}z3lE$jK-FwwKw%sYegygADc<^aJe&e#U}3b{2o z`Sg{=1q-P)w)`#ZF13=3K0Ril{EjQor^7lGIzn(v!Lfgwm$E(tcfY(hs)qR64wNyi z;7VvTecz{R?Wn|_v43~p_fbAK;B%?i0a9};Ja>;cp{324C>YtS6g*GjZ~w4@e`lL% zf@V+Cuf7v!lc!SZ@w71SI@ANI$A;gfHtoQ(KxcbJY?h!&)G`sHN2SHrqMG=3CNBIB z{y4k++_Qy7f&3gq8fkVr$AW`UTd+0_IxlPylq&2$>-wd2;eTfeVOr$}1y4q7{^gk4 z)NKVjkp3r$L9GBimBLSpEmD5kA2sN`pZl#a?raK9&sD2S@lpQfL= zrHO;B>caO9;D0arYx)E<_tbv{S6F+Ejt|EUaw-1NsSTbu>rx+;-hbcgn%*n9i0$b| zc*zo{NpG3+gnw@mi4)>%Y!AH$JbP=9Bgv`j6rM4Z@(^ zO)ciweu3x5F)^n6-FWHdopuUu_QTR=CR$ae80-Y@Mi?`<dyn`1{snMvW=>WqHt>9999U9F-|<$qS=Qs-IDzPb}7R(k6L7M^v# zQN*2=b!hnr0mjSOq0yt}1Q#l&e>U={TaF3xu}#lRxrjPeHb0?})%8thI10RnFg#3_ ze$%EQgZ`tpe(fandypUUR_~NK_+r66hrzk#o1-SZN%J1OgI4Pjb?Jwi_;t7$di3#r z2UksSE`Q+xJ4iF)3G_xD>zP~v3eN7VEFs9ltLl(M%OJ@1T- z*#>k^?HH4cmW9C(d+KoHVjlWf^tkEMei!`#dH%shYNf}!F;i;n$bB5PG<5!i<2*N7 ze2i;}XI3O9GR)y;E=YETi6nOMgdx$k|FW@P?0Bc%Mb|@TMDk|LLJ6 zNf5(%+*IF*DZnI(Fw<1eP(0_xlXH31TDW0+-`a^P?-Ly8Ti37DLsap=wf0HuR^C?yj3xd(blK(MOr^H>!^{ zqvm%gHOu0mDebhKg=@|!9Dicr`+uJ;sYR+7{)R=Du{^)4;1Wtdk6=)7 z@bs2v{oxSeOyW^5ubUI$@ap}>Dg}RT@6m~}yqA&IHpAnggFo-A8y$14bz3?;6k5JA zH4jY^`sJ9>?&Q=-P9OAt8Swp?`!YE)(EPA*s3%f`XKnCxuHLg{nq0fdrG2jHmppxI za&VXU{FvdOw!Th6S##vHM1|dTHnjsq5-0 zUkenLDC?IgVhT;O`JD-Tn^w1R5ctD<~S0*IZXHGVIjZ zpR=YB{7)CXyFHagYt^Q`{VlmLX5m4^N%?12wMnDhq2|Qjbe#~p{_A=896wgn>Cl+| zwv$&oPX^x*?GM5Hj`$RizUFKHESeLYH)^z2b>HF{vI9JSX=0va+POA4<3KD&cAXcc^3IN{UbMGr11QX#xdpF z%RF6T>@{_h0%I=LtI#0~E%s~mjWIAnk!L1+%B|||t3u3t%b9qu@G{#>KAo=zp=j&+ zA7Q{c;ybjI8tCfbku`v;4{jjqrtRMxzeCOCnj;o>=9&}guO9Ubo9$(iWB@*qyDy_5 z{H=e~vzRgpzK$-8#CYe1Oo7E;tne4$OqBFhJtFwEm+84p3rDUhq^|emy+7dV?=mpU{}!jhy-4 z+|!O7b&T_e11>@DGvSZs)Ztg6z2bIYpHqKZqkJ0B`GCeT{TTQX3%1^#-p4k246)N^ zw3Aie*LW|pk%?5Rwm(sI;hGP)XWb9EXYdl~9hW`;>MbR%*c(Q7rTs4ZyA?NI_!26Q zB5szRA+xkGg2$+@YkmX#?ASKe*%KsDeH!8VVZyci#sb{h9yo9`g2of)oBjtvecpfT zAdR5O&+Wm+?;MHIsXKaLF-x$3iAx`e9_`<;w(ntb*mcxBs_l){rAExqhbG7!%1#bB zgsiKxp;zy3_LF!N+^FbXD=@;ck3pWX;4p1Uoy!z$h#%)yxVY3$iZw{lkA3aouZ^{~ zB!1SrY0v58$Iiq^3?ocOJo|f)!U=!mZnV~BWyyV~*2cMyd3nYI7w?mtq+Q*;SC;f3 zXfq1=p>{GhKH0fHW3L`#`hLe7=_^2~hSoJ>!`xwi-jiMcgKi#b;p9b`b)2F@d^EDbQ-Q-S9lcMlw$u14XR%@wi3h5p|yW?gC>pa zbr4lJLqOi5O|XBXMuJ{9jWl8X_Z}PCQ%7{S(E~GAmpfw!CjjjvYBDn>%d%&V^mz5E z1HawTYhYIM{WPa}o$R(#+jpktMC{-aj7Zt5YSqPF!TZ#~@zs9*gS}$BWRKb}dxyri z@<-p;ui%E*b<}_6eJArb)hN)Xv?Srowb!ZRS^%IqT)>b zIB(jJuSq@c+x~oGf%IlI$&-}s`FYeg2M=A|cj-?wo(I254|sr`z^8u}wzlxMdz(4_ z=>G-*92xvy7n^1e!yLctiXr0s<_&lC2vNSYrD1;Gre4Qz9k6cx4?*tgb5r(VBsx|M zKcM)^-6(LAk|T8M=jNG;oWJ0r{mlEXyc6mVvKChzO5r76Jo>Tu{E92c90JKz@IT+W z#q5338upVecnn_;eENU?g3jj5#BFSz@j6?O&pMbLn~t{*wbvDb&T!PcN1;M-ADwl4 zoV6c6(HWBXx>|Xq`;FQMf|E7r%Ge$9{bzuFEqA|@Hs(F8L2t!oN{wAOzpr7CM&y|E zn)u#~x;j`e+$Sr#GqMkO#`FX4rDti5_E=>4w)7hkhf@DRzn*6Pw#duD8FAe%b#h{V zF0n}UxEKc>)yD}CH8Un%W>1U5nCBn6+w zRreeYVEvDHz4VjOD*&?bplt_zdf)B`PiTiJOl?eDEVc~$;YQ%vz)ybnQm2Rc>@o^f z@?z2DeR`|XvaNg)dMhNztjDghLC4t~V1SePGX|3T1cy)QE+MwCPA=#~E|>nI*`R-j 
z9v*t3q4cZ1kC^^F{8QF{d?db=aXG3k9OeBlcmZnE&#fNv)89+1N^Do!z27`&!7qw9 z@H;jt`%~qy6&&WnyyAcqIN(gED+>!Yj_`A7Pr)X5kZ4xb%-4 zYe|vYXfu09M~IHH`?daKAXUY4wfQqPu^S{PctmJ&*m~%7`u_fZ*tKdKFdJP8(Ph0< zG)&b%CcW>osU7V4PK8;Ia`_7AHj3nd97sx7P%%kV=$)%jpXd; zDZ_u!Nz&%*P!w+-$ZNl5&sy{pOu^V)0*b|{5XZvFMZB!W0;?=90&Sbm9{&v zAv06q2Z@cP8LOLr^J3%KM^BE+kWgP!|CM=C49`JnCMULGN&_i7c|4tv*Y_QwEpaA? zhaYj{UH_Ln51vQ&wB`T>KRqKSDL*Fv{O|Z5PYnJSPKtgi{(FJ@TRbcLobWkg-nR50 zV&}3Z@o}*(fqUhJta=7>1=P9Up!?v^j>=$H;X`nSiyEbW#O!5%M$y}&`p1fSKLz*e z);n0#2w4ZE&nCP>M{i`VA2K!;&J;DR7y5g2<8D2lNxYk+4%+5Jb3J!cSQ<1;zR-6H zUhB?>;Q|2VIC0WMq#&#M>9`22sMU+KDsecy^j4j^?h~9h-5xbtLF6qNPFclOcIcg) zsJJr6p4E>th|u+3WK;PQ0?jKPAaTpz#ET^_kXa2yt}6UCiH{{7BJWpxN_cOgNmtYt zRj)D;s%oM?n(^``mh@YJW0{=2JeM8C%@;`8#cd-f3?r;{C>m*_Iilp9ytT> zUiL<8ei-if_&|RpBiBX*I37npuC4HBf_Wc1e!Ogh7b`<&P4MO;DYM-if@%8q?G}2% zBjIhE>!cpAA<-^^*TYD?1Lb(^rvYETE&WiUt+SoZ#?Ha-Y=n2>_xr?L#NPOcn)HJ@ zk&P6@iFwL-f6y9090ut2JC|7Rqs{-BE$CDE$OaqH3FV#!?u5R+-eQX`r`#_`QfIRB z7g=YC%48--!OJbULO(R|zUR0o%n-X#e6O@EH(f;LJIyx!nL2+WH32Ro_q` zPA0Z_??4!3)&he$@!)X0b%w-4go({`T<7})+4_NMx=C55J zO&A}pAa{(f!-b}QJfI&SmGYy79I^R|pVV=g;yb_X<%oL1P#c&`n!4I2!A0qB*hqgv zHPPCdPJF>EBGnhDDc%%g^@vm9V2Q1v8&}y|@St`3gqG3BTXf*|GIwlQ{baNr{ae(% z1(#x`e?zrWPvGWOpT^p;7PW%LzLnlrdOX+bL*C(<2bU*yno~%D8GIJAK7CMG87R4#%(`X!&e5iXZtTbt#_uxDAW

u^(cS$%x@AMSIv+)BlHo5R$-sO zeSY*SpgB;~lfdtYSMXhG1_t+wOUt|ff4s)y6ZfY@!!33P9wT^q^__+V|A1~{8Fee-RLW zNkK{I36b=ksvcK-3Ga<$RY$LfjC*80!MisO>Ku%#Rr=LEoe9E1%pVIks=yHKCO!>~ zi6flphJHwygYfkrX9bnI3$u5ae{J{KK)?d9qKL5s7nbw#)7LO>@*1BAR{o<8<)O(w z?AzE{#FU$&r3@~>|07ch#wfX8t)HMPMBIs7#=FTpM&`G?bo1jB{D3?|c+AoBfNCES z>C4@^Nr0`s)|5a(?OSCJROQeuzzI?Ogj=ubZ5EFBLeVVYy#JcB!`Gj6f6)Vy`9q>7 zD|U9-L_0APy#m%!eNTTQe$#1=Bj#;luZN4g<_G~zAVEez4A?vF&Qqeq ze6D>(0X+{glk=*L$a}cOKG!-lb1%Q&7d0`_j`-%qgVp@NZInj2#WStx@sRqDcPDF7 zynhBx!#@_TFUn(R4U1j;f1B4xPlpI;J~H_(;5JiVp~brAtSet|W%!+k^ljdxzlSrNwCv5Htp!iKmiR^ouTMB) z3#PC6xGa^}i{FuR!W~-2X3-yY(g_;}5XBDgvGn0s?}pYj*?4eU=HgGsK_d`s~tOYYEDGbZ_$vx4S3!s9xZv z|6<)0zwnxqC@vh@oQ3v6(L;8PC9eGSD?h>j`zDWuvk-W%R+N}@9HK3~?R0{O-a*ea zvI8TAqp@KwdWL9RfUk;PAlkV>$O&OC$FDupYFXvi>wg#O&S8hF33GGdAX0Nr_s@(u zqIc5kIN4#(GaAMvZ;eObq>?kEk@&}JJLu@O-P&UF{o0~F!=;sVV--P3#J-YKm$-td zaOLmk{gqD?Jd$5C$-d97}Hk&{+89W7R!r2&B*6+JCm9WJ&JYp&yy%t0OrS{S-+8g(#|b z*MSdDhC@nn*ZOxK`w!!!@Mhl(v)wR5A93_C zGdFpkxf}G{HFWm^0G{pNyJ6Bo`)S9FOg%Y^npKLP@$rNC$IY}!M{<_jy7~ZsHf2`S!_f_`D-S}`Nrh;uN^w0kG zd%xy${?sroY?_e=c8HdGU|xdpWJ4cXczy-lB+8M?8V^#GQvjtAI? zw&8*S%A+b8!GH60em0qxzawYX)mVemO zbq4Wu{gmb`I;CaJ>Mwn>*L^AK8{fUnzwJkkEs(lCTn*t4p0q~AI~`xZuUElZv^$IQ zh(;r}uVFEx_MN-U$NoFY49{I7bIS8dV8wi$vE52U4GtvRvAc%d|z7H2=WTf!P0`NY)wlwva zDfnO-%o<`Q1OLqyy{|{nHIq7k^ue+XW3_*umJR-!k=&J_9~g~r}@ef5v^1uK8G z#jgC`m+M)V`=Yd?%KZi3Nc!*jVlWsk2KrlbU?f;e$?-Px(Amrv_#nfaQ-5spd}^TR z4yDI>E0vmmeG@ZN$b4*egS3_o>6X=J&Av7|_Vf$-*F(v}$kTFBXNTCLXB+Yd`^c;} zJ#CYpCwuB?a8o?d>uRAv-S>k%=d8%zRh+bBxLPjLvQ-^TG^16`O!a1~dwAdIk^b?Ute%L#?v)-wOKX7{#bm;mNy?Ew8;gih7 zeu10X2%@Y;zDp0f=U5p7O;{ZIOIrF197fTLXGR^b1ApGzJm26zN9+ftM~|9 zMfz|{eMR;~&b8thh4Dx*m!hW)`=5x0veJN&z0v&;=2%FLd+-G2-hX~J^QTJw^#ruU z%qcXFi3bMNm>EcZ1_EM%0g$(C8e9ERTeVwFuo-wI)G7abuXVG0jRbRAwfdTOAGK{p z9*&+0xRByuY>0zohQOMB423JnS&Py?*H(A}-@UF9uQyfPOAmG7-+8~)qp;H$&{ovs zpA&NGaP5LclP`r5Pk*~;>hd$|K0bgx1!fm%{`i9ar)XjjuYfr`MK5l!hp(9v&r81L zYvxyFrhV;=@%MTQ4@0f@HGlY*>n%R|JExLb5!#^c8ohNiwYtyL2aJ{9>{pN7N{$%k zuk(>LGhWAQy#X;D92lRtpQf|$Up@nKNBt?i0q9-%Hs|{LoM@8|xJ5jz>czLaE!cx5 z{V}CaV$D^|AhCmrAz}utbDvXxBABbJhDz&76Km1b2Ckne{O| z*FJ{1I^*RX%@)H{w8-Ob@ipc#2K9c6ngtcRMQg0lyd@$U<@=XCdk-4{IhR>`4;z2` zkhHVzrWu+;ciz1`u&d2Iby)EGc-*d7|9A(~eN}l3I%2+@`$X1XMHD4QCUdqf!WF0f zsd5G8daZeQjP}1cQ^;{@`HbEM;piX}!u)H?Sgz3C&<9`W`6um)y2q>X9*ah-y2ox` zTcR;5{Fa;~g`YRHE(t5SxSzMEM+}$qdk-5XE_1uM4@Lh^!RJPbgL-PIDd3)??5a_u za`J194PCFR|K)Szm1mbUd=DxMoDOt}w9lr&EKTm0X?zbp8t&T`9t(2FRCpr22I<_o`sH@0W=Kl@BsB`r+7%v_ zs%5JFXvL3{*V12f<5YRV*D3Z?^bks$#(WB?Rs8$=_tNygf^F(_e_jvlm%dmx=zD<2 zNN~KG2szjT`V%z7J)&W<23bi6r%QW83dV4o-|Y-bn|EDbBW> zWqwDs1@AD}?$G;hvydmU+yd)ES5D1fQFnAnkDFRU^f=&+fnG#c8C&t&EMpz)JNhud z-30V>H!jeB;R7qr+2i_PCe_M%-8h4v30}wd>&(6DF7@QDy=EQY<`}A!Br}TQ^sbt( z0IZK~XH5Cl&my>^uqv->BhhAQ3m%wfkiZ$fF!6@DO6=jze`SWh;w@V5;JS{&bw`@M zE&NRx!xuf6KWiW0Oy$f6A$BUQvm0MY{Oh&n;b*>e)tORpnVJ<;oacf=E%@g5Jl3lQ z55IGk+@#`X19#$o)s z*8{fD{d$L%e-ZmO25;}D{!;tc4{;>SW;^1?MO6C8d~YAV5;8mS=RM!^ojh!;bMR6v z!#Ba%MpKe{(DvugIH*9BKLcogq&X+Nzy6G+XPs)VKmB@g;HII2b^nUyh|`|0k2|#= z@U=ak{tLo|KJuZWI|go{=65Qc{%f98*oRx*^;`exf9HOn<)-pqnV}^8lv2O&$JxUZ zyHWO z!ikdeOqT}dnLY%Wp)C1<%t0^~hjJ~dkHpSuE#MubZ`34qfN#ndEVBIQr^~@7NBu`+i$(j0 zx#MUQ%boqMbNt|Saz+Y$y)wsE+`v0D7K%QuOPnzyrbbXKmlEB%Cwh1!TWk%XQKLeFiL*2B zV^Rd0=I){AOwnrWTTcV&h4GoShd4L3e-FRo_Bx}O5u+~iC7RSdysuU}M7aa3BLVZW zdJh^J#6$Sb%Ab5l+(a)mdifJo7jUWD|FVaDjdP{qJQYjbwAa4%^A*?mrb8k)-tXDo z^7|A=N7W122hmS2XGSL$Tn#k3Uu8Ce*BwL|HJXe#+-kQ>);K_z#7q~qOn~?EqFxvm+USJlq*Ni|aCO_?>JJle*VV>6NX%QgCHkJAH@Lil-@(1#IjS(#W zetasuD>uAZbOOGyjhjvUsl#@~e*_d}_wUC*ZhvG_V?TMG1G>@L(0h5_$5xm%1miU8 
zRug~ETygE(OA}_Mqc1oV&Fn5YU=2HJ?bcDF?#F`3NA#(#p-DzyI-)(kYWk<6JG`rb zmwHYs-Basq0IJqKa-U>4ZZdNY-HfmaFQ1u^oGgU{4Vo{4E706E(Ja*uf2itIGKjrc zx5!rW`NUrg=Ue8A{|{^RZ^Ss@$N%LT!O8yT*k=46uZ>i7F4PISOYJSws8R?q2A+fR zgI8_`_v}_@6XRxuoCqfQ#XX{C@WHesd+v&Q04|B^G{2Mz37Wa{N4ys>9>{O}c zaC;7hT3q8^dOQTvkbSoKuT|RjHG*2VwHc#@0v#28KmKs_lvY2Em&bz-83DPM;e!uF z0SlKWgb$bwKkwwd;gbwnR=)FE^-Pz|gbyke`@9})>R)wc<;grv?sczqHzi3`YnKd# z4=sPJ9p|KFhuZ}0<4Lg=uX<-yZ|iz*79apeo`;VXzhrf0n<<*O9A@c5IvuI+bRi2Q zs=ia`IDF5NRQsy6M`G z&sdC_j0S0=2|^#q&ad6)qY&Ge;Nz0yD|~-?UNDF061vgpTEd-gVGc9=Wu1@T*7C}H)( z`Fd6tjnV5=<`Id{^V`b_ap!9Q*Ms>xq}5%pG1!&+8u+v|q0$P!iyDJ)ZR38gi=N>t z_Mq7qz|lI*!c#C8{6J=-P><8nmo`Zx{z>F{$B!RqG4~tj@6fQr7uM*uR@#3IZv#_q zTW0n&F~r{s7b!8@hW^$`2I}Vv9w_;5QslqG-{(?heqLkgLL>P)FOPY|WzJ`o_P*rl z+&DCXWvJY*jNK)VRC8sqjkhznv73QZNGT$J=`ioL=vT{wYq? zOgHIk$mjck>rD+0exwF(7kPh{K?E0r;mjue!lGfTF_+>zr@<+QOG#HVJq1$+$I@>r zeE?CqAMV+#jN#YbY`BJpgxtpOdTKO`Q*pD{Pk)_E=a^vY|A6a92IOb?c`SLI{jnBQp_$iCOZ8K8r*|-VgFQScn`wujK1?g>3v{kE9);cmrQ?x>@dItQTu|6aI_lF!cB2^ z3`(B``XTOdJtTkr!+ws$egbZMYW6k1Q8R-1cU#w+M%nLK(%}6K zhF}}i<`6l8PbpX<45G%8^IrXACJ(W^%)VeY7i{eZVt#*nws;VIAnEs)-eEW+LGXci z%ZrmR8zRUDih~LdhWvo>P5Q#FJ`DNzt4nMkgePL`6(cB#n`*5t95m@OQ!`?yU{Q)w zc%toh>eR@fNE(9qs=E2jHs17z3*4-K*$Xe=HuJ5V$$p&=1cs>ULVxrfE6lOj7u92V zn`5RpO;LYdneP0>_LaG;deEOSxNiRHH+wrZ_xJBK&Te1(Cw;TO`I`N1^Is5&duB4S z4u>xLh`?~z*`aMnjJpwR1Y6E}yNlM&KATKcuFxOLyb(A%?tFQl_q12hz_kcXYjKb+ z7D+;_u;@z#N^Z>K4GN;E=MP@3IarmNJ#4IMNA@6O2Y*raMEKnF^UL~Uf6hnvc4&yM z=jb{Vdh}7XI+632PKysUf4}#*zy4n7tCGG~>7gok{-UiO0V}_fOA-uKryhek+F^Bu z-hpVV=kL#T(b5(zNWUcwG8A=p@O7mhDVC;^Pr)0QAYSs}|Gw|;hbHlmJ6p~Z{naCJ zZC|4_n)u9lCtlaubgK;)~>ms>SvVohC3x| zDi}YrKW_2&wQm8NSI&I8a)ssR%6?(hM=1IN_dj#Wg4(~tHTGVXdLR(O&B%d>1UJdRVzLpzCV9Ho*t%Kc!D+Owx?)e5k>UItov)5dHPDvIp42* z)gutS^In-xe?%U@=iteFms};O(Tl;Hu2VEpKj$CewH!OJ!N>L%J`{EW+akJ;Xd`F= z{bx{YymGD#^7{#?HFI*LzxRK|6#g}+4m{hFhJ+2AY)X#`uhTr@e;nq02=7%iZCHZ{U2*Idw$bv@ z<&^*Oc_6anQm@}NneFFxt4C`je29?w%ALW}roE|M`LlREmQR-9z+9^p10CYD|>2-d<8ax_k(1G zXqOV_f3I?SYLl-Dqu;XX3*!*0cXiQ{2@{*RQ-+(?KghRqZA@Rw;3=!VAFqGc{`GfZ z&qy7c{uSZ>T-g6WX>$;((|^~ZXMMtX5&fQko-SSW;1F|yJ;>K~GE2c7F23Iv_$|S6 zy}Bgs^2f7}Dn=Y1#ducJHjXQ}^T^ry!Alx@f2q(AXixm{zD?t+evn?-wLhse-<(P% z-8|pX>qQ?Qb+oZ-heu}hgM;iDvU7^NYqTJg2P)=P{M=`G$iF-fYKXG0RJeZhS6FkI z%gxnqS5=>G^@%wL!{-mVrG}e1fy|u$>)Lj-c3PcQ>&%IpqH{3C7Z!Yf-iMmMCc1W} ze{dq*T{(MXr1ZUr18wPXNZ_L64w$FqS2JJ#qAj0(DCX8G9_UzjplBrH zkKbm+Yo(qck4U=&dRxR_(mN&gRC*@GHcyyu*!u3bD*gS>zt?ns{B2wHQf?DClyGOZ zsvoOPJi$sHgp>VLIjb1pdTwdYEB@|~e+i_2J}33&|KNQ*fpP!c$GN?e_eA{mPw$By z= zD7iQ)dGG6&S3e_%XRctf4zn`OZr#_C!F>%~HO;KTz#wN)`fBiVt5Tv?^GsxpXYpP| zqeS|*{*PykcvdyLy`0&gGv}VKeJb{S3K9 zi&;#8-s_yvZtTI%BU=hW~GAD~1?MIKH%+Dcbe|jz6Gxo}s z8Tz8t(bcEpa63i!ulo22NWive%M%xaA~uaH zHu@D{IUAWTEiwFwhx*{W`_Ip-v3r`znbDgoVkfC{QuBx|xAU!@8z~f}r+PgHqCca~ zf#A~k=6PK-=hep-Gz`9be?YG1;n#Yh5A+KoEN2Xs=YumNxXfYiKN;HyvZS<WU|t zH^(yzE|+(-kirdmEBK8%YlO{ors_W5)$iuS-$l;|a{%>^6)0t|%D6N)tufgQ`^Rjz z$Z*wts*7&lef>G!U+1pjG*e}WlzN7a#;g{!Td z?NK*IP3FBxPjZo;*2ONTNAVA$Pc9mPSKix?-sHJ~0;HwpC-Cuy%z?huYVm7p^&ZH# z68hOcnybMPtf;|}5l$i8Cvt0<&1P2btJ>o7p$(5u3qpvZ-gL{yn2SjNuTFd=YkzUI zo8F4_Nr--1$OJCIf7I?GY#~}E|Bt;lYgSfU*98AcMP6lwSpkUcIZ+WsFRcQC%}s@f z0)irYLK^y7V>hEr*90N%Jk0i5^ZnvnB_G2BBO zALoNinhm??o9M4?j+0mOHLNcu9dr`5)wCfvO}5SZj0kU4e~CCUsbqggJ;<%U6WJ?~ zeZhsy(?O><8^V7Zdc!vSP0&AaHqC)0G4_*Y$lY1&V^Pz!b`$pzf;3H=QHyqI#w5oYvTcF9ZpzcJI^U*iw20Y=A_e5TwaQS&?_7H3!c$8&; z81!2|e-`^D!{m6n)q8K_@z5;$%I9GIA(;PGJN3=ud}7jDye)jykuSN1M5CW*ya5Ll zDm{V;fmYHe1Fj0*Ir|R8@@y{jW+Hw|u!s35!aRn3y|5RN+ko*_rRW2%VR8h)fDOi& z!yHZYgI>Jv&wXO(0Ow42-|hbLgg*a%bm8^ZfMH&c&6pW^-5`m^DObL 
zJ^9cafO#mmqR)E;<_~&TJ&YOdLUKt@Z1Nf2ZQOlHBku(LH-&71iZ-r?JO=|#%twF0 z>T!1hHGl4dTW_M)ON{Ifs^^}a;9ZmUEO7-d00#afHxfmYQyc$z>F9u`h1nVFEo?*~ zf4}uEfmWS?J%lokD9Dey;ZsKAS1AQ&vf4HDaCi8}(MB#3Z{Sv_(bI16+k!X@A!G0hN zmZX2kUE_-?Rk+Vw~?F{No;ZioKPaRih3!jHUk zb)NN#HTtb3_N^{oTJyl%oVXJ(P_~z}zlq#o>x*Zvpy&I&E*yZW;vAZ!@&&?Uf8Xy> zf`usdv1oq{^@8ik0t#~OW|2W0xm&hGqDX*`Vk(5uCC!Xxvy?*u;Z_Tss zeB|~u<_;1M$M+!i>1x(S$qEv0X?4%KA?}gnPTRRb+#Iuf4xc}=C(s(+SGdvldPQ5W zPk1JrL0_MLw+Yp+IX^lX4B}q!e}5u=dN>!tET2qbh&@01CU%$vTl`(`n_1rwz|TDh@8h!uOI!7uiU|x=EsWjCzIZvPUe+S4ZG(E*w7G^b#PU@)!z1Amnbde)y0PdL+*6lutS;ttz zm0H@H5_G^KRky^H#LTQ!ioC@E_LziUKKJ_K1A8-N(&>^MEBJMYR}j6&In^nJGo(97 z|0a2$PrS5wYhr%WoP{RpD~|k*ks*%*4+vnkZK8pL^*T)?Ux^>Af8FY05U)>F6q(h) z$9@zT7=ALP3-IkVv!pA;D52JjC2I%YqYB3c{Jyrgt`4x=uQ>h%&4{BF2T6M}|JtoT z(Tkh)BUk?#doRxSciwFK3^3*)`fo%N5;*C|9?w`bSnPp$M)LIpCnWO#5@UXOETARo4|i`4Lpt|IwNuDRA9N|8NKidCf+N*My<$xw~a%S za|+Rw{1@_T;J*;9$yeWcwaNRMkEh1dhf4nE@@nu1qdM-x%&mF4okY9n1dqa8mnPUZ z@H#jF`G%WyPCMZ9UU^7Dx3VP<{t>G3$l)FS$=9@Jc*MB4f29*TId4g}iWC;|Td+74cfS&3}-b;+KZSM}O_D$NCymf0jdQuOW2}pQJB5 z*IUc(H71{XP4bzPu=}V&dbUK$B-WgRzIXEV)%3Sj=XOn z#C#LkT#`rpe?W+ES9D$;7XTmg4Z_ASWQ5?!J{&3Uakk`pVTWxqFQz~e~X z9e&QxXtT6A@u|o7ed_TDt4doC-6x0m@+OFZ%;9m{*2_m*>ouP~aenZhZTSwLxeLB# zug^X4U2lFV(IUh7xy_4^eunE7$X63DAOT(!J1=%)e?baJ9I@>aQNEva^TD3RzCJ=I zoakKHS59w;)S*h^RKPdK*nmUB&>qevjiTWE#%r}*n1`x?c#6_&QSWFjS@Q0xtDfA% zyYbsQundDMxH}g7*Z?9-t{1yEEqWzHujs2#*GBO?k9?~DQ9!Q0j|dR%c0WyY99z8F zi%;{xw|&!5`hTvI0-v5$g8lY$EkaDBo$sD!!`C=#u~+aBMrsP}NW3>EwLf;!fdc)T z=NKSoa$B9oj88epDZyT5L-Y(G8?be360?UdJ^IDFJ~+%d&13Y4BcheCP4z|^o5||N zj~ZOZ&YI{f&b>S{qNPzoL-VqsRd_M=b$fTe*!1j|x_@};TS5;DGZ2S38vL3``@TKt zan@=*n_tgA$Gicg1m=%7p65r~6VCcemkvDkr}vC(ufd%{92$8!=h!3KrKCS2?m_l3 zt=_ZD9hhR`U=RQFk(P1NAb1_TE(hzFXi@+};;d%ZVWeYR;hZM%s|Ds&0^7B{i|Ujo z*{GrSH-C5>8g6RLdomS6t0bD-c%g)A%2vM)p(*}~b99M+OI#62$2R8Q@}uD?*UlLg z*aV+5DdrQ0U>l$F=}V6Y@qu(oU5goL=PGxgac+jDPm5o-1%=zee>q<%&I)=d;4wuV z9r~O&*F!7JQGp{sY?JUU?<3`&q&xM2AmY4&AAfxt|9Gzl;O}CM;GrA*#6tfLPmca( zpRoSEbpkQbAWn#}qn!o#_-qC0I_&hgm5V(jug>BsnE@}Z^>%u)-A|Gy5-kn3v(&dw-a=F$UZh)-bm1A8kAl zOnr z1dJ~*WWY-kO*e=@l6a~Of1lgEL%|b|Xt)71gS@DT9O-OKVLyUc9sCzqQLk($(vVYYc;R#z2l)9g|MZ}iia{&C{YhC-sF#uq< zl2{Y}e)w2#%sh$pkhP28ndaCpd|d-Ar{}&6)SJ&WF2R0B@}yxW#)iQfYs1GMn?LYD zq1T6F9^zThnTEGI`UBC0c1`R(D{ww1s6*_ZF*fA$sWVi-Ba39jo)GJYmWBc^Ie(%J z`G|la#d9O}s~5B3nQRGc(7E_xe4Qy(57{Yb+`{{Jy+AzNLtoT4X{A91t-xFm!Fvlf zT_nbdyz@lXT?=_@#DtN5+{}?1Mc%+us4K*d(VZ~uZqgbWHyk8ANIp?EI{QdZICjM$ zzL=4)%6Gjfj4e_(?|mZSw>-7o6MvhW=bs#9qN_l1{lM&h;%lU?pWa#o>zB6qc?^Go zF9Ni%gSEMHbl}Sd%_Ax#+Kt5TpZJKM#y#L{!IYg*kZ9v<8s|#&gjfgoZ!G+qk!!Qf z&V5!T)cruTBt)pCpl^fBQ_90BbRKzb1Q!q}Oc0JwwN$AU@dzOv2+;&{6o1j6BHSUw z=w`kY!8gQ-Re;J6oCcS-pT%n4P;sk$NYJE;yocr_n+P?HY??JP+eYO{W`*kky zzSrNAzwtbKJ^4YTe+kZ<#1*j50qzoWQi!||^pUu|_kJ$mGu1meqcp%qV9!Bf1RCO` zu;GGZ7s(aJszgCg3mAQLdw&{x0xKbWHHI@=z6yh}jCdd9fOFaVaIZ$gKm zWHxtbN$$HL;um=0=pf7L9)mtB+~ynmDwqJIe75o|1c4j?f!neTvA0kxyr`tUa;TA-NHB5S&WhGx4T zn7??OCKlaRXKB&mKx4J_)ceM3_`>eKaXe2<@2{GXPoC!Q@!{w*#8>xkV0%UEyIjPu zfHU1Y*E2%STA5MNybYk<89HFx(a@|lK?A1IEqc}f&YzoJi+XlsVemftP1;z=z9JE z13W=rAm<$7hky9teI`(Vz<6wR##;>3AF<{?xHdofZ0om-c6{Njfi=gP`PFY*Ec@qd zP57eEIlaYW|KN%O_Phtq#^60s3~c%v7TwZhrO!v3$K2&+nAf1O4}5&H$V{T+K7fY= z<}xwTvzeG#B-f9=FKM)6SkUkAle}n3=p=@RRg2MvUw=E!IlyRdkTWAp&AbK|sDLK| zc)2-=jmhdfEmtm1nyv;f!PNo$Hwyx0P=+GOVFmxsk>8qiLc--H8j2Cgk0IxWvpcuF zf_s4a{skZW^7HQGU;NM3yY=tle}c>UlUMTOf47{KmoMeh=j!8g_FbD0u}_k>_{Ohz za^~NBpnt!cA8J%%XNJ8l7T&JSjk<>2(||+m5u&6~+rG(5XLuMRmfN?#UB@7xgFShQ zzzha~UEVd`gKBTfUlF>|-=Ue|_MZF`;6bMxbMPMI%<7-bg9`oh3eVkgeF8yZu84_1 z!!fc};Ok&d1r0?^dZ=S?{V`Xqthq2ky%S#Ppnuf^Z+WFMcz1u3S1SD%w!z=SJJqTe 
z*JmY&Yffxd;A^BlHz#ENPo1(Pk0fRgLi}9k5&dBWfjexv5qGagsXXU!9)hROUvpB& zPV(ebA?DT-7uFnY?emKiY@qZ6JT7=wWrK5}I$yvS?AbO6&PvedAe?BYz_Y*Ghj56V zx_@q8^`3LEd;s6^8+Vb^D_QSbz2I+gC9L;P9KhFl|5JPN?E`>%|MFgbbK2T{_%ojJ z_WULD9LCm-(yvYxqs#@2ENmdCvWE9n7-yL!nFhM(SqOdt8LY#iBwvvX|3e& zjeEB*c#s6QAl*N_GURgxPdyP2@qZU^%6@2rLbJWaUK6bqGXIhKZnc?OpJ?Lci#SuO z`%b2A!k;3!WQ?D7ZbO|b(6+_~XQr zr5#Rzu>}VV^Bf?=o;>sEA&wPitd(k>u}KK5FZNfP5IKD4kzzl997Mq>fPWPucl8k0 z@Fbh@GnN=LHC#wft!Tgtaqh~|$OcXd9KBHn5B<}k&I>5Fq5WRClSU=`@K$#YN@hfV zVGd0T27Qyv4Q;FMcRrpCEL z@)*aYF6dYAk|7vGh2XT4ir;B8K7-2N4}o9XvJ>I~0ZiC3gqo-|*3 zl&AIz*&noXz{e*WnAPvz5+q(r>bKpa6U}aL7m5GOOF#IRzV@8Qi7ps?c?s`v+wyGN zzIKDYC{NRmlJF5%8eiV03c(uTtc~~}7E9n7aV|sa5?yw0nnGXpfk_+Q$Uvq*i)F&! z>$E0kQ;Dvpz-hOAa({II(U~uy%bdw)xr9V!w2%)yN^XuwIdq6OH~J94TCM~;Mvfe@ ze~kU9!;OycHX^*yrv^wQ%_Vwt$k2{c=q97z3Jd1kAm%4a++}7%d*xfdy(#J_LY~Kk zNr?ri9MEWhr;m}lZQ}+*=)M8(Qt=_DlwA154+Y-kIj^GLF@Mjsy2|8z_3>)P`KQ(P ze#X~dyh!*3{i?@8_z%zZ)3N52h&2({l3agJB=KmXUB$Ip-&+S|9vRM{$mfEKAq{kYq)7`6 zXQijF+<}}smibfn|D~JpO&5Udhu)gH-?ZyXQ3m!%skJ0t`14u4MXUgVrBUw^r}Peb zo;&ZX@_&mclEGDR4JboTYX-5$h{TiN6}H}kV=YSx_Sf*=fIg$RC(ANlLcfEEkYm^V z$t(Wy+uuCRh+k2gA0ubUr)Jt4qekK-t>y#vbp+$_s#}lbXh}TaJ8$HLgKNK^U>KhH zxA(q~j2)iDnBKX_r{62LiEJ;Gvn;`BU6}H|*MIkX@3DwnobnH`KI104U}$(TjH z26+{Gy>#j=tvpuNwAo@)PYVynj?*p#xc&9aUvUZMiX@W#nSUL~`XlnFG^}n%cu|gS z&VPjK6obH`;1oOXh~{JE0iT3>C@PZVSBzxUAlC&gBzx*LKEHBT3Kk}w&wNQn9uW6I zIdl80BnVJ|6UcLYP`gOKhOS=F;@SzR>&9zjwYp_Y%Ioz}f2%@eIUqO$T=L<$s6Bn-LBP#5ezmb9?1aiDujLcO>`4|B-C5 z93r3l{BHE4kpj6jXe;V8!42^4rPEIk>jKA|=&=EN_#CGcRp5ExYYAQsd7gH^Dm~8$ z-?i0at_#u}wubKUT(!?rmo3MO{GOa69xg#M4t|fVZ#|w%eLc%N|A*&T0HXPwPk#tA z{9nA7z?)SNTPJ()_BjN>(3{q$a0EnX1k3}g~W4)Kq? z8SjrAncwn_+BnNEyrw_p;ruC|2@3~&EWI3H9B@Y$G;(4RU`dlPpum9N&nbBDBTona zE|T0N{g21l@pZn0Rv797eiE>Gho6~6awU*92NsCn;-qzwmqh=kGXNh7et(m#ZxnDU zCG?90#D`&t`NU;oZUR>G9YavTF@p~8`>ac1X-9L0Fa_j$X>b>Cw5dSQz^Q7hjD+4! zML#0>(4(GIz<=$BXK=p|`-bTTWi%s-^wHL57I+VWF;@eEulzk8-2W|x8M?f`!70D$ z$431C*-bP{U%namE)tU@{eL9HaSBsfkEFhU4aB|`<=|}~zJ8l^28c%hbVTCD-@j+^ z#q-5Jo$PPO-sNG!2T}E*Q_Lbai~UeJi;(hb{k!f^Qa+zmQXhZ(elaStNKC38Rqg9V+av1wzG=d=88OR0A6Qs9~5L9 znzIg35)*DV2c8G+lYjJUk$CH`=HOtExUP>l1i}C9c%EPvzcf(Ey(4$^)>#Rp95 zeWh?rk3XKGbBphq^m{TqNiYY2#O3#Qr*og^D=kkM{FjKf{>a@|3b4fz(M?3(njyyy zEpcdtz!QDzSMnsBuW;st#sK1tAo{EqWEz8hH|O;DgD z0qYX^1^drnA&?Al)>JGX=lHfvtB?68V=SUCuOzc@y z?gF2OTB`k1bio{r^&BAem5+T2(L7C|vJXx$+6!TQf`9vH@c@`CuFV6ysx!2ZY+@pS zJf?kh;lUt*%Xc_rL_;3L9<2=JoZt2l>Z4-qW^iv*V7Iqq~>!E`28Da&>sP(Z{pV>SyOSuiR;a)eQI+96ZVDMf8q(GC+_KWK4@c{ zWL+g1uFt*-oH#HyEw2UsTCG>j2glDNFS5g42Xg{+knju!Ja}Z%C>%9arw<Quh?QRD7Qj|KU9EDV1;FYsKSf9h@{ z-+zsBDJNxQKKdh0=rA529_Ao_MEn}Rbl8E@JxIa^F^xafT2FJt=&_zZ|K57^k=tzh z+m-kO86Cp=o!XezpX&^X6UcHz?!P^6KJhXFmElXTHz0tuGGovC#twLcXFs8-HV$gq*x*NOnMc*WqHYHE@iq)yD(AG*vV6 z)X@gP>eefo(-J4i8|;OKmT<;=n*xV2Rt(V`+lNQ)ll>avUeM-I^n)qv=OwMfe;)fY zp<@$HO)9d{Bp`cZDQfi?q1lSq!frUu`2J++ATXRjT+T5k@W)a`&xh!pY-vIoV}Ijo zP0}#L2W}8@dO*urhc36}_}D~ybgH`u2etg;eTO0E>3dH0^DNQLZE^k3TzuB^3p4cY zCoX&<{;TSl$-$A#g(+DVz_aV^hXFK(4LHmc)Ls%6{J*!3!2$oH-X-z}(7PN%7WCgy zzi%C%*Zcx&p?}QB;Sc_jzt=uOK7XNUJw*O7j|V5e8Bc8^vOkGo#0<}e;mN2GFCynE za;72POO+RmfFIrcJ^4)sXDxSh$C(SigZUdy8!fg*=v}d{K6$s8->`v`q09f8r#`w- z(8YcEBmLDp6{GwguA9FnFDd=Qb@G?$Luf0?+?8+f^mN4c-uAIZu7_OS)b%k(0||K9e%-t zSYZK^hSQ&lK2kcr=*%7_iYAlbn;)0FL*_K-<+|9C2nk(4Oy)hVBL_JU-CdbhrH&!l zgCGLAHUb0z0Sb{{D2US`W`BbDI9Ei8;NJ5qfNNxIZ zs{c0r_nq(OAqoD}8~RPVE{U((U(CZlwTFOXd*&tE+%^3*@3&|6e`MZH90#Byc@+@( zS*{sQ%u1IDJ#M-SlOP2KRF_nZ&y zdl1PAlut#1(kaOy&h0YNDf^U7LChLP4lJ8f!4bCCh8$SQmikf&- zkN6{qg9>p+;)8?j{mXx~^&-N$`|*Jw`MCF3N8sS?g`<2t;(u+DzkKzRb{>576XL(! 
za-Q`B>w!V-0j;q2L!x;r0GADRD)3(L2fug0`P48jh)x;2ep2i^i0@AYOl74G5$qxO zQeJ4a7t#rJKauaiy%SyQ06asO@_{Ey@Rc;dOOmtIcs>rHAxLxtZ?AtZL_d$lO=5B8bM5r6C{!Iy%O#P7Q})BK6Y*Z`bE zj`VXviATLcA&i;5$t?Zop3e&2tS0{51($A(l%t6Nva}%f~3C=U)G>&NHw563$~R*)L%y0R15n zFTk4h#DAiHc`LBhu-z~y>dqKMDO!FiIKD(GTFD`mJXCoE~>}^kM+xCZ6 z=4W3(aS3!XBeXT)_S^J+o$?j%@+WQ3EAU>0UYDjTMyG$MZU|sNm%;OlIrONHDsltH z_`(0n-blCK>WVww4@@r3^@LMNzKxQo#|ZJ>_4)xVe#eF{Dv`Q+c_Ovki08ODf{RG} z#(&H?;`L82D#YE0B;t>D0A8!b|9o-oBjjwp_?=|$g!=!|&;G2Dz^(t={JeNr{(;M4 z9H48~)~yMyJT$&soVQ=tWsF7mT0Zg4#*FPqAndN3KQ(t74t!u;0RW&!v~2 z-mp6_^mGax9o;yf9C*35+=EW$j>K|_&VL4Y0GP>Uj}p3mw$Q*&=Z$lgai&$85pf!k z;Ai)*czm_zOP27lalV8YMMTK?zx03N<&Xg1{H4eH&1d?Fp?l_&UYuKCEWj!ISA1z7 z?f&&&@U?x9ovJ7ZM;7xlKUbN(tD3`E) ztT|)&@bs=a+Sg3Bz!~AQ5%v>q=}9;*V8X0%rY+%rNXl<~{LlK|u7olC#`%Yo?-Sy~ zj-iG9jCsUl2mG~?s^uJ<0fNZ`zZN+_3I3=fcqYxp*_TK34g0&Ne{*Aplc-u4NOR#m zIqSZBodfHqPj2gn`{zEnpEwWE!+*fOSExvkifoX+;R{+1XXHfQyvnch3CSnCbG2Wd z3jboe{;T~00ASK5E*$a;*axCFW<8GEJ5?)tRhdoFZhIJNV?zE1=- zHXx2Z=>d;VVvpz#ixm9<;~xEC>H_0ktq8}ShrjR4kkkzt`Mq8~XL`_UGJh<`3zcMr?Eu&# z)#|d~ygBjco?usv2LoqEJwWVoKjncptzKU9SobYzK$CoYCxZX9hP{CJIYYEqA@hJU z(;xXD1eg7cKYe%~2>-p?_p?T|W$#$ium0-qKJwN-`JMmz2gm)^9cbqjqM`TE`S{FB z&pkxTH+f-(+uRwk2C;iD5 zK=FO|hun*M!i>c8xGGld{EnU4tIza}88B=!fM%i6E#nItbcb4N2p+9M-u6KNEP=+D zz(>)@>EWCSA3k8%O2l*0_dF$}9xcQkc!-`$hCzp*LC#EDCUrd~#j!j!F7UNcyPhnFjiy zj%0K1)em3m74olj`t&P>{xAO4d7V$#X@vFQpV7Gg-I?D1r?fV{&hCgyIz*=lSfGae zN3}Kf9S54?-+#~UPd@C7(#iQ8%u540K;L+ezv_X1c_Vx@YFdmXi93CKo$&7q|Ilhj zemsl+RC9g+-k-h20viI8j`Mk1vg!lET57Qe5A1i(F0kNoa`u^m;e;YHUBqZeJrMl_ zqG1ino1%8mr{|K`j|s2jiIF_PYvB*iPtK+Az{2yxDSyL21n*yMcW8kBmAAp+r!N0b z9i{f%_x%j=&uOy&=PF-u&cT_N_?$_C?w(x_c=tyRTo^R6sk41YzMW|ABNncn^-ROC zr4%`f7UyDxv&5J^MdZvuxOwC}CPAs1L7=(aRcP%k#B6z(}&BBV;bXIe)tS*GdW30dg_K+g~JY2|Uo_c^)Kt zG^*184=6r}uHj3+@2O|6wO*UV zM^}+P<(%pRjDX*QbT`_fAR2Q=E0ZPK1J{~h$|vX_7GlPTmm=EHe~f{9MV#Dq(uZS7 z42+6eZ_w??guWC2YnfLv z=2&22pI8?{TU1UO>IWer;;#tx(Su(D+uPzvCltjQ9v5I-w|LUGCo;Tj(dR$o@_+TW zx6kx99`$EG_HL8s`2V(VfA&S9E!O5e@OLs7Kxe`{!t3)&_&&PeEAntVg$_$9L{J11 z%^k+0XT~GMy-g$!Y(!|WUPELDcoP05_d_^Q1AC9{{A1)#z`byRua{!@=P4kD&*{04 zJ}?G7S$S-4BAbHm1I=UGEcMia5Pw-p=0;#j(Vu`-5y2gvf%G{Qk?M$K2tLu4()y18 zU^&-xgldTPqAIy%|Ggd!i2g3tW8{_FvG@<xq zwfjfh5BcR*xA5=DFaL9OlwrIQjJhqsqY8WBPiz_ZYFF%0JHmhb$Lod#2INok%s$Dv z|08ws>_1CR?{NKy3v4*6lYfyHc~1=ddoL3`ah)wU=a2r#zw@qskGp5y;Tw0V9fS2B z{Hz(&!;{xjgL_Og)?Qo*XjOrCwuTm~48eE!u@X(;*c?luwn01}n&l3)b9=Y*Y6T4x z%$-GjSK^D33jitu@709d2^&G8TZ`BP{3Rk8rd8lYarDS>Ie#UOT7TXSL?<)d&F`6y zC5GT8=D9HAl;Mr&!&IuXYm^IjSMHp*g?0uaqQDZA2X3unpNa8^8#yX*8-FIblC1#J z)R)ioNe!lOo4}q_V{vAEYA_H#4T)cXH`a0s=Eb)r#<_7Jnp3M!57CeVkTz4U8E+9_=UHTYv^Aj5fWx&(F!_RsNnP1;6FNmFB9Di%Mf4m`QKzMX;;O3-0 zsZ-=_k4rz7;B`m*@kbnvdna(su=la3D}x4>3D%#32MN&)6_MKV+xmqrZ|fTet?r-P z(jUH$ghPh+Z|$j(2n_6VANtIdle55EZxEv0#2fIV-vNx|3u8<)bIAHd#!naP3vi!U zq5mo0^#eRz7k>!okoVGka0G0c-~-!n5NAw`IRe*~jHM%WxOsrX3lH^w&U?M3yXgre zg2;P)vClaPF+%Y1JxMYI8kj^R_HeuVW#4q4`8_i4bjiH4jridUNBW)5N4N*rOMmBG z5?lh&jCjiEvzM@Pz@CFWkC87y8|~>ir)O~j z4jSgFi$2;YBXPh5+|hR6YjO}Bg!X-_gL5P8*g#Ca;>RByoabDR*y-zh^_l-a zJk8eUm*}PtO`BJ&?W_KO>Z-iY!{59ae{&A|@PBk$tp(!Ah@64Rzj$^}ZNZoCrP+Y5 zQ2_t)0Hz!JuL(C+)($wf?O82;1a^vtL=5d=8p6ZF8Vz{6jr<9GBLWtZH}JeXu4DN& zEb-G2%uI}|PvQ-UX~QQ9+?p+PTRh@zg#H5USz()F*R3HmA0Q{WfYYo4>J%^}TQ~GM zC4Y!|(9P*NPznZCc`GZ5kMHaq6uBnxiUslk1e0y^af*D{ZozB+{hZH8{PE>+@oJ~nnSbiXx%t=EKjIlL?bJ^`yNzqKdxaMdVD05# zX-n{{FykZ;tM0yl>?hC2@L|MxG_4Rnql3)?3O~@y!}*W5pizghI$JLgJ-2C#C^hB- z%%g!)sR{N?*mEN$#$3T)q#a~Pv?37gZhi)1&G#YIt%8SOPiBAH;}QE7!qvcDjDO3B zcQ)c2{glKzezXhDjF`5GUYmxwDn~3MB4xy%hIeUTs_k*dcL@HEv;*{NBNHY8DMzsP 
zUwX9|3-9>wv#wv7QLnf#JSRRei&i6w%x$RiKl)^T@u%A}Fxv0wXWD8e{o-x<@vm$3 zZzIGQp^Nm%vAn(Qs`b2$D=}OLv44#ve5ugqw)1JUrI3S=r956MeCj#DZHREE^bpjf zza#Ow-T{6rh*v{v8}Smtg~P(s`X1|o1h2AuO2(>s@)@yWsD0N-HCNidbaMGov<`j5 zm9}XPqd9Q(L}av~$^DG`MJN6cv4k544vxOZ*qq58@rGQ$dsgBAdU&?e(|-dM8mgH? zc;3(@#8_OQPVqUi_tgw=EKwf5yi&0j2`S)6;MGayUD5~r3If;2tJ<*xjkPw;;?6M# z5Z(sXyBpyb;G9O#u79RK{AsUW#>+2r-7oVT@g4e#K_QMq=Dp{5`X!!)x$~W~dTMNc zcs9SrtaNSh$Z3@Gw9d?*NB?@8*?Ap=noj=PrLnQz53x(`_$!pj<;7#*w^I-UWt3HQCCN- zGtZb|=mZg&v(F~Zq@mo<5A3V3K9hZg%BzVC0c?6!V>@uxv45im3&RC3n(#B=@veGj z;4&atCvh9~aor{l%zst>`ESQ9mpDk#YM%~#~#pxuG~t1KI}6t zNRxK6XZsB9L9NdE2;MdxHkmviYJciq`(E?E$GV|$)N0#({MKKw?pNRLN#8xe$^IHK zEy8#IHZOhC?f$95?IwO1T5j2xm36fD#kIqS~5X<-uoil6%3!Ui~`ue?JDeIuXD9ROKG&~`Qj-18ayqo+n* zd_H3C*+3^04Ffy?{*JzkOTor~PPZ{%Yk$m9kT`e?CB^@3qF73J1?0chRz=}jzhL+f zV*PsmM-XojVSgJTbWXreF$amB>7AdXvdOKs`~?eM6+HyA0%keC3B?M+m+)M}8K8># zE+V|M(*yE2i`<_;kQH}X%eTYwy zlBk>TQBX=11?$IMBLuS`uA9kh6}nFM0Da-zB_1gg+JD2&hJu@(84YU5g??QO^)*f1 zZIgMT=jy#e>~MNkJ0lpG%{gruWn*_e(=Isg*I}uRXqZLg~A_9_HD=Kom9aR8VYi%Que7Y+$94ttY`5E z_M^iy($aJ9&h#uB^diA}rtG~ScAD!X^mdgiu7A~=lULCVkSS*m(1iT&^&dV$;#`E~-j0vED}ULDv-5;60NnBs859oX0wkm$-UEL#_@mYt8JzG;qEl`X9E|{93_JvPzEdOUHBjKl<9>mwMV_XD50%7`Lx0kT&$HycWRr4ewG-biazC6B z6H$~L&XU|l!qE}$1z^Ts*FS#`@}D?&Jl_KvEt|iTxA&8LeqX9S_c!IBX*4;13#%8!X| z!I|LCNqcO`=Ye%0&qF@{^&YyA@Atsmyvtv|kN41Xh3?V!`goV)dGX#O<#^uqJ?`Hn z{p$Ud&nXliE1dyKQ z+#QNv?f1Q&2kk-lm*jrXf7{Q$eowZ*?&s&;^Yb!tOMkqdujl{PKHu5{+|5_HRDM4H zx*su)FnOSTe)RXYzad_R=zrj@ZzU-r-I_D>oPtiP_W;a@g-3nYOsxOwL+;|J8tSppoTP% zR+9vn^@Mj-OO3IE7t{qjJK^1?GU|a&Y-n_;DU53qZKh^s;@8?%J*G(@;AC*xCXuTf z>X}Q9wH~WQo?L33Q7=()7Ytb~w+SN{0X6~2(1iz$8D*m2*?*d2XNrJt+DKVhFS=Sl zXO*zjai%{>^)}LXWwA`o9X+p#EGY(hxhfiT%pTYi3WXGHyO=^60s3v2>@!Y>8a;Br zTnQa?lG7d2BAgN!)gHwfbQ9G13hr-;t*+Jhm?%3ab3(aNxj>O5z9z~*4SFz`00Jk> zW0s&9cWT19s(++LbV5!oYP^CkhKlTvoy-J{$4((}1x;7eUgEjhLd~jVooJ?L8M zjnq6!4%qFfXF4eay;Lu1auf7{T6)Q&HZ=9RO4!68n9j5oJX3R^p(c1MZ7IQ+ZBc8% zD!U6yPTS^39vazN&7BAJd8eP~B6bU3zj(#Ny%;?%h<_jkv_O?mJ&FP~6tv4HCAP?2gyLiaqal*>wQc>2Q}^2P&Hf zyVx3t>-hmOIF!`D{Aex!1nNriYz&A2$HZZL+>OO{n5|+zuHQX@PY0?eui%|%J?&D;!7_aBdVY$%9;9g97(NGYvX=%sT z|Nif5-a?eTib`mnEF^yowIH&on|j`@;xlWrFn>5&83I_j;E$?$_VHfO77BtH7-kBh z4_!4-q3Dm)6o^pf!7(r~*}Mpcg)1C)Vafx?zN8DLTy;=^bT+U;8Umj}$IFvpqht@k zQUU8`CBY>lhFs_{VQla*PvA(cr^K-3I z^C^^)pq>DbQC8GgbS<0q4^F@-jvwJS&1~Uo!2+>A)w$^l{4OF$D-}*q3#WuzT5K<% z8akD;wQ7Ku=2`NJ_T#m|<(`bs+pq6)5q}wKKL?jU@;NZG76Pf~LWg=?C1gB=&G2_b z9?AC!N1#s(uG@ZxxQ>)A97X&zC^87^;p^=)(JcdT%=2GEid>R)=oxr7Sr}$>|!Il zhNxIhoK01`7IZv6?&k)AmWSuEkDebR5_XNfEL*8e8ZS^cwrs7?zb;Cx;C1?GJM^!K z0dk*R-*AQkysQi9RfsK-zai>KP=B&;RMtqpN!AyfW`_7b3~zPx3t$r%2uaWW1rMs$-#sw+pukFN=U?#&?qb!^6i7$jsGogU^`}dms&rHr^9sN6pbCY4jps%`Q!~shGcNIk5}Ljm$~Xt{Y6EqO=f#*rUlB?l`bJ z&<{!5Altic`(amTIixDwiXagQ#ghSHHSw?h>*ME8uE{W}tE2?cb!mZiBV&q2yMl?0 zZ2k2Stx&aannyYaiZjo8wb8El{y>7T;jK#QGEmz#S&{nk^}1Whqx%@1zP`+x*Coak#ZTSx6>a%==^0Y12Ys~$h_D3H*@&*LW z4ypiVxld#Lkjc0$4Il|nfA4X*SV>oJfxj7ezjeqQgZmcH=Lu^<(b_sKg>^*wi&}Fx zyszN}iunPm321_n8(r{>g5#q<@nna_%O%$%^#n$ zY*imZ1@ciQ?RVk%u+JvXm*DxpuEp~SCZ11?$n$9e%Imf~L%H-UC-v(+%Td1|5|i>A z(G!J&osPUeC>t-+5n;wc+>A zf%e05|M_!1%Q^Y-I*dtnM{N=W{v}^eBqV;YPKj0Ipnos@)$^0N1NDXLx1|w7Xdb5V zq>V^D|K&PCf@EE^m1iBMm@}uO58>#ix!g^acGz=~v8!h8s?eR$lQ)S1@;-3uiJgaZ|-}D{oYY} z?sF6yzh~lK2p_}a6HMOqkNc4^7m1hyar}moE#{VXJ%lbjcYC$x0bC?pLw~yl`yeuR zBNO8!SS!fAVSJ%abuf<6SK*0AV@;_SWLn_Ne^}bd? 
z-G9iMhdzXL=?3Fq0b24vN4)x0_UGfg?qNRWM#%wl1DR(<%-`SdJHZlKyw z$G@pV{MNes~3 zr{72J56_bLO38=fLRJG$#(q(Jtc?pI&VP=L`IQ$@&jTIz`14vWX!n*sua$7^Q19jB zdD`*P*3I8~9@5w90TdH4$4lO`4xi)i_-=Q!lZn0PtDj(BPn#F?t&uA94pZav`wG?y z;Bv5E`TCB3Z46`H`SYrgX6z}fHkAL|N zb=s~k=&u;FG4`j5N@KorOS107i{SHo*r!X}G%0W89P2_`UlG}p40UpTXxG6N*%S7j zz}Wm*FRrBuYbO-YoZ6sp>40FjJhFlK*=P}zcviue6Ti=>lO*FBm39(x{_Rn zhH0_BuQZ(M)|_w>QXHMIw7RFlWhYuYDckHGZ>wjeorlUO)eS zcwQfo*Xi^6_BHlD#on|m)q-t=kvd=Bl}Knq}N~b28?AMNOoLG7((E)57PEC^e3C_ zQzzwjT30~Ni`^{+8)OP}jQ?BBpMynLHG*!DND-q_@xCT+hS z<9eLay48;C1#o>SspQ&0`~9D5(SEk{xpq(Peg3(2+Y2)pKIIm6!pCxr+;qiMIiGNH$xny ztrvCsy#7@uNqgU)YcXDl#QFPLoSkjy>spKp%s%b86JtJrc0H(8K8ekcHEV6-o&*K! zUhUXBoIl=sN5dwuDp5tu17|X2ioFEz$j>zoh+>YXb&N4&Nuf!O*AQg(cp)jUCk=)2 z0l5P^FL2MQeaVSrHh=n4N#1LCPf`QzduTG_NQ?uli(m~;R9W8hbC`B;rYHKzW$RzqD{kC`iUUY? z2Y{kWZo@CY$C3cs`pd8#ei9ZF$af0u7Pp|`NlpHR)o!F;fn$1I2seM{h}@S1-T>a} zZQkPj9$ROX%fHGI*6R`J-uHXJ4^_}AKkp^{E`sZy_mF{sfB9EAARRx-Evz*>#FgLg zr_cryL8%gn580OE``iS|V>W*x=F1?2q48jHfNT@F6Ei|>;geIspA=Q(qmXyOd&Kv# z=k~a5#l!t%i%2&#E?R!wPBKYUvU!nrd!Sb=Az__k;56_50}7Eg}GVBM{B1z)C>lu5BK||0D9d7I}YO^#9*{FGdjbx5?k( zBeW+vm2;B67H}UCxle@AZOAv{K7ngrwDlPA4Z(4?_d|Z)CclrhndJ3xe<8;o$@Tag zP3mvMk$UcZ>J2`-Ezf+)k`)s%NdE5KK^R@SddPIHWJ*j^MzNUZ_we|CQ zZ;*a~??(!ZrkGTJ~pW*lLciaz2MUsF11Z!2&fAKsRm-x5+9QtMN z`CO=1yl>k9?beQ0v|F3Mgk)Fs9Jhb?UGop$i-iJz|I2>cJ`bL=?Wccsf4CmcfqDbr z@HHMF1BZXx=VHnA==Vq=lIx!Lh5AC|u>BnJ>?k7q`sesSzsLb40fYprHYU?jNfahxZs?&-t$U-hV-F z1#g(-fj{^m5x!e*{LpXzBlGhUuSDi;Ov>OxA30&$rjUtVKTe<+S2U@|I~k9t*P4uL z^n2(jtcY(&54Z_#8UZB$vxONX^hrKEiv*pU!pncUA{T)JNc^~oPJM+%1U@0~%S9BB z@I#@40B#{7ZVq%Vfd`U2#5#C6l9sKnx-D%ur{pJ`6cR1(NktHWh`UA5)I_HNXYmN$ z9gn=snR9a4_(O%kw+0t0Qs!i^XSTB7)>|@(ueo*3A1f>ZYZ}}#1-vSBa$8BbjY&FJ#%)&o z8T=q_lk!Cb${9#A7hAc3pH2qH0~sjT)(L$wsa$QP%LQ99`QkmeU==!dK=XHG;z@t! zuWU#L!$Ri4O#mfb@sA3fcAN5bG%-kLZtTevI0(}D9cXSsmviEAp$BdK$r6hS-?b+j zE-cBU8FFE75-$2@I9Z^xJNfD;*F5XrHgm2qMO6!xtXwPpA?Py6`|p5rzdmDOG=)rX zgfFLPGS6}q%$n7+{2gB}y8cNb7(NOIZ8U0`6&h9JAYF zcG^r{zkQJUw*CHWCcHn31na9Dr_%SmdD7>%?Ya3Wx1)DCQ z{2;x{1?%&9@Z;X)=~wyJ_iOT3`TN;{4*!&!&8HlV_b&hXe)H@58yH`|%BO#y`-5G? zyPQ@(<@UR7J;(@r%0KlZI-l=%KA(4_&*w3Tyvs3I-{tdnIUM8OzklFpLGiC!2=8p6 zZ1%dgHsAdOP9E0#yAD!U1j8X<`TO?doP%y3TXa7Tj%e%8_oEkN%^?C2>^PheOEc?5j58k^SYYSG!uk!9!`Rc3u@NQ?d`)TLH`rS`!3x1IA z@=rS+F!rdl9 z^#^L^r~Fg@8a5xF^3V6%U*C^t_q$yE)E}~cpK|oCclqb}zvcm`wSPVjtL3h94;2Qe z?a%#*pZZVD{^#@b=ktG{d%nx9Py0ar^Hct5pVa!a4~%)<<S8%e&6)Q=Y!t&d2WW^;-w- zy?(M-3t#11I9ET*r{4S7HQsu!ajC?}8 zdHfn1D1Wa3D1U#iA@&@aJn!;%+of0o-=Fu?SKoo0_TklcQ2y>aD1Y@Gl)uLYyFu@NhV)p$(fWBR z!6uW0pvZrJCh?Er3q+&-IueF`Eiey2Nv%<1kcurE_lzS(mrHDMPl8#*czTuY_-v+U#x3x^tE-47y)z6n;kQu}eI>pNb)lQsv*S#+}%UoEVAVljSKnhowf@Q|#y z$k;>oq}Df9xyqv@l?vLXiRnqP?KJKlSlBAZ=1YIm((rT%`oq3{IMmarsrDe740A2o zEf#V#zE~IY5q7r6{@tJquiOtjN~dfept?Z~ye`boxbX{o1ref66pT$w;r;8rl9Rh$2I*n7hzI>S-3N{== z8elNYO@3rDj8=)ku8{AtjW2(cTGJl^-o1CPW2!I5i)HQ&qX+n0rq;cfzI0L+{7HZB z>VAbVh;`i>Lod_L17*@>xTy<}`eHCH!`Xae*=*C_7xzggWsLK65J9MA@L-wM46YAi zf)H(f2WC4T?dhmI!6H@MU3!xpHPh0`Q644>t=O!){EWe-B#jT}G>{fReybaO1mh#{ z#xO*@(N;Dsw}2>BGcM3?a*vY|(B}>?gETXk`I^9P2M2D* z^i@e-U$%ZAb$7FvkBZ}6-fYa^WUB==k@NK?o#QaM(=^dF*lGa`TI{Q=R~4Y=oI1?; zGMA_#nk_GtJPO37TLE!ftan9lSez$&whk9%eWJBATezFF@#(#~+SbX0s~>;Iwa{4E z-Bu=iYCXWRI^^rjW>>cTi2B@66npThgL|OMcaf8I&qlr3ZO`u2#U}i8+iK~?ju}Ml zAl=ESEFj!PR3g7--*c^Bfc1Sv=6V~8*&(onf}bC5dsSYgNPTPc+Pfa_dTv;QlQq)1 zW->UZ{qu3wxxic_^s2$;%Cdi3^O&A?9NiVH-tb<>;-NotOZ&WnYdx~sP-opD-?T}& zL$S?zGifytZ=B`l2P>IhqFgxWjJrsRtrt|?BO6A;)2bG;3tdK-4g$ASZ`12CGw)4! 
z>Y?F5k#Y}X=Xy(>D?QPV!J>+r$Z-K%--v-Q^e&WQjzY^+ty2!U&%A%!j?Q~ocYE_A zl$`J4MvfAv=M7@B--By%+^A8~-)7=?y;}4+{UqOnbp|=eWiWB3y1#?J>iKGq?0g=S zJC@4^Zd6=$&EPN_vY27|(!7G#?)cIzhS%BY0x0~tzmB3hA6^F0?eMU-y(Ag4$cp}1`lw+L9gWD15SjBI;GtKS~ToYm)9j-WAJw5D%vNZud@N}B+QgmCmQI;&4Dh#I8 z8o~yWsn97_y}7A^C!_#Y!qq;6_U+nLxFI+z-uZy2&EwoZMcaS8l8pK`JRN=M79Tc~ zL%y{QSZF*EjP~No-b}<=#6-D{d{&z zn_+T?aea4?_G_couZ$OJ`#{z&tHp4CfTO0=DHo8mLZ*CGaU1RjP1nbjXI#&wJ4KZ3bl7!H_GG14`4T96 z);KOwV|1Koac0*su~NO;hF-?}ZnG_FgFa#;=Yg&UiW98OI(=}*WU{_yb=mcEAOqcf z*If*0rwJB)ebIz_X553c!`#k#`vKjosBpK@CUtN4=&^sHqvuC!$Vk*#@b-s$`sw!6FOH`nctZBwcN2)L zGhI8Oy7%Q}5=`0cusfYk=3|zdI&u#P<-W8Rhn9a+#N$J{yqaPY8xu`+A@;F32Jt%b zSK3L8rUf(c0Z+>qBkgsdoN&Gc*RDI>cQ0LI)c_a9Tu&3dPdC?NR|&S!QoUPnnBQ&{ zXW5?}8|DDj-?;J?x6tT}nX(#8b!EAwclWiCtlg!`O)g9=A6-nolzlrwDX+eJWf9SH zHKl(~^|?H-()BUuYd#|Z+*0tHiQ*agb#~k!Ni!8_#h|;zHVr%|(pXiX&TKPNsM(5B z0D7+$rVkB)Y7CwHwJVHtGhb=}r9)?KNFX5UQ6|{dh$2YT<hbO-=6F@I)mRNJp|7o?*`wLkR?qEJ*l!G%>AvVJ^os&P zt;s`mlieX2(!$h@rWAiWmSfu6s#1Rd=fSgVW(D1>Cw0Si*QedW9SyrlY;yhX)e)kN z9?tfO3B2cYqyvEYSnO^lO`9XWrj|kxJ0WEjF$`*S*J*~Q%LgHfIjnr_^FG5E_I0?e(<&=-_uyS3O>=e2UH=G}svv9H<)(V!!GzZ@c z8g0BEkHd98VF3v%Zm73ya_fKX75MFln}Rz~HDj%lKpkG^$XV6(HH@~Fm360Do}TI% zcXn=B(7UaBUjLDe%WXOsBuYmeb|*>Z4iFUFRSSN0?q7zJK1N?^1wbe4n!9}+q&w)y zYnCR4*#`KzynC1F>Tp{!hsrs3t$rmieq9sFNs$DIw?qxW4I&5Cv&NueK zPUvEAyrf~OO5J4)UH^I0q0hk>tN^;>In7+!C*97)9&O^?DcNoA-BbM ztEY?5Es)ysFI5O2Ws!ew1M#%DEqt}(9p`-Ct?6dk_ZL$=tdzdA4sXL@vE9tqu5v$& zR;R_8Jp<}bNBf!IO>f0?V`g%Y4p%%QSb2D{n}>ZD64xn*x_`Z>adla;6}X)R+_+h4rDl2!l<uwu`Z?PrEk(1_S}aIU@}b(c2=e`W=5~P8p_sX7UQOJr02@>b$>uP=j*nocUc+n9FP!K-Jgv zH@nj#FLLW}Voj^-EM;-c(&O7*(7R`UvlPev^#%rFijIH7_=y}vOIs8yqj!CtBW!Q(}hW)(cMks$F*aw45eGd;YLz6(|UO| zFsGa9a5aB`$GHK!i(<5#TsmNhkAscr%w%O(%JLxW(oWE=kH?YR6ne$`eL({Y-! z8qx#Br987!u1NiJ@0PjVVl`QI7PDy*^qpz1%Nl>XZfb{*_~r}^f3@#zE7lqt35QQ| zhD@<1aiW_?r3%iIY&cwRl!3P1yW*|rZ+a#rAM$&%FFT0hn`XVUW;?y6XTfoMsjO+) zO^T~1EUL|cl7d_ENFu)%Y*|e0RI;~b7i%znOjA$P4F1X`9ZYTEVAwa>52-(Dw(kL4~OxyUpuxuXgw1Xmq!$ zY=q5s_VBT<1p$1|dUr}c4eR47EqCX`V0nWxpV^sjjeN0GpzKSz%sfcNy_P~|q!Mvl z7_rT<=?sRN4|?uQ8s6@Q?LO*_lKUhrHeQg(tGNbny3)n?nRYjDmE`@V=|OZaRfd1( zkfrlu!3I}um>CTtGt5r!BQ?qS%c z0IOY0FNY&DJNMM-W~yDT_u&9P)T8tSCgC;wxfG*WHkfikc13sjZJ|+{X00J8IioHo z!$0E}yg^Fo^W&y#3}VISrnl++u0nsz09%X!^Kk6(<20>D^d{Y`rtbM{bZ%X=F=E^&aG#z_vx#(4nds`hQ>-(<512S7)ylnsMXl zk7WUu4q>Z1i<_jDle>7_-z01~0e!axOSaEaEq;B(Njj47=XkF#XB*mi!qKUblq|+ zZC9DGJZ<-RkU<$#-#1~j);r#o38USzKXlcZns<_2u{5{z{lfMUn?jUoi+px2)FSJg z+*N0`zfabJzcfnjmiLXl7$bkrG}xG~CtnNqVL3OcKj`q{Y1s4Z2cmKIA@I+$n-%ZI zqlW1u(#;3anX}eSvtN$#n!fAHq*zRZbzNT!Z=o(lqmwIrcCqTn*?dJk7VOG6(UUT7 zmPfq@y3`Il=&Ee?)KHjoPf2fQYNt+rz5$H=aP{wnWMBRBt+TwBn`3`?@A=kE70w;* zv{`LwF)_|{A+NP;6Y7>)-ivc_a}vfz6z+Vx1jobKo}I!m%`Xp=gRks)^r$;isvB>; zFazT_8LyXimmY7HCqB-G^_cG!bbcLZQHNen)>%-Ry-qLLoJZRg4K2|9Zeo<@Gn5vt zVX5>wwzPgMd)Kn_fTe%6V4Tga3WT^L80LnV``ZJ1BD4`t+slTfI~6yr#fVF1Q@_UPyPLousIoAd6} zJJGdVrs^Vc6}Cg)y3jV--3m*0f#4W-)J*4g-Gwm?ntLytZ>@i3f8H3Lak)s{QF>cB zYs3`Xxbg0TaKj2#P&jqI7kiRDrV4vo@-^e{IyY>8nsEm`S5+xwinr3;r&oLaL|Kc; zHS5n(>R7Xj<@wN!Sn5=QWighQcTwq5`bgL>R`M|~>cQPZbn`s9uu9ZPPH89TM&pyL zRNhz%{qa=lT-SfDj%>s3nYSx{fk3ySvV12p1gDVd>E!aLo3n3o+DUIMMr z(!*vvJ{p75c{0(OyIS?{ZYFHIdjEa`Y`kkaqMMI$jKqLq*|~GSQHlo>=T#=^12=1w zz#o^zWE+311xKKU$4uYWAsrz0CC@yk1FzW*@AK(R7BwhqWtik^jk+U5j~pp$#z%T% z&zn=J?(+h&l+NLNDNft_bn|$urs?jSd0Ov6<(BHLsjWYnikE(O?a6XyS?(^6W0pS_ z%;P*5C;pv259Cy#c89UIn{AeuvpZ97*6ZCJs_=g&!(3Smuk>mt5A!%QtbO1(dt}^? 
zz1ixjUS`!U4(2KJ5eCX|aZNk3^H5(fU>ZKAedFMbHk)pzH$wVH6%K4gal`n$s4QRD zl~*%mx;I8$9=8+P)kb}7$k@|ezp?gr*mz?V8Rtv;$~=5(y1m|wY*a7y-RZPX_xp$L z9@~G*a+VAsSU5@xW@8L*%fuRwm-SjPw&t|4ScaVy9naPIQD_9(eqawaeuv$ew@lbn z-C3p{cf0()pIpmk$jmm5lZe)^bJ$qvyt^}B29%qnfEReLSEq*HxX)JHA?6 z4)dxrXI#b{A1R&5t<$bCxqaE6dE>*KZuEckJhIn%w%_i1j52q#d9OR~51L`$O_%dk zbBA`4%xlNdCM=j8RCejGVs?wsdZMliV|3oE4`a52mcAD3N$d4vRiE69dXmlDwHyxn zn4|Xfa*YIy>i5Sx`m~U*dwm`(=jXk(Rc1yq8R*BvJwL|dqU&Z|=Nk0-OdN-&jiP_K zM&aW0tj1Z-2{&s8EjnmWm(?kO1bZ*<=sxJ|j9$(u{( ze3~tT>~W>i{JiSi7YrI>dMQ`be5_<0oHJ@j4j7ZP8lT-`vcIljus~h>%~2?ZR0qd^ z$nDtUbbc)E>LS~9!gS-VsM~M}zV0KsPU)k-VyH}0hy;Y!`Eu^m)!v>XD7AmTidGV9 zv6w$P*UZ++&aG#yL%N1%D|JB9x^rxl5&;97KCJEF)>x-~SUc;LxvozkL%ZipWQNs5 zV1@Y(dUzNn`+I*fc6mqatansVyXs))l{k4DR>IqYJ2maI0w254a8ls-RWkXW9-EiW zG|lg4W$8ozG3iWJ`M5Ja7TbRk^H^thHLOP-%Ca`-8VfI8Yw~WjN=JI&C9^d%><)~> za#+gNlsT`D?Cf^l)ehUCPR)tlntFDYOk|vr1*2T4ePCWRidi0()M6G-9ZKv>%&b$u z)%P*Dvlr{OxN8Wni)^!B2@%WM%*+{UwHL>W;a2gds%BGeJ+S>kWITVBIgUiY3+nw< z>7BX_nsU8y`}!qic3MzO++ig4ZSOpIbY~Pb-h_1bJl`IZ@@{T=_bl~l&N1c?8vuxT(!CVvkb2)4Fxq3b)jQMax-$@Fvt`bR-clQ3A@?=?(V3jP z>U6)?obyJP&h{mp=#R0qLPU3D%$B7*j9k-d2A!Ppu*bypu&WUB>6m zDT;2+#@|oXL+qTQQ`CDbI*TPeSRC(NeJ78?{iU~t$HrAI#=CjHzbujE937L*skSa| zo+6oYF_GzPHgk)S9wP9k>TWlDbnAmZK5FzhpDJ4ktCFrrLuKFG)9X|o5BjoY^}1zu z-0XVsZQvketOtKJHn*zj0uw>^${EDnkNYl!w1a3PRX1(Zt;1qC-Rxl2KdH}>9RbTG7lOtd&zv&l)MSWfQ+>O7jcKwk|+vk~}##3XI z@=RdvRb<9C%O1A3_d_&ZvFl>)YG*Gq!odIwXp@@Dj-h`c0$OKRJ29f!?6}{}=IU^! z?K|prI7!AUL?GwAJ&q4>jshh|=-u7g78POH_cw~bdW^8V$HV<;Jvh*p4B|wHMB~uT z46e>&bRWUM??O%HJS^om)gpD4^Kfs*CsdCwuXe%46N-@*b-jbp>|t`PnSP`*BX*)i z*C48arQ&~1WObia>}Cx^c`+1{Nxf!zqTh+jWz!o&Sjd92+Iiz{z_B0Y^u#zpRnyzZ z^i^c`o(nv+cg*+As((d;s<$gzLrya;mPi=#tKsPF04N>DYC5)|zt|WjS@KUhLn|Hb`o?n_H65 z^sayD(Qfa7)frGq&7XulhoM&adb$vqW9*nl@#qwcu#5Tou-P$3*&Cl(gYs5pQ)bnE zw1ci{Izo5^BjetgG}r(QebTxw7QKdg!9V7Z86WBu^JCI{b)K{ zWaIS|1i>z3BIxmYGK%+0MJ#AmA3|n58(M$MJrX#5JA_0hcWeUwrn7uevBeN`KW(11 zWuhQMJgNo0}HHWpH4RbKlmw^&l0q;mtg67&=Px zZAA~@w?X$e?%H5?=={8T@q*0BDDM#PR1;fkYr~7n(`};k>hUSm4koMV;>7J`V`qP` z38j77)kF8uy=~*#Tx(6XS-qodpJ845tDBR=YU7J)1BS8OG%HLX?GUx7W;DB z%Ya< zpy%Xrt#_^KifC%{unfU}t>!A|M-W;mF}1-1nByj`79e5X5PDLv=Pc{ERYC$T^2nF+ zmln5Vu9D$!*9~E$+TI^gQh#arwKA~wy8A#f{dE^O`{LC)L!TVXn0&nev;zqy&_uxE zAeOLPcP-0evWQWJoGyUiDyWLsmY{&VdE}gLVMTmb)ztk)?*qP<) zgR(WYwslMOAl%=!ck@02DH8(P=Gfd%ko<&j{DCc#y4M+W*t$D)?k5Y2_jcPE<-9+| zk>tIP`{MxUPS5dUc|@t8CMFgTT0WhuiTS#J5|o4yG!Vk0T{j7_T@gzW2zZ z*=ife$mn?mT8yI}u2gZC>&NF3UK6uGXM>sc!>Y+ItHfuR>AFM7M zraDd-EM*^oqoJ3Tg7Y}?M-bAsMQm&4`=ilG-K|-bl1$MTpGto7E%+092ctkgtbri)XLML6HsFOOK{=ht0xI0Xl%lE%l_SZe3aXw6P;3Q}Q)rH|m4 zJ2RAE;}4B>GMiu4n})u``|U#pfT=gsgRX9MM9Q>_ATlR;6czk28|T(Gmm4h_n#|2W zx6nD~xK{P~CLhE!MdgZxz@S~a&Q;^i*{!LwPKW9+559lK!Il@{X#VtW@j}Hj~Aww zJstx~WtYQn*=U_*!azFG-W0>0xYHU%eqy50ybq z7t3(7T*1>vmEeZ#<}PSX&bG&CmU-oYPuz^Q()<#3Z~~VH$#gQS2dE0@IOKz#zl1J+ zaPCt=(F_M3t>%}>lv-0AYbT8+v*DfUI!rx*&f|Y92o8_F<+hJMX7jrdu~?jGr+fF#t7D~aZqI|=ethqFvx*uxQ}4Qh zSNG(S?3=-57D7}0#w>dNZBOqm?)G^1GWysTuMS1_tm^GX8)0-rD1#kF7ydZy$BzT$ zxf_46Yn}R~zq_uge$QoBp}hp4B=u*^tSNA$ZkiCM@s4uZiwnwRE@`D5*Sn;oMh+9! 
z0v6;Y=il1{2$I8&t-e4-VHn!T3!*CYu1AxZ8Ahcpk?JAz6*OTMGT zOcEiq*Xv$`!Rx!>Ag29wbG5!ABCbw^HDQ4fm4UsY4vXx0~0%1aWU>9&r(391+;-i@B2AlOcn=55a}8DJLB{ z@o{9S=dTlIkclb%9T<)#A%EA5=k`;D&-+pLLF7;lQv2-BUo$os=SVojL$PB-TXH*HC5n1_vukog z%65B>KZZw&^RGF`^RBDocp{%f$89_PJO}FDO_~oW54Av1x0A$Lw|`FoML-2Eb@L&- z`4O%N>HY}mkJSgg;1}Q6pL6}uWam}reP1sTY;VO~7;qxvdyfg3BwyX>*KAF5VCr2pvH0AMMqy|`&YjrMEQE3mpx}GS~VmQ6SaJY&=X>`3s3u<@lr8G{n?bU`$ z{UPe_iH(KAh&hUL#Vt|T)9YZY#A{^NHdNfESJusIFZnc2mw(~!hv$v(fk~ILx=e4y z1>umd-f(K&CZ2WYIcYc@9{z))!m`Bqr{@Y=DBQ+cJ&|rrP+$kVLtl`(71yEa`f?}4 z&G(w;^>^ZVF9VF(yy{eppiGSm(6|c|D3DN*%_=E|$l6yPo)mDh;N*rV5UYVzFHY(DAJBZ@`nO_Ii z^@6^AYo!0Y1xx{A{Jo&*{pao#x2B)?BJD|4)TX+=yv1rJgN#UJo?q2T{$__&9+y_9 z(Bq7(OJ>#ej|-?Gp>lEashvLVt9CkWisz`Go}FTPuYWr6SADeWR3=V0=R0y_hqhcB zY`}Dcmu%)dqv>s!`RcZ%H_h3FKzrE|cb@`eWw=e(4dpOpD2GLP4>*pz3Fdp=q;gyw-=(hw|~67{qsXu z_ifqAkbl*;p?(C#PpBuvRFvdd@Y)~Va?D>jzup6(lltU)`-TFRJMg#U7OubP#%Qa| z4vg?ZCiB~zEO*GBEWbzI=(XRUwMsa^2IIvrW$wG-K_g~Ni8dUn2eVIaN5+B)&KOP_ zeqN$&X8^g!yF-^fXfajmJz;aSSvB)j-w zg_o83uqNL5nz`gfiMz62-*ZkMGbk`|53t(hgHb;pocms_x=pvs-$|_GpJx`ujM!<@ zQGM)ynAj%Hsy062vDts6jb0K|R#Ws_tIsbj)UZwv2311)4$}p22E}fIjBXrT?w@0^ zYJV?%y9yoO9jQBg*G>g*e3OP^pN|(ssBdjehYTITM++Bz{z-dG-JKXIeHf}D-K+tc1ICRUZkYu3@>54K6b~lq+GL3tEBZzUc%-^ ziYEu#!6uR6DLO`}NII=duGr2colSG=)_>jE^{8wjkQYRKaWWK!$J;ua#D=bPr9Kp+ z(7AG$a1YUSx7PG!U&%_}aP(Lt6!AJ`D^hwCMPxRG+dppkuyfP=na8ivi`dJJ{~COh zYy?XwtAi$O+Uj1p(%mrVJNIlqj?$607%?w6-Bvr6>4~sx;^%YN+MyzH;`J7orhhTE zr!Gw_kTZp&d^DxPMbDGshn2RyA?cp2fokJMwEVHku6+NrlS?PM{WfmvQ0~5G<|4vL zu#>oC?8pA5=UcbpguBp6_2qk+KfHy+Ey3Y;tEBmsFr|i&mw3X??GN~y=BK4x_uu`% zW-mXI7Eh!fy@{_FCOH@j!Iw{u5`TAX+CJFS_gcY;VCfCuFnMyk5y||4%u48Pw}fnWYwzGrJ|E}aUJcDFW9{gBpEQIShS(NlgPE;MXvC}} z=+Sn+dlB*E70GlORDRfAx}fP?_dxVnOW2apgAQ7~PlyOSj^rR2>6bfL=YLdRZM63- zI6km`RN%K4;1Mm9$P$9_0;SY6)(eQ$&`pN)Gnhk_^uL$PwzMF9GZCroDcCTsqRNn&oM7d*zk-G6W%P^4;N*tdjL zv#37uWb<#Ro=ZI|uR%ac?k zo;$yY5|aE?LsQ9F42o8iY9}kRB&PlF<>!?NrnNd4&oA!|Hq)V`SNc`JLBn{3ouGa? 
zff>lJ4eZX>+n-Hs|9{}stY2Mf&<4)AhU6DEfAs!F8e2qS`R?(EMX}{I%swt^E_wH= zABXpD0>hwuEo}A1mHu`+L7MmFYt<8XPW z1uoc#yozDtUWiuTvEf-G5xSi;-YV1by+3Z)Ot{@GeYZN9zJGMNJ;UQF8}=*Zsz_LB z=FSeQsr8%YN>O~9%EPYQiV?)Uwm4;3=NNH_qU*1EMv`#(N4zJ_A2kOdj}Dsf8CW7)>trHr$FkeADXdn>Gc0@NO zocxrStd!ORFiqRW3(;2vJDzKo>fUd}qW2axa)F<6-`$^1H+$?f4Q z3X8>8++N?~I~v|!#z59;@Qn|&vE!U`x%a+%#%BZ5;|WPNARbi^!ABB?r_9*YJ=_Os z>+O*IpAtL4V*C8eGp4QYh6XCq z_V-sT?0-O+U_PY>C)00!o5JO*QoXC6S3|ZV3%bThYDxm2cZz z0w;*YM|R>LZ}sW;o<67IOZo;Tp!mHHVti6u36yZiOW-oXK7P}>w>&egq)Kmhu*V@b z{(pLMiZh;X#uy4yqU9aiX~(x4aICQYUiXjrwqfBfRmvRu-FxcA(wkoUm6#=E%bf~Z z!*++&lp628yw$h;htHae@ZI)L5LY~%Y^JE8dqaIO$5cklg*WG(J;~O4zBJ5TK=`U3 zt>et!?vA*HczI0T08 zJAwZ-w>RwpXf3|Gn>0~;;bC08iMD#3Lv{sZ;tqgnU<>cS6n`r?|3Lft9?d(;$A4tl z+ApNF*ankE(v$rfcjJBTTXIzmchclIPJIvG-lVULYd<}^JU*0=zPm+m&ViwP7Vk8n z%BJB*(-Q4;Kb0|e8Z5YHyQ0yU&`yo-Q&6_&VW)knA}D2WB>TX_HYr`l;thT{=cAsV z4t0zhc<;q+OK??qIGW|zjs?oppMUbZB1L2?t`UU(-c2~ZA7{{dF&3!o`CW8{uiB+0 z?evV<=A~yz^8OCjBSG#mqnyt5XEmDfjb~y6G+7H$0pU?{^}U3d|4KfSxNdoOm2={z zX6EeD4EZ(wI_$dagvjTz?@+9yDkOJK(AKRZ_WRQiX~(KC>U@=Z1QqDtK!0w7y%(pc z7snTJ`v8p<>qh7M>+~|%QTaq-Np3FXRe~?{zS_LD?gwtkwtulw_Suq~oZq}JV1G^D zkL^x9!}oIt3GAF~@wf;~AJoLE7lIwoXkv6q6<~HopjFvv-+dmU2^W?5KCIplI_!4h zf{c<1;M1N~=R^$re!1aX{C{(O;*gJGUIEfaz*QdC{)8}&@jMB_>7BkxzHVLNaprs( zhi|CP9GO7_6gk^saC-`W?qLi##99A#+Udh>nUz%EAty6%TwmLX;99FI;#m)!!Cspj z5Aro#RYC<>?8--&E=)9fYtxzZ>^S-$S>t39YUm9L}K znHY6I0hlcM_?i-1%6Mu4+Bg>Gi*ezZDIYs3DDMa5k#G95T;%;dTUCit!|mxT6QQZ&QApC~gM-14*V0xbAF>n$bl906l|J{KY%qO^9j@||cnuQ@anj2HQlRGgC zRsS+ET3oas`FvuLyMKsMT&u|5VK_TA*)ROc``m1}ZOlfBFh$a|GvB?N-CoylgspVX zP0$0pZe(D&x=$UOP>+Bdr)|!Pg8jj1SP`r_~!l1 zJVM7xEtQq7^RjK`dDpHP%RN=A9j_ubYu&7OGY*6pAEQ&_y1jaG+?ZS9M$CCz=-ccg)CLyTyB!^)W42?BVa<+*;4`d^>}G0P zzAgdeqH3CsMRyFOXSU5D1IM-`$(=%4T8-UEHn_LA)3{&D>m&R7Yw~%ba%`*uKW|>d0U;cfl!pD_Zt~oIsWJMd^Br zEBNnMz_CMR1vzM}n_ckKuAR;md2oA^1VrbA5YKS83IN(}XR^{6-ItZf0b0HAE=_Sa z)ZAmQ(tr6%xbPqV40VfAA6$ZGXn{rEn&+(a;zrQAzZUGIm$uMU%_ILlRkm2DQlAs! 
zcS_}1vXVZh)~bp)9TJy5UOVNp-|~aK1A2VrL~h0by?D{&)5EY0<9wYQkTs|iLP!_K zb-+D+xvcAC>$u$GdEN2HCtItG3}8gK&di+KZ+~_NW=tlW!tT@>cstFoFZvF@Dyk5# z-vsykT&0&$hg?yioh$b&^~k?7l8tfp!w>J(Y4ennnn(5$s&iV`PsneJ&Luy57s;iz z;zL;kRQW^fVw~dH=7Y3f5HqE`#nQ?=2rM(%M0!C+AP$OM(kDC@C3MU;?_0s2=#e2$ z$A8p6TQ_+I_JDTdsSYmQAf+p`U>pUUFG(z~)YMaG3}5Myl0-sW-k)tltxj5vG{eMc zse0Da4b+;Y;tdXJZ^#9<(VhPO@RV;bC2S7t`oV@fiGOTXp4`Ei93BTZ>GO?jrNi*P z0tRkN)$Zb~-X!E{DuTRAcI2`nvTF|TXuz{j12_}+T*nQ+T}A6&rlAQLb_3l9M| zM`nYF07UtOaCUq@U-UP)-a?TPW0jpcK7WX) zn;sn4o042`%Au`|Tf4Rm?f`%&2GYDco5KnK?&Fv005fh*`L)?tblhK9vt{|$e0`G7 z%hqO}Qt_Cd_tn-KL7jiPklyFH->+8RD}R0ah@8m7mDG3gCHtCYvc3$%4sqrkE%-ui zuX$^Ad$6wDwc(W8-6t^bak@W8?0+}B1l7v9*jr+RPRseETKgG24+t2-wh8R26v8b= z>bM7RKAd0l>(qq3XG`Jz4EauBD({;7ekN8mX!Yq6)3iO_oSNfDf z=5V>DW}6@H*6r?AJd|ogG8jqTMO<0&x8<$go7pZ<>M+>#&#<^>`!l0YM}J;bwwv-i zfk^Lpun7|xLjVLCo8-*+6=xs?Br((;alrK>x#b(vWzt9eY1L$3US{VkgZT4u)e9$E zt2EQ?alW=I#qMzY4T#|;TAY?{WXe|h*<=wRr=c~i1Sxz#qHJ=BM+)g)2Q+@ zz?RL|^v$u8HGh5P%I2d)>3@?6p&r-=G7iHhIjMQK$%Ue!EgU!N9Em(@hg9TG0r(e` z9l))H)Xf}ejr(ARghj<}R0lF(=bN^Ry==@KWV(EX$LbdAyY^;6yZ@>+U$qNO9~|DV z5JI^vBKpVuSN>c(wSxSw>z)0HtjxsM`}7Is@=V-T7gf1wi}LR2@_#TdhFjmrV+(Io z_HqO33fnPqH>C>}aW@hjZ#sjQxY$V5T*@*QI;nhxyhT`$XA*Dvc+`Q96@i~tKj9`d zN-aSwn7-u2+A&l0+#QG%3ds6K;H~2=yqMul5(UpmTk*ysGs(@0JJPubxZoM`JK5KQ zaFt=l1k&jX$V^ffWPdT=iL@Rcoq?eutS zD>hau+;CTM!z}T zKeu*b`pd=3M{q)~WeAb2*H}+4(9!lEaqy!@-!A_L8tmo&=?F@zQ&8a=a(-nno-I{d z9w<9o*RgJiPLW6mUhl-^rYdAq@ltzYtDFD?pshuksQKY2IxKIY$iB@PPD3GyYPm=9 zbH5*g5pc374uAhh8*M=P{QB_2;jJ(CQ*Sy5JniQvMWhq0+y2vf#O z`$02mxl}JzG4sdi)n$d%Sp^h$*n1u1J_q?3^ir*5FvD4gg`(Z{X_`o{PK(tDht%^H zCtsnW5bL@#%$Jd%+Mi1$g5*Bkt~62h^TRB{U#y?Dz<-T^t-L5!8l;*aoo|SB;JLG` zx?t9P2iRuYghMpeMH=2cBv$)%d)bBk(S`8_%ufvt_nWKQR$uS#Dv8*F*F7LBv)jUu zHQj8u3+JLR1l5{O*wDKDnLlV5Hl0GN?b98`o@@%YgR;NQrz?=CWwrH~i(9yR{s4gb zo3>8iWl|?$`3~pTpgpaN zKZH0qNypYc@;U(!xa{2m{kCaUVt=grASr3q@oxy^2HHl`pNG2(B_T|)u>)8^7|GqG zI;*F?xw(3<^N|QQCD+&6-T3m3SKjmiVX&uoi+^O|QcHcjOJvBu!jTVEk?`9|&pFXG zfc}M(A3x(XH-HVfkTdVCz5dM6bVO)pt|{Kzfh|iyouNblzdX5lTvGjAR<|G_7`=S> z-&gOjqaVKswexqcoqc;?*^~esFXCOjxqYvJ!Q120E6U9Uv8mB ze1Ev?3b)`lqox|7)7k3{J#PyrN;xGC{2!c#YYGNXpE_?>xDyW3T6z3E-N;&(^`7JF6?-``m0&dJ?JgngsPXytCD|b)K7K?w1UvnB5m!* zmt@47YUV-EN@U*^y|*qLjWj}&?(WKFdw+PyBN27rp!DO3y3*gcPteO5XTF2!y z`UjdJuU8?N(wDHU1)7`A7dVWNJtba`?S*MLeiv%$&U@&T=erw#Ij8~t8G6c^_jLf+fH^*RWG+Wtq2B`K`Y~M=6}Ee z54)gsSDQCE*<2+S!y&t-7{XFbd%oR*=CtPSSo`-jTnkE{^9ndm@ks ztFEWjRtk$PtM!kkBoL=*htpbkD1T_P$%N8a@*A$k?QQJVmz)cO`>;EZPv1)b2#H>D zPaXKnN*4HI=0s!0gSbLlpL%xFbJ%CrN=_*ci*Je2C|np$m26MY9Lma%Rjxx(&k(3; z_h}tOp5i!9XiSCE_e2v$8YxFOkE${h=2x5Z<#^)NmHbAir%@P!o`!uRihsfZxa*YZ z-|enq6y<#hR*H&LBgH#hwh3UXP$=i!X}<@7_Jh^?`|at-CnW3P1X!-mr=e94WA*B4 z!*)0I8fi0o0501RSG=&GRoGXT3)EH`VNg;w=!2fbkJ0H z+1veG@cw<|*R7xk$Tm=FhJQb6!nEhE$MfXrn2AhuNgL^$y3so#jvE4e5OcpKw)YvS z8{jlMxDBl(dUh(;354Yi{j`Q|1f+68|Kjz4Z3j2S;2dO$-9kc-Uz}LXmFP6hJzvdg zM8l2KW(bg(_uzB^xAs93*a6?Ew|}=OxOgCUXTMl` zr7)}u{Ey?qU((~w`z^gzZk`0NC{7GuR^L^k&iwEGN) zLi+TPywgZn0eK8vNR0=1AyA)0>UP(fK2RpR-bC`x=3J}RnOG0%3e0>5974pFy72V< zJ7?yBysw_yurm6$RA!}hm(uR$%>3y>GZ&-vXV%;qqTNe$(SO_rG%-ErW6yvNftv)1pS>yPb~wcZcxuZYsEl>Ji+Vn}Nq%**PlvO>K6I%S)tPh;DKod$%geQ>sg z#)uy;mfXE{e}BtNEpaq7+p_lDH2d*%l_m`LFv&g|XDQx>dQR!H?5iEoL?-~}O`I%Z zi{Pt>j!)*nVVja3id3YA(Uy)X;$cWSPH4tR*kU@|fH;=c7Uxum1ihK)D|0uSxVAAB zJXS`vo4w!$Et4-!11kSg5*VS~A^g_Jr(hIv2;T`&vVW4iYG1*Y0NhvyjGr6RS?|;_ z&GMP!KNQ5I2ys_3!hKs?#KSVgKD-m$cjYQ9rtMv7+2l!2)F5tiM(XnQoF^?4<{N99 zPtQ_1db=Ti8p&$iiwB`7^n7LWDuipLqcHZ(_^@@x21-k(q4GI-=~-#wFk|IZsyhO- zbAB8d&VMz9U?8RShRt_x&)bepE3W(D&}ivJ$2vw4AQ?F|U9#$!@XT!=Bzsbe>74k+8|IpoNy|>SL=uX4S+yE=)Mo% 
zPD!ibLwgY?jM?#+I!Mr0l*jf)TaccA1Pb7c{eRt+^8OR-(_Xku-l|iq5bOo6WXh#W z#A~kat)k5W*Q#0RXWLZ3(0sxrDoG2QcB@B|iz15QPI3eFDgx zaR$e846AujqXQkWGcxKle*&>x&94}GsACE3)}vf7RV}t_;ZSrTksP@Axt?M#07}0? zx_>&(W-G*=YvhTksZ@QqQ~7v*7)#(D{O-wQ0UWZ_io2Z-!3)BbSE2_%A3@{xd`~M& zPSTj(RYv6>0wj({;(p_jh2Fsf%06z`H(2*h`XB*|>-IQQCC|PG_g43NYKF_(PpW9?BPV%kJ^KFs zF2(QH`CeZG_sjVkF{~>Zc}8bYIqa`uWbk|#a8R21euxZ!v*W1x6-Br zakvxR8^~{6&LppHh~o^k@-`*G@B{W4lA%2*Ax}GBFRk`EwMwU{;xN>5eSwYF2!A?f zJ^8V(0aS7lH`Z@$8k3Kw$yxiOTr#^V-QS28qM6B^30Im$$w&4`evkxnupphucR>Ul zgs@jkOP-$l+D}4kG|(E&kK%3*t^|I~56RkhKhj!%e*y%wVve5f$EMw1{Z2T|_x5Ut z!I3kHGMgTUSR(r|QtFw43LerF_ z`XsP)%TIGqsEk(-PJHV3*0VU^K&jQD4pH20%^_{)Tj(FGRzo=N{QY)4MSuD&_4cSQ zf!xp}HqBZJtj+1Ox4Z}6AefIO<9aLAWcmO!v{R$s-aI7w$&C%D>3FG4R zV(ejWtkl`v=&WEOL;7Np>Kmw0wNUH#s=i%=azKMaIK`#cWkLp1_F5DP$NUvM;G=7|Ik=mD$N_4^SKtn<&N zSf>|4J1E{6iP1Z0$}b$ia%GvkrSsukDH^iIDe+mA)ji8#anWFE4QiR)2W;wLKSWwK zpj0bIXR=Uc86GJVEPuc2k!{~){CnQ5s*rVAs8D}5Wy1Qg+LaJ%)B)@m0n$I~zE|Gu z&GDe#{qyJbxTnC_O&Jss-;Wi=kz#jz@;#VE-g`$2yytL!_(we(NA_*`=Bd>8T1Y@J z`W@x&)Uq#YMWPg!o=`l@`)qhxJMt%+?&_;xCZf|EZ zcstf~@vGIesgo{*myo14)sW45uFuYtKl1t))EAo9%b>0y2FBPIb;_P~2-9qhXJ=2` zAR0T}n}iL&?Ps0_pxkaj)9daPvwPghu9VZ)<6{wf{>wSo{3vOF1=n12Hz35BeUAGsS}J?bTh{?m$7 zEz!i+ZL6J>)`iBx?#4?m{?1=70`2QJ)`f;fyY1Y;zVF&lpaa&07xoD(W&u&mLfrLg z5uIY-&Kr&_ZkJj0KEx?s*FbB4&SC{3sq#^$#yL6X-W!+< zT05SQnSYCyOpbN<0LHBZDAbzX+lL+^{| zospr$Rl6eXXiDe{C8hdqS&DN2V7KYvHAsB%DeYZWD*IkAD*atuABq=}F>$_3m(#2S zcA4tQ+s?myvb{&ITpizQadMlQeteU0L%ASWZhv-dFl=FC9g0dY=>&GnjJBmuZJ@UX z(5h8eJ`}xH9FVH);?m|WJ4EVMG`!DPT5w1?ZlM$97xUp7wcn34-#19V-UpL+Q0q3&n!G3&6^X0q#2yJU~kJzD(QFZcG9p zwtpR8U9vPkwyV#@B5Lb1%1Cl0!@}NlRiYNTXeF~FuOV7P?`_-m}%DqyH$9{mY z?Cmvl58!F216;&lC33~^ERrLFzl_7fV%L-1_$DbqfL(lJoRMlhxX6-Q@ZR39qz3O5 zB$U6?6aY_EcXwd1wUGQ2b;k%`qLgn{c?!pxBL2>+o>{dC&lj#Xso+@_z~C@ zpkZ8GNFP{JcJOK(#!txSH}MfJK0Z=E%Fe2!A-Ct8V$*EfBY#?lTn>7bcGVqsJ}65? 
zlDm)5buVD*6(YLravnTxpx?*#!LWQ!eg{5_ zPVfEh`7Ev^yN}Zw>_B>=lU;c`dOi?oRLOlno7S9-bwKjgc!~0FLnLCx@0EUSzRt=+ ze^@jNTr`Z}FocJGotONIWIlYItJ}{FOro%Q_7D}1W9?fW-BwBNPB-|KF6`(p_%gg35l!Q3+qGbyIrx@G-t0q{w@;hX#YlC$=3$t`Ead;6xxRhmF^e9cR5 z<`Y@a+SRyRQPd{^5(Xe&l~eqEzZsll_wEa)xVI19w5Z&aP1-Y6f7^UFDDr~KfotXGia?$cDV$;-WQ77yX^ zMv$^9y0`N!Q2K8kX|)qL=lKhxgi|1W%9r7L0~=|C+WT3N<;Sy^qHXX{F5CpB=Vsbn z&c5X77GFWv<9`y9yRTVrOr$fH-2oh1#xrKlWdg#Ox5qvPOKv~RO6N_HHi=?Cn73Og z^Xi)f2Oujlo9jITblA)()Tvi}qs4cjgTA98`A^;YmN(fPX2_uyEGVj$P`GiztnUJpO z2*RpUc7J5F{xCHCX#|bg;DjrZ?)qYV+~qa#dSq}OmjZpGGdezi{UV6^izgwNzmKGX zCo1qdhB=g4jlPD~b;U%Xwi-$pD!8)p^CNhw#YvZn(MyfeRoP z_dT~?$gN7}6%T~WsZA5`Bp^`%lvXo$l(lh|34bzG3+Zw=6*#%x#p4i!TiD>=72qq# z+&^WpT+w>~ZrigOPOqB32at|?k0HZs;B1pBw_`3JN;hA<$?6q=pHZY&)$Rax+bB(` z=*yBre;`EAm3_<0%7YF60^H{kC!UreegP~_=L7@6wXW+wDI=fW5J?LKrclMJc$^;k zynnTEYC8ADffnEToZc#l0`&eh9&aW~s8Oa4hP3vIkpcXKoD)mh90s|Onn#t(if%nh zdB5_{mv9UHom)bP`BkUS7U)*@wtaN0=C`W>GczzaGz@4!qOoDcHl ze5?7-XPZ6;KMXG+0xM=rQC>n2ipUBXBYzx?(0o4(2AK>)5xfA#O-uEVL5$Ua1~66OwWpa!Xj?*#T#b)$**tz;7UtduJc%7-!_N4NGCw||w4 zJbM3lhOVhExZ;YnH0{Y1H&5B@Z-QT86Qc5%05{DgOrP%85P~l+0gLnuL(hBk!yBkR zLcWHtQzZ$fQRrdHndzGl5QPap#GXGw0^A?o`v_b!?<_g*2C!l!BcNIP$#pLDDN;KX)|MSj|yU&FjQ}f+c z4&7Py`I>TGY7x_Z; zIeM2Tea?$0u`U07vVxDPBl#B$lG78r3-v7^Q}lJd{*sP*9*#=f9O`@5ZL6nUcmsfW zE~z^!ATp*qaR3Hnd(h)8q9VK3u_n^(VJipk`?H4igk(ulRL?z~6MylWBH4Qs75rQ> zefqi4NbBSLcc#OA^8HfGlz-gMbGI6Rtky8SaCf*dv@`7~sQLy?0IlCZY+D2)cf-pD()&lK9Jj z9)PHv5{PNVVx)mPfqzbhoVYY59!jo>PYmlT1>e7X-yyN)LmfXmXy)6Qxq12Zt30$` zV3J-hiCJu$U&yZ0y!_UWkYDLmdKk1@6GU4u za_jpL?es;tWREN5u~+yHZR9=jfV=awC038pCK&hVGQ^9SZ_;W?4i5n^eoNezm*)(u zto79&^6-|JFU}@?8*rPwh-?Pe-_djLu2+J+vkp&i)!ia%c!D#caR%Pq6ft!_B;PA{ zPTo)Ry}CFffPX)e=jxxF7N;BQ3v9Z7OY;{QZ%_BcUTFYldXQSI-wyD^H`&c>cY9kW zh*$1h9-Je3NvmY&rwEOeZq6(lcsj8XOPj1+c zZtVR&C)2|>8BCZv(Ph31Z!q5Mx@kDBSRIhuT^$pB`G1~~*6x>*+8#Bf0^RLgt=8;n z4V~?ad-y>3yK)i1cPQikSvvbXfQ_+dQ?q8(s4-0c#kDCFol4d8sEv5@tkp$Ajf5`xm-jnJ zFa7r?c2EZ$zg@^e(Rw57GFs^*=# z8cQ{;D)mKwRl?6hkLihfOr)vOIjHzV5!)k)=Z|(#E-uwc6<%<2-&3}eMJ<0nDjf>f zAU8k2*4*hAbrXxv#Y6g|eRD z_gMOTUoI#B%fWBV7E(LxdN8_FyPIsxK_yswo_S`4T#mM8FBPHSQ%FBm-0L;V=Fwrc z+J8(RN?=1%%rhr~vK-)9AcQ+VE-CM_3eWGLdMX18*Yr?MXkfpq*|A0GrzFmI;%&#u zTjpvTrso9L8(TN@<{UbgP^>0(KHqPKMnJ@J8xBqVYJxlMf8^?o2!DDN#U#&Ikkh14 zlM^QIlf4-xyIwiFM@Q7=S{t66%Lxh3bALA1=R@z@HH56IkP3Ut*FWkCGEtUUEA-y> zC!aIu+??9qoZd~m5=-N>c5Z2Ol-$c{6?XpH&3ey%g^=QyT0lvIt?{O<^ZI1W@;#V0 zoaOmbg33v}ym<{ou&rrSQF7g+_WIsPwbJ_4(!ZWxk88A9S&OD@-{mQ#CRb~^Ie%Q8 zw^M1@?3e!IY3``$hYsXsvY}d};fVstkz$0{hQ#qTfmnybn%UuTt&VqfBTNUxQeRZq z$dVatGSD-bQXJWJeUoA}Fd86`KHRGZYiFoSphQ=v(`$K4@^USANUeZb7^N25XtjH@ z$J^v>Ppt4Enc}4}cT;K!{|gboQ-Ar%r_=W}HMz$Yb;sU|o~O5o znTiljd^*y_sQFn&U(SM~+!ZN(uj(^=Iqv-dJg7bD$QPa$bG53ul>F z^+pquC1{llX)j~%^IT=OcYpWA9SV5*cb{neJ30hQbpqzDBfoi|*z-)k-$=}?vWI>+ zCy<);)^|2&C4|Qfuho(ITbB=Sy6k@u$rp2ZK9kmtLnWUpoEM9GzrE{WqB!)G<=X@O5St3kM|HzseJ%{-fKWRmd`EHVvTsEsHBc}Pv?;qO6MQyRg^+}^d>^x z{=Pssn<-JzF!cT9n!e*RC!rju>UMMbsz0GSvXmfVZts$Qx?EC5oP7Xv44{8ix0_6? 
zQ+Mf#65QMEW4%G)z%!Twx4XTi3*JVG?8>Zcu)S^TVmQs{mGEv5&fk1)lU%>$2q_)1 zIm{@7BC=cp&;IEn34L@&Ohry-$OAYE0x>q^Rt^I8O~7dKg9rMt;=tq_GIBCN9Gzbu z%B3{pqkiRG4UhmW6y-GARjGfF#%%Kn?yPaz+*c2yLc+N?^vk>IrOTl^XD+pa?PTIO(RNC%T`*3G%>d=y9_e|k~*q_$G$ zp1Bp6BPHy@LE5Yo-D-c^ph(}I0gn2-Mt;YzEqHT1R9qz|eR_*@7Lnvh-_A?Qd*-b4 z*i%OIxGy0BaSCj{f9Z`-tJnAScwH}yb+GQI8*cYDPSysz^)rLJJ8nj?y)UR-(Sg32 zz9uh_Lfrb^qc5njKcHNaN|`yTx`wRm4Z+(@`gg)iq?mQRiwJ*SoBb1FcdrMtIy{VK z@tM8=q@$}DI8?jC0#2u{TuK!{i$|_Hv3fnCW{EM+Zg}hZB-0=Fa+cZ5B5TxE`k-X% zHqk2&1>g3}ao|Cj!D3zjfokp3`-#$xF7}})Dr(YBy>h4IwK)N5r^(FW35vsyBZ0|m z@wtFiQgNcCjJSX28~|DAW9(!uJv2{fL#3O`>;0O@k2=;4owc&1{6#w=Fkml1LOq#y zSbG;~8BU+G22xG?X3J}xYSh<-IIDohdu(Rnkra_%T6A6J&MxI~v3Y^2f2M2~;yMQ4 zJm^#ATm`JWQh*s~c(Jt2{z}`@>U>fbs}kG{1J3B4k|ttTsU?b7d$ayvzuzg|DArd$>7M)9CmAe(D&``8T{%YDrLn1>E{yi@ zHj~GWonnQxK-Bl%^m>qfRHL&_0#KJX@kSLUqc9j&(nr`txKC7$-OY+Vtj&1EXLOFd zN4GXH_1BFZ*~c`5Ho-hjpR@LzaSTiIRV8|Pl`?<)5wa1eS1dP+E=)FHl z@7~_d)Yjb4!C)k>9PXS7u$H5Cz{@}yecmrGsI16}m%7ePc~UvMa>{Iln^S|bFkV~$ zR*o0aV(6lqB|g9%pZX9b5c~qpskM0Y9dkcy1}8hcc?3h{*x68TWB;1;r|J57KPKV! zyfA+qy;^_x!-xKyH!+w&2fJPIxJqHeO)NnV; zX|KvXM2T)|MEEmLf!Ff!4t6K%X`rQ#I4Z1ThgiOU4E)&LE^pQ#U>xnMspaMQE4TQ^ zQi^r1EoD%z3QI7JtR9H5yvnRz%momY-@1SDVQh+G0vGA(m}2FB_JWpFSL>#AC1=nm zJGhs&9_O2ODmnMay3fuNHqaSsuAgSS>xdVyhy{E0smfY(*A}NIg0`C^o}MO$<3_au zfSn8NCPt^cfq!2$VAzOr6ke~=6oEHGQ15YZ9!`%<6$;Du+=Sc_eXX(=`%=Y&m8Po0V$R1a`T=%dU*{*E#y6PWH|kP^SZJ_u{|zc?Yp%+5NM!& zHLbz}QWWK&oq5diBsg4xK5=%lhckant!80aMH_k8%|g>-@R%7)gZmI~GpM=@;D9?* zd8KnsbO8yh6R$tU3DWV0{IPA1vvQ+~_Wg0~*;+p7_N0GwehL)d^|d<|LYq6ggfc@> zStu(xcE{SpyJ@Igmbb^kEq0%6q8D#}SjW}u;#`Hx=01sNb?#0GRpPIWY{=mhb0@?t?qXG+RY$Qub5KO>P@7 zx5JtRt%JGvKnza@fY-PmS-$$5(a9NaFktMfb6%=NT~OCayo9F4dCQwlu$6cpKsIwN z(&_We>!?-H@a=4}$adxWqb<~Yh&Pu9>M*6*^6&%#L1GweyxE66m85@Z=upAy;#1wU zJqi!Cs^w_n>AR^NF1am3sKu|k#+4@!y8HQYr4*W9oXzc=?Zw@FzB3tfGY9fWon^Or zUgTG0LQ2VzApzaNligc~_iD9cW``5)7SVR$Dk9|fF3nO!ISRI4AW#9Vu}JTQXI=n9 z@b*sYe3iTd4^#_m9&Ud`(Y-@RZ+q396%Bm9RQVhhLDs*lmogPT+fO1a^S7$K31bX+ z)*A^HPq$BtT?N4xp=B0tCSDvi7PI|4uW+v@lUeCZy4j)_6;-9Ie!W3oX1Sls7sCW? 
z*mX&-XB4NtJvlQEI;SIh^D^Kfr|+^%PSkn=WjAEvXVuc#Y_xxw;Of~v4hp8BFae>^ zWckCnTFvEX>EA)m`9SJ`*_Y?loVwghS4A>_FOG-z1gwfz8&nm5&RPfP5-yiaY+fEu z6hjc4HaMSBs8^#^NYL`28dGANZ&wB_w>e`8kpN z0S>DXfiIuP1@3=1amzUpFGH#s!uovKq38~53R4cM;5k6koW9^5p|EhREjLX;#^i|5 zMU=D-Lc|IqJ-HCn&5VN3eg)ny6|2Nu;94!=ikT2-dgb0kp>3dE`1VCvvKHA?6!)&Z z?iP$VBo0K0eMw>6f)^lJ<+`jVEhLnX+!F@cX~LHm zd{77uhPW8^gaMh}{v^NH?Iu5oGGlFT-q}QXYvLpOp_pMp@ztP2+3xzMy~g^bH44tQ z+m2<|E=r0f|6c2nngkPyIId8}_?GI7)jH%7%&eF>)(jwNk5}jHFn?a=hv#E{F!#tl zme~S8>T7?o-S}5Qz9{SW#aT_VlVSlA7jh>EVzr*lHaq0YTcm`pd9hEScd^>r2nI=; z(^V~~`?K8g>6J(71!Pcx(fJED+NF`7N?>Pa77*XJ7mxLA^4Ye=40MJj1>!r}wP&YK za;CV`qhV;4eQ1FHo#+)EG_0%5PCPh)UOgz5A2WaNag0*tSPRoTSQ$T^d)+ao3*=fr zg*XKJ+39`-A3M2(~Du2A86cb+J(b>9ndjV73Elzo4>+8k}W;We4BfUe|O7pMaUEIL|-dUO}+ z6tsNWa#^l({)zT<^8)34SCr<#*0>nRLOhNW5k&k4)U>Mi0ICuYNwA=s)2h2Zos0AP ztEK(OeowsFdCNpcySnYp@AtL3Mkv6F0p9w&cY=Y^NsRAAw^B&NGP8RhPZ1XnIOczN zf7DxKjxL#g)s{d56i??Uu;9>sf&!Q=z%{eIp5NdVWcjrciMJ#5Y=nj2pQsCJlm*dh zxkYF5LgR`^W3wPOUQY^Jg?dA|r}i&kSO=g23`g2P@6ULGB;_f;X8?$3@(pDP0`fyI zkU``}1cUZM0d4q=xbeg1KDZ>&TAY7ejdhpjvwU%LDg5{t_$1`oA&Q8=IX;}0almif$)y|*G?3+ixD zRv+~~f4-ZXDjO0DWF7_9)qR-2sdc?0kD($YYntm$rf@F=x1@_eokFlEoh)O6IvSOT zI{IZSJ#Koj2ceaFd|5nnU!H%L#o3xqw$;de+6&EBDBGJ*{=4pE2^e zlem9W?5vstb#T(NE2I(++oNP&o*!ohh-#k zqylZtge@#qdqhYdYE{zu$RU%Ey4crzbv<%^2Dzn@*l8fRDo$k(H z{47Dq_nc^5ZS0{wSXv-6=D-5)OUM*XZMt5)>dW>ia$Zvmy2(t7TWlpLYy%Imp$}@k za!`aTf52JoFE_pdb-lB=`s`lHh39ML(7d3slcOrRuD8sAn!SGxB2(a?($2EgDTGKM zi*VEDY{dRsYY)p;)!Bdv0avZ=;xM)sk?&FFX?Or`QJ;0!Y7c(2q7>_<8;~WkMTTFq zNGybUGMNKRL0L<^>p%ywqZZcAb*r=;DB0f&6EIot7Ie+ZFG++k;<4)91YA33d}p_t zckQk#&ccWCoIHOCa%)mwS;RVf5R&5@$detRh-QShmDL4gqxCXE$Ou`VjdATOX4ouC zhK*39|61-*G*Gw{a3HkBU)R-iX;y2+@B58f&Q5+)t5XU%81G!s&E#lYXQ^?$pCV^^ zf=uJ)_yPGraEQ_ef11K61$*Y9+B&9n7tL5^UDXy)-ot;y8$wZe_nLW~59y0#pj<^4 z>v?^z_GQ$#vMuZSdsw#2p+%vd#Zj8Law}zV{geVC9y%&NLCvCpyz&|?Z?l!&s(kO; zFEtGD7iua!cADC`Fe zZs`=Heb;}3TJORv7auhHL3%x5wxU~-gNe)Kt)FXOCAJM6XIJZiOKAm1Lf9j`5Ce;15t*MUTsNe+UC zoyhE3DuV6Tk7oeU?}eXRCzqx!>2NEYR4$gv);)z>ZPH+un#1lzfu7vWHe|s?^Lhh0 z7|4HRkwM4j?y1wHUbG4Dc;PH6xOmmGxAWdSBBNrO#iD~Quk-*^3dOa1|_>BX6^ zSNt~D3J^UT%g<|s)bH4JRvgRV%_kdhkVMaz-_q zCjk84lq+_V^CVGc`SuXsq^AWWxBa|B4bmNsP0cQ8_!axx$2u-W1*NPj$fqF#o;H8w zj(HCk&3-l0*C>Svc7p5RsKQ@2~cAZ0qA zmC0pTJFF_gE)QYE1&U#hHpr8KG zRt|!Iv;0tRPeI%Ak6jjrr?Ey;ujzIg<+h; zd(16SMSybPThs%{DfcjT7uA1Fa-VN&zQ;j}`gz~aHsRwIN6}8synbpd*%Naw zlvkY435;F8R=}uY*t0=fWz$Pd7g{_G*L6<`nN_p5=5t03TW>?Glc0YFwuur&uBqyi zVBT%cIdEC`(3|cMZ}pFW8Hvi%W#13TCPV-=@Tccz$=go){`c$go2snuA6V8H0!3FZ+t^wgZzkqF(@xCU4ia@+aVqpimTQ0Yrg_u;{ATC78N=V7 zmsvpeCzlT&f?b19VRZ6^J=o9LWd^yBdbR{`_H{1Pa{W3#f>T00K(8d-t85FxEHAl7 z;B~odwaAF}j|dCmb=%lVu)9z9_9pjNDY%FpwDRGb!G!OF_zy;SVk_HyDc7GxF--b<0OBjv9i28mPJas9!?60a*sa=^wIp$NnEIr5wlpZ~w@9 z@-MU}&*vZ3lb@);X59RD2p&zXw^eOVu<~HtKqU(P5`}-Tm;mAp))Ks(dm~jyUmmY+ z1~~zq%u(+;5|Xf(YS#mZMJIE7DxSw{i^>mc$;hL^TZi#wem$u5r4S$vPrgbhrbLU` zD^b2vtc%-{--pACKl?JWl#Hw-gt=r41_@qs*m}&HO>Kl>IC~6YDBM9=mq`@7das_ZWjnZi>A6(bVia1h6>7fr$Aru7rhz zA6ZB6RvuYT47|)Z!uo>$(<1PiARvYR@1>7~5U}K1Lw#`vE7KzC)DGMMx03OILWbOj zGw{8NlM&l`LdBf#Gzf^cGh0lc;sllG!Yb^}0vCU6NW=3UWX}aFT2m-|nrmK~2z)Cl zURCqHy9Jv8h)hip^aC9SXB$C>s|Ah(63r1@r31Z;@a#AP#p0Y`hK|sou}bYB4Joeg zlHZGD&~oZSGPOngRvw0YmliS?79tkt9iPa02WCjm;#)myG(Rzf646v2kVLLQ zSsyJX&&ng_A?N7~pIvpd1^+>@2PEV80TM~%{)PnFQejB0$9)oV-UZiE$YjNV|6dfpprg49Z=LVtlK*%kcYr}(1EwE(b?L zM8gW!WIBRLq{H+P-`oQV)Z_1f@Q3`)V9o3M0<}CY3kNY_2Iind|D&gN9NU!D^%!L8guMTlu9kB-q*?NWVD*40a+NXazX*?UL zLk8+2=InI19|3xNnJFXV#R(m`pH?RKgSf5=fiqR|C0tXQb4kZ&aMMh{JS2=x7O3+iu*IfV8{;2y;qT*LAqC-Lh-dEI60rtf ztkX`(`9ScbK;5zzePo!DI5U4ksJl&gnan?oAJ`)@erhd9bFLM!8A^rf)RDo4`8cOo 
zR}456Ves*J5`jFAUE+5~o1@$MC%fXmWFw?k{70Usc8%C8GN!-wm%~s5_JG`@ov8;QzfMwDuj*1N)LJukbbEIWv@CFzBso)^ zuQV5maGn;R+ztAzMucj&ES1p7+QS8>9CHzyH;xY#DuQIjOC52npUk26_z!xM;@3%dZeH|KrtqbpEOiv5|u z0g6@b5hsFu<$E{}HV z(h~oYdzorM$b)|j1^0?x*>*C|*_@W;Tdra(o?0bPvq_mQwNi4;dzk`ev?or`2GMxT-K@>jq05`P2K zo$@5zq4mvP&RIXOfQ)vZ!6g!%vKpjJH9~IacyDnIs_B0nkmsGOM3z&#bD@*R_1QdA zAMTh6Zw})`7}w`g%^R?oVcfdoScQp#jr%iBNWU=xu@yG?ER)E6O>JT9KmNm2s_LzS zAfg3+T4!$UdojqYj6Q-)+HNjUD^I%Km{@FTGc!ILH?+7g$6i&|658Gt{=$s>Y1T?Q zOV#H^RrY@|Yzt|xnxb2t%~W_<=(vYvVXTq%`e070+K|p*Pp_~$G3P_W?(I!|7gQ^p z1R*ofB%>`P@G!F7tmn3z3_>zO8^;+AHRDoR0P*XFJU>Q;@f_%@)`$363-%Gu|Cdd8 z@nRaw{oic{+Ty!bVqL&m`Qr=dWc;9f)}rb8iDQ2qBbSg()e@aqR6(5V?UJjH^38ni zva?2qE#`Zopm-E@xZwTHb{d%4?JC(BpXcsa-)uc#z=a*z+faAaY`O+?A@YUlf5ZGhBA-fn8KTGQ{o_#5m+=h(j8uwE|+Y9%JG2JJSX(Q`8nQx;F`n_i||4D!H zyKaA%Km9bG$F z(H@$R(b)$38X0F6MmuM?eBW_8RtPfAJdsNUGR{W(PsTcF(@gvNo4HGTvjN)ZTMb)2 zMbQ5Eo^rv!<75h?lwMk)y|k~L?!Cj~@|M-MJvBac z$$E$>Wrhk*Ha28Fk@eb5b9;bVk^|+p&WQN;_g^loniZ&5@f=V1=lm`+rsO$H;-7!} zU@HE%{`uFpLJH@1{t=s{e*M{BKbnC6uDb;g|F;cMa@*`B=nq15w^m6Kt?%bPa%ATN{|V??7yinl z;sUb%^^GyEf6&2|dou1M_)Soc9oK&u%qcPszy3J!pT6tjfGs`}gyWh(-s2fm_?{RW zqYbu&^u>+ON6z`RNeYPhyN3nhQ|QRKT9>-Ks{wMa*=DzQHE(6Y=G~nh4&KoCx)&Z~ zH+v)%;+Gy%s#VxYaAm+oy_I^`SGLyFk5k@Ry11qcPQdV##GI*S9Yi2|Z^wUw7ak~P9Klr@e`!OO7)o%YSYv@-*=SDHWvhJ;<~;I<;i+Sbqb@}0Y+O};jIJNA9g z2o~{AT?YKFX#ekgSGW%&!(M-3PeaBFY#G_>VEq4mZQ(O*ze%;q4MAq5wHi@ZGXrl2 zp*C#1nuj&rriFu8YGE%lulM+oIzj^|U?QPRNf4L(f4=L_NLv=oYH zx@qas7W=0!v=vx4OpA~7Fr1qHY(bks>!f^bI9zLE+?lrI9?g#(y90ly>9MT2nWf1{ zR|*`fK@D9s0St4%w8dl0OC5OHv#Dkk{oAMkBb(mbD2NcB1 zsf9wBgYS0-+jA4%r9*$as@|edk&fSb1ukOt%G3flCZCt%&h z_e&lWk$k~Lg2#elnI1eiu@cwh5-40wJm4aT#v*$hp*j-)QMYljd>1_Ub5vm!bFm(P zkb0S6pD)50d;pI3T`w{Jh@BXHKC+epe`rB*f&1=r$9)djuaSSTgyTQ!1bnq$pKNZG z<2)S49M)1-`P}xk7m0^DOp$cX%SW2E=fWQE&hp>}3 zmn4INgJA_U;8tzYh=wMd$$OE`ZLbS-sd}~h#iHGOzh|A zm;ZC$!bXw*;;+4eO&=2e)w@T*J6<%u7dKs|B6A#Hz&L5dEw`Iu|vMkoR#QCCc-qg+o@*nLGk#?Xhdu#{zM)3c~Xa7eZ zhV&;RXo!iB@&3CWj&1hqpM!G5%&^afpYvmnX5TetoC~8(C$W@qY;kf2IU)Fb6|}XW z8@E4}n@@j*_U^Fx)%GAmbgjPHi%_b8e!HdtXAiPH4Ghv2pG7-Iz40-abcNVU48-j; zzF7*PmdPH7mwaZy`O(xUl9fS9TT18|$|;L|fv@l^F#cKue*%R|;T8yma$B-n-%sv5 zHc!w_N)Rv5%qd;@x5kXG&D0LqP3&c^;xY?;z}|l;;{^(2<~>AF$-V__%-S=kpqbr{ z`vUlP-+4jo<~T24OTV^vE|ay5*axz|B6~Xc>|^_N62{rTwIP3OF4=!^;M7+5NQPZI zUP=JWw(K<$`mH}edY}yV;0A8>pi?|vnyI)4s)ClpCp>WxeHN0iLGxaWYIsZAF+ zYQ%r~%_^efS>~C=F?>vKM`v(dziXeSw|FQ+(~5z01WD!~zZYOGtp!uCmnsLM4w?^4 zRHes|1W7;OVym+n0U{FJWvDsI!a*U%Nr*Q-Y+fJt{@sIos7T)<+X><|B#c}&y@BNd+U|MF zN6Db~IRcG{+Ls_JKzl;k3ZO%F&u6YC=T^m?vCW^+QyW5UPDU#sR2&ebR?UB5NF#7% zOd(AUeK$dX#J1^Er6R{BY{^RZLqhLOIKk16SRd~dfo>vxUQENVdWr7XkE#r00OFNj z`!=oxKxh8URTBTTKZqp!V_$x4=Qn=+$AA6VJCn7RTuUC;oWJ>C-1Xk5o@7cLBtiTp z0~!=u5Dau1m`L-IhuKALA|`+J#%+!UpQPaRT|qIV+VN}N&Lwv*%q(PrPh*^j;CuAwUh5@W^60*Wjp znCp3wgW?It*pjCiYR`6C&$^dWkL4wsIKQd`wmPWONZ!oy8hP#N-dq+?G_On%;p13Q z&04tfsU;#-DU{q^y}N&JW$=t}Yi-;P$?}{*O(-JeqV;i0@G54@Sm$@O2q9F&iLvi_ zV;s3V5;xAsdfU_LcfB3Q1N?E)|9|lhm+u{G?kK(0jg37LNayifM>a^|uxB>)Ip z3~YDj3T!51#Fx{WO00NE0MWZA*-(~&j1byHZC`TlZY6H2t9`P-dV{!Mwvl})+6W-4 zH=2)DgqZ+cY%Sy;ihdH=>hj@D1V`(nI!JO66Mck0l1;BzHvs;l;=Dw8D0*?wie2 z&s2#$6B*Y?BtqZ}ia*2;80YY4>;C2wj&>aVOl%_AuYrHR5$oHJt^4h>AHNc@PwQt4 ze8>B@?|=4n|I%mUZ)M6w?ZY_o8`|hPL+Y)aEL31i2sf(VfNvW~ugo%Jt~dQsG&B(sQ-HKi(^1Ou{8&AdM0O)!q919JaM&!BgUTj#oI@GE< z>yE~Ug&Z;Sl(WVYiYC!> zXy1N}Y5e%-jQG1R`Pvmm`DruaumA3oNo1RjDMYi!U}k^sL;ltd1aJXqsx&TUQzvu-v#&KIl-{&_6d+Ti=zEEt*) z4DS|K1IpW{zUoT`Y%=!1m?GeF_aT3c`bHBe3K#QtDv3;sqISf_8AF0V6wf13gBXR} zp+bLq&`hNEv#YPAn<3l@TzZl~D;UjC12gLwnIxf+Z=;4#)KO@~@gQa(`Hdh^kr`~2S- 
zt9#D34JfNdqM*H#f;%ZC=6H_vr*KO{I|_e@&BHzc+Q5acuzE5+l61$c)uTQdo$e`oJW0gf+abOm;tY`{G=lqoBJLW1NJa?6FG zMi&7#oy$4Ii$d2aI?G0GK+GBhbv1uU<|M?pwXG0%XDw?#rY{_|>SzmYa+f~M2eE(W zAZCzFhxW1CE{UHaB46-3Cq9vO67vLWMSF01glI@SdlHJ9%y^LYVG$>U=jllCKfEV2$FcY^l(#ubsHCHslrJw4Xaaj*Ml|4-(?&l)<~2C{!ve`6cJ z{s*zy;~M-Ed&JoPiL(I#(LoX=-SAzesC;#I1WA^#V|S_$+}YrE%QN*9RZEl0G^KA; zn?N*r8eIK7hk1EU^K_~TRfaL;X}*Q>l$~yXnilnLtsT|>xK1MGX4N6Tw)IAdA(03PIyui8SwsQd z^|;;uu4xQiu5cN*Wde%n=tCw_9U(}`n%)#e_Qv^&SZ`d+my)xvcsk74YUs3P9W6Ex0Bi3LFGwIFEjU&iA&pXf{ho5|dd0&Izd3o^5)L z*`Vk~0-d>2bHO*5RsZ4~)T1No7Q zz^mnV=TyA;TF-yW!wn=l`4fR-7Vruh<#O1g#UxLz5&y2{KI1<3z#+YleQvKpyPxcH zb(h3HfA;HS%`36C!5?n*~BVhAD{@Mz-ApRM#*}t)NjM?8k-%oB5@zrlk9%FROVgBr6Moh)uV%pz)Imuh2pZ>P@ z#26&;DQ!B=+tIfFhklr|D{0msZizrZ5Moa#WtInyUS~eDDsSsYm|JBC3BV94rdBG7 zUhE7V$bG9+yL3k>w-n1~9KoIJHpS(n(@l%M6kUH2ShV5DiDfQwp9l-Bh5-Jt0#yEA z?LBl;7Xj+eX|O}bx~9mu-t;^)S^01q91}Ff3Gtu(f8pQwqa7svc^eY@Kkn~= z#~XifV&7bHO4dnY>&Y5Q)>*QbCH9TPw&6$q?tRDj7W$IFp8kgaAlQlwJ`TKuuT4E} zYN=}x?80YWSt6TTKs>|Gq)j@X zZiX1xnUpo)op9OY3@F4YBALEzIuisok#m18*ks*YE_^)@8igxBsTnj6xo*ro-GJ$x zd)O6{Q*?0=gqx}hJp>ELe*Zz~u8DPlA!>RGSnPM-{r(us{g3l|%s2hUkAZg~@naH4 z`o`IR{pjS1Tm;!K5*x z%_*>%D`Zf`w~$cFiVd$xyrk38r8yY`ApIm3!(p6z+y-1dh@E7p&27u*q>+o8Y8jba@?=tte_j6f-e`)z9Aim08uC`o=DkM%&GEkR# zmIhqt4R*=V!%pxCehGPuBZ;#BldfF9G4ySOpjZ|6yF@%{s*Its$zw!_AxnAaJUB7X7 z*25mJM{YcbCFquB5@-j#n}an?C~g_?j}aY{4I!T*6Rp5LFVENGK^PLC#v6YJdA*PB z(Vq};80T=*MMqK9@w+N6gz*v$l+9#YJuSZ7zphmefR(Kwn(op~oY|R&s zja1`_73LiYapU_0G>z}Wp8SXBQI3!knrO(9fC49})EuB};E4#X82RmD zif7a~5>JJ{@COSDoAKi#Y{&V4YuY|yfA9@{b52}%c^d^S$VT>uU(*2cp~RLydntu*OAiD(4Oz zS(lDvUBY^Q*LdK%NL(BF8_caQ-X{++etxk&Kk;5N=5UUL7|frzFTnvfM@Dib#AX|w zhe&}NVDGQlkVkJe=qt~nF^&YuLEPH%QHB&vQ(GzYiwDI)bdsqFVPc8=VmozPzklk@ zdS>y`d@?!89&kU5T4jGXje4}sr6K5pP z!GnQ&KoZfq`M`K^)!yyfi0_fRftLQse=0t3MyNHB42ebDf-Tdax~pYt>Ek?{Q}axD zaTkCS0B~;yWF0Xc;U5v);%|QRC;m0I+3&jjlT#k;)h}*}uMU6D4EYN{li9$`QFYp} zC&#mNzSOKzK|$8QK=dcCADv|!5>NyebYstT4>9V&96+u=b)i^H8v&{%+#8xIhbXnX zgxopw!N+lUpcq$fxg-3LjDvt^F;)508^>X95&KJgH>_L97WM+;@!P+o{m6L4IHi%B z2%BP|m6-Ni6&`ho_;BIdCH>~|hCf`wrNyKmL8nc-qXn$3t{2D#aNBHD zOYCMK_{2WU|M)5-_x!Qc z4@%y(lCU;NeC)1uWfn#Hz#5vse2}k@c~!Hxh(gt)#KwQzalbkGK_FOK+S{%-7tt2*M^ ze{#j2lqUI&pYi8n^}~Q!i`(G|Tr0F_-SD`T(nOS>R}EqbsO@k)K?83f2e$3T=RJ7n zbiRXMTEl-HGpffme6iGI8F=r0H%aMGi+HDZr;%&>h!stNvf;UdiOuSW8OA&b9Cpba zz)S-C@aV7fDVZl*`zMb6vyQ5x?fIG4STDvH*of=?vqqmQ1nSPb+W^xot@&W*+g+=O zd_LuQiP4bPJNP$)@7v}6k{B|yc8(^zhm`hZgs%jjdqEg)7qGG zCE}(Vxz|embCm21zLR%xHeQwU5V1B~YzQD63TAvc1&(LKrw8fd(t#Mo+lozuQ^U$b zq8NW%H)`-w)S0p~QR2W21AOdv9(kz4$jChUxsS0ufBiOslO^+XxuN@AH zEhzlZUI{fx_=CwmBgoJUvqq=FobIo2;;|C={~Uus2Y z&k0wcO-cU~?4v6~Ou$g~-A)k#MwMJw=cA^PKu68H(bC~TY0@pB;3;V)zpM;8dmDZI z68ptr+tbIV@2vdJHT9lG42`HAW^-P)mL}h9<6%Q1#sl|39UMrp7!azpM;tJpQ=osd zkX;hzBKdR+kMwt(4w-X5Yvj*3#r$#IoEAi$@gbqgS%^O&R*E%^UL*;>d-5ow^fYfz z4=z+o2MkkUe?Tmp2y0hG-hRaY1K&Q{pAi!?&TV49y>Y$R{#ow^Mgguw{f%Y(VDN|! z|BbW#-q-Nc_xl`@Yqe*|SbspIt@3}Cb#DzmBB=a%GB61Wr$8ozl z{75R#HxE&PiuE=x;ZI({Yxxq&dN}! 
zWzgK^ANhGItTA^xRsJz|^Re2(;{AT3_;DBuDvb^lQjm;c?zz+QhL-#cRT z|7vey$2#n9z8J-yc{k?dVtEA%S%ThylXO0-bYKoOwd*On%VeW`B4Ct)s}3dC9d5Rp zUbqb61M#}m@;nZ%41l5p?*f~q@}W7kEu1Tx78?k`4r7R+c4b~*E?J1p9KZgRNi8_# z$RP_L0^ddEqnt4Wx=!XJ`sRPfM*sLMs3-WwoBdx~ysteTF!76SdtDfxGQ@@W&fUoL zg)f_Q1ICL&m?BurP-y*gkTU_YFJ$3FO(0F{5Xr^-v#5dK`ZeJCG5!e8`$80C{;9D_l{-o|1wYvf9|qxnMgxDfi^N6NMgrd4 znzm2h4KXsR59hC6N|qoKH{4C`v~saG+PIIoz`F)8#VYW&RmfGKBeh;ao)tcV`hM^d zoLT!4wibUsOEW8xZpd;+sH+iB@`I8Q1sMv|7mfT*#T2aY?&vRoo|BAu5(EB&*FYuV z_)G$^e=hMCzP>Dn2y1`WYF=E27;?QWx;HO_u#V?>$UW?JPWhPbLppqNVOzA1(}hEQ z7pJ3Oq-E?QD|`lj$DuSR5 z%v)e!$sC3KgUIBCzA9aN;DXkaAr+{)xgNVn(*|Id!q`QA)xv)+zkq(w*6Uc??wVe9 z=Zan;c0bE97gR$hSJK3sGqygdCsC<|LlxXE<*`4&h5_rVTao)3ef!@$=WouRkBc1pQ` z^-6>m4qx{qKnQ;b(1+e|*-0OE)@ESy!0&WkNPPvkUBs5}35gpQbz)zV7oRgO;0rJ5 z)8)|JAog)j*5R!S{2V{U(}j~VM3GU=zT3BMJ|Bxm_-u_&r=koQM5+Wo)s-mL00I8$s>qE2tYsN;?hf@bv<9+Vxzd3l z3Xf?42EcUABi`8hvUG#L4m&*hTP-y~Jt~PSh#`M)Kd@h+HZ$P6k9nn^dlXTI_tzRY z<}JvcVVvhbpZ~$Y{@KeCKO-l5NCG?gS-Zb=BUe3Bin3gw?#)F$&h@#&Q*yIR_H$QP zeAqe2TOlJ2*Gp7=dmO60;kE-0{eDee z_=tapeZ-uklj+J9Z>F8s9QMKhRY0o0Yqg^vl5Zn?TSf8*4x*oxnupw-Te)1=WWWN{ z{;|7&Zib9Y)J3^a#8N?EH~J$z*2rH!f$S@R@%)3|2bO1CD}U!Na(h4djz9I7-@2}^ zAA9Bwo(rK0>4sVeVDy0*^jyK|HW8BVYaeV5DU?8eR4xUP0_HvPmU^YDyel)Y&y~Q~ z-VvWd9Rn2FLuQF}w~#JXrI*#q0JR6=*T@}al-{#_BZwJQ+-2c>%bNzAGypX zmcW9pD@|PDl>=%-4Hdu1RK(SR)Fb(RKg;E8Tz_SPm-yzHiH!~a%CV8>_`wzZtigye z!Uz85?nu17D1u!fa8Jvqu@F~Pfwehs-KXw<$B6@7nTuG%Zxs{Z+LKg`Z5r$QEf(cS zbwkq~u!$AusVG$+Ta+Skn(pP#(ec5*sR!*$@tDscl-6T>OP~QK6|RUUbJ7^!qQ|;c ziR^I^ecA=5LYS(cwFP@(k9uxfjkQ{)lUmOfp@yyPSdYpnpk}du zNImn2BS8_)Puy#4lYg#r-JrG>pe+kNAzg}vYr*b@sK3H?sYP_WE5$xrKw;HXFB+(6 zqzpbgmdxun9&$(Ucl0AskpGum``s&l>r}qB;l#mz_~Xz26(8a6h;c}(Z#=~J*h(K{ z2Yx+bK~0_e;6CiFy_>KGEA zxsf;sMh(dIfz?L8k(?2kNBEC_{64|akaimV0Qg>HU;g3Oa2>NQf*&qmkBd6eMA&+~ ztqj)0EV+oME!5s861czlEwwUyA-RPR>E&|I-eeM^ejd%H(7Y_~KrAB#Jl)zF!<4Aw z8`oJQQ&7Ak`481E_?l!G>XZzH}A` zP#qlUOP*U^NcUC7RY$PQteuk7opU>~zuk1ko5a1)uF=o<|Jf`2?$du_h=|R9*BRi8 z0!zJ%9sGG+;gVa*m#k-hqP9z(rLiPe^3DzS;^5xoK<^}1J}(1F*7{=>K^{$8Uac@cR;p2YiYcGC$ z46?2w2KbXp3waPze&Z5Kjriytew_l*4kVtNQ;=7QEOR>}L$=3HoCBieoOdhZER?vT zs~oSXn)j}wf#G}uqIR3N0ttez|K6XIUQk{cKw3svTTx*zU{xXE{2xMA@{vYz0$7x@ zUkY42A;E=z*=|>V#QB?cVvi=^9Ztqd!19j{6_`htUvWTy0^)LVQP21aMY*%L+#E94 zoihPOUW#1r#Y6B}?_#JjRLYJ!05ia-_bf971#4SY4Z=<`-0oq(O*UyrvWT0+cDq?6 zp3@C#YcGJ%xl+!*C6qEYyg*BiXD{tHA=51fFC-F5uyyZ``1?*mwm*IBkvyCEE$*ISl(CKzo4sO%k*bZZRk=QPz z#iM%4I*t>f)XE`+#sB)}P!;uNA$p3u;kcP6|NA}GkbU%c&4BDh#!E;I!6Qsby)1Y& zEEmoyT&5gl zqxjl5;zJ>SW8QpN@Nc+yzEHra2M%Tuby8TmfHwluEax2v&jJ~gFuMq|U@H@4*eamx z3Jf$hhv+HNSx`o0_zCHu4E3mBw+2fQubh#hLi;J4lyHh9P#sEzGZtS}Z=f!~_3-GY z3hjk|>v1gqqNoaKO8mY?LIb4FK3s?PF*4=&DJ>_Y=;nSmAg%Zv0+f1sWQhsY(v zq#Mg_4TF1(;aZDp8dAqUMrth=OW!~Jks-E!>ha%ZoX_7snZI~3LLE2^?gCMBSadeX zX!2DFg)V=$;F5Yx?(3Z~W{YWAMsj+kSA z{Cgk&)gOwLOaajyIZC{!$~5VP%?%_=VXeaHEddhafGVaPu#j0%M@h?Rb#kc24I)tc zWs#n*uk|KgSv1n z#rXp60aX6Tbpa*k7MMkF+;`ChI9qA_9_~dlO-?jcDEXUWe+T?)%kM;Y_&v8eXh;sU zSh`w>#>Ni~{C88JR&4ajPL9cM4#;moUz>baVQyek!46H%@>b$^${=(bTnBA`!o+A# zSX|qsqtc5Zw~cBrd>?{&-|OLiyYYMC_j_Qlt9Mp z$Td3q!W|O1-o@Ul;|eItN_N?DAp1psqVMosT$k%bg?@EvJ%0b>&p?;X@i?y+lNG`n zioS!aYL33+jcL6@TXXo$n%p;k+8g!6wg8fl4xB2P=L;P#fbR=qg{N&7zoTkE*9(Fz zD1$U!BN1?J(S6&LL)|GvK)^vi#`W($Be84u%i{>khBi}h|4oi|wA-}aC^A3wNkl&Ksnaoin`>u6uzACF&gT=^a+?VFCr4YV(I5ajqB z$Mx@Vrq#p&&&A<2i**up&H%EGYg5c)Op~=2ZD?|TFWAc`$TC`-_rQO|e)XgM!V38x z$C+T~5bYn=a!pDzwB^Q~fpD0EloM8CNDjb{P9d)c!UGCGfjqK5#tG4L0#D6nx$zM2<(RaXDf_7dha@i`Lwmc%HS143mhWDbyf`QC?r zOvbN5{KbFQ2gIJ>J}?$`#J0ny;ACR2iC^*W?wj~~u=S*Uvz>rv8}Kh_7jUFu4*bLY 
z5L@I3n5#fo$ncXu=7#Hq7fpahz}TM_OU8`@ANbEX@ac%{)&w5ye;V%%^9y4Nb`xW@ z<v?XXk70WZ*oM*0I>SHSH{#Z>pT5t~mdNs;jsHy>{xt@9>?>g7E1XwEO3NQb z5LX>SD1&NZzcU|Fha3g$AJ6~mdDxq4ptQO+BkAFFre-hvFpU!o0Zu=-8o$P$)3u&AG?(>kbhIOqYHs1T``|tcq ze)|20e(K3MPd?kGy!<}5O#66OnhC|w-0(q2CC;n3#TKajDAy#&VIr}!Qx_dYCvULxOx92Lh!kT(AMt;Ekpo=w7W zyJ&E{`R^Pjehlsd(N=pP?>qE$1zXG8qus1R%yro527Yw#^ZU5JpcvqP_v3mJO~xbq zViJ-7?*IgI#@FHa+QV!3h~sO2eEt1A_@1AV*GPDg^T*fG_x$m7{5^ks4Zn=U4S4kJ ztc!(V%uisfJX>rf+m66oEpi_GGoZuazmDe(ZHx*uEp5XlNYxqAn|N9!_ z7w3OpqpwNmg=m%>_-1@A)JKr}*!$!@@Y~~g=(mpe*W-DQ@AJ#!c>scw=jXrY(ck9} z$Mas_=l?m6_+?`E_?zA6i2vpYuMSmtPp~eLzGsaUk1;s|d-6DcN5}89l19$|5P$fu z^YMI8Ci^}A_?^=|h}(Y8pN-e|+rMmwI*#o%z618?S$7)8HI4Hnaz60|a6Yb&TH98| zGrQ7X=aTym{+!E>_wwglJhuxh%-`nBnfUP{gY$_m37HdeZDej-(oZ0V8qZ(x0y#G$ zK7H_et{{xp{&OyW`qY*FoSTpL%>Jz%1$n&o@Y60Du}>WJ+b;Ro?(jv&Ho=lXuKnj+ zj8|9sb8bQIIrwuf_RC~NBKuDG2Kj3ME=lyT*T=e>$5Aa{?rXaP)v@s31UmWjlCD=nU=gmD#1g-_dorGB#1cv z85Fgog{D(~V``XFCc$eU$oZxwPws(a-Mh|iOFDq${vCf{5T91!vVpr8i&mjoiwRy!Az zs?Y@Abv*E{xRVawBkx_jS1kMsQe%AX|GWoZr1Aggcfs`Z_umBxEq=Ep=jWWl;5@|F zaoxlPxquRK2bW)v<9LrCdWfwJcrg%4$M4{LK<-C@L#Q5Pkg&(^A_auu3uc3iS2(0F z4;k)%1Lx!Z267ztpK&61Xvy#6{x--L{EuAUbg>J?akR^nB;j%X@c;UIazfe@?0OdX z6ZrS{xxq{>U>cHq2kr~sN6K>7pk(Y_3!{;ml*I6lWzChw7d?-{QTo`6^E&KK(fU~$Svxly6LKH8pK*MnAAmR^^A-PMyNmW4@9Xt*f8%}O`e^rY zUgLMhafRm}?+Ym^lBXi~i+&;Z_ie|2_+2c;|8(8&IElt~(73cC$I1C*UX9~%9LH!k z%!}+l+#g{QLHc;?2aM~nA2sst_+03JFSG+`kMDWoeIZ{1lXRNI^Xzwo{T0ZV>et zFCbA4CLiuNJzQ!Ts3FYI=(P{Sk0nauaWAksEBkZ&^Vf0r&vD!RbA0yKae~bv?TeIO z$2or~!#p3a({s3;{-^)>|N7sFD$M_K-lwPk^XJv2sE7Y%?1R(x|FHId#r1z3KW?8x zefn=#^8Nbz38*Hs{Q2KjH2uG}kMzI&qsk1Co|*mX%pB>pf^qQ88!9HYQlwTL2JHYR zLV*SRxB1q;WnM4vs>%*jFqc&1!E@i3%1Kj~fh*0p!W9d-Gm+`R&Xto3C4u5jWo3bO z*zUP&FHcw48LwF5!0w=bY1B2-ehtl!O>S7tW#>ip1=>|CrJt&#>*nABo|Vgztp+;w z%A4E)?3u7j5OEEaSeBuXcgc~x&p&HU)ob(STzrV88HPUFHalygL{FR%(>HaMSi^9t zdbVlgXS_SN2Z+@6;#mUWWszv4ye0*mZI zqPOq0>o3dx@}gotTq^}=TxIE~Y^B4i-UZtFQX9|R43Z$Kb5D|an68|-wHD5c(aP=2 zM;$%&T(77ZwZxTKA0qL^GgsHK?=Jd#maZkQ;g%7|z)(&vAzbj9Sk`1kyVc5hiRRrg z>6y8L_|$XGI@y(fcPoHB3_+P*uIoz_ZMIhWwwrGG*`6kZ$J(3Boe#fcjzbi8&fAw} z6ta?x2vr_tqvtQr4gNnAg(bW1_DPJUlY zMA!X&zTUNexA0TlSP2C<({QuY&rhNAW(?%$mBZ7%*i^PY%MN9>&s@>9PXq|%Y5rNL z9J9-PS&V6Jaq9eN|BN&v<=6g8wjOI?mOx^Ws!qF8t|vGm)IOcj>?|>VKz1w``Rq|;N4mZ0bNlUG&GO29 zyzX*Cr&hTMe*9xrtnNcsCDB%i<$GX79i%>~+vL6r-qpU|T;K8I_=slmR`uqq=Vuq3 z&i>*=;0BP321+q3(EDgZu3EYY`5Mk zo>IYolRB*GcJ*3p4uQP78+8w+U-p(R>qX=3GJ3b?4t?loaUK}S&a0^B@qYBdS$LK~ z^pg;d_XYc@wyR|c8sv3mJx*HT9J6VQ0?(JxKV(#dr$JMO+rtSJ)ZFPcoz8%RpsDro zqm=XQVMv>9T4=7HwyL?b@8{W7ZJ(=^e~#3DxxCswlqBXgkw&g1JscTH0+xc4Bq#hz}2{T!X<)gibj!#jq)8{jv>1$6pw1@h37-DsjItHP#0l%Do z^@$*FTlYRvTYJ$s1;Xc3vvs|{)th5|-qcs8zJLU6y>QgQJR58Fd0oU5Xa`!Vve$rr zK(Z^+U0HSI+Z*8I{NKEvM^TvxZpOM@e9KABnw8n6uY0=V~zt-VA?(64le?BPu_G zGK`tEVUR$Cs8Z$O(e!=zVltkx&R4|<-%4A@C{Kq-+~<9rQQd1l*{`YN<*;0T>6hTK zTGIRD+&Gl)lYX67-DJIZLiKPqdoBX`u`8~(!xqB9&nUXC-ka&PhRV3XT~M4Uo=(c# z+?|7B;WD4D+ z`kc3I4RW@-uLkhfQfx+ zaFWc?WL23Pld4D#{1QE!2*e+jBrJdLMC-<~+eMyol zd;HKkv<0QRS3(AIEkA327ZLC$zc9R1k~@dz&zEO5S&;UvN`$36^}6!7RXs`K9t5%a8k_@2I3LpU)Og*0v*8HeUn_L$2>k~X zJevzaJ5_JteK$|5gR5(s+3XdR3j6--F6ce5(iJZ=i@I0@Ez7;we-^jG4C%t_@12(B zA>bg^p>PZ4KDiBX!zcD>Z5{`@MoRc{I^ zTR*PXh7m50mc2xOcDpphGPCH#>NPE++3u1F)496Md+pAw6>go@ZJNw4UP0ft()n`R zT*CY0*=|n`JC(&QnN4Ryv+wRF=!&by1r?v9<-DJNu1~CjiFtn6*^tZITmGov{G5gL zT;GTjrO33q6FYZEbAPr_jz8{f=XSc*0YpBo8jBlO#Qh+D){i@sA&_9$`{!(fm_&;F zaV|jSKM5bJ_X@>S-JPC%B0zd{VYUdYLTd`O+L|{8wU)FOz5VlqYW(Shw`z*HOgGEj zN_QKrxtI-m^r zx$361u{j@q`_;+R*6Vk@@aKRXQ$im^8CEALl%0zr3iV(BY9m&YRgs6oMnCZlcFA0lx#Yn3pVNVyj}o)2$xq>o{re^-}DvKww{u 
z_aQkvl4Gx2=69verPuP1vrG0cOn4_;+X*em|AhKdoA|sRS2_9m)zpca>KfMGlzYoM|%{6R%*Fi zJCkaE{c`(NY)zo2`~FP!kJ~#QR0*PhQEAw3A$s5Q^1V!Z^`_EYGOWYPwwXDoi$Jl} z+n$QYfH4ikQZM`C6fUl~+;2iJz80v~8PZ7mX@8M9q4o82zti9mQ6Hh4ncx?5@I{mEX!J}#Sdx?gd9EulMXgy*sw7mu(i2@~m z>mzP2-NI=W+;X~EJg(`Ak`K^)x>A{weWo>fV6$K!$(rff5=timt2JN^wU~Z!37U6( z<<1N0E?y`gh?Ui`aG(C(c&@b5@x4WmndaZn4NG2&%XJ=2*@AVVqnk``W^wbn#mW0X zO6xS)9Z{}_iYPfM!IfI^f^y}dMxIiCs_+Ey_AMVKfzv{36Gh2U8-iKUI02AVt=Sc* zYBB(;h3oAquAc0JN)l9sNF35E{sw#v3WAT^?8Ncv zYjY|jo@E&@yTJ!qoq3O@H<0T7$f$`<Ekm>DkMZH&ulc)h$~MiB23C0oqf8&qsyh9w`He8i;>Kaco?4XM{xf^5_M4gg zLN)%@a#yQiS3*qS9{2NmcfOyN4H9-`+e5zt(l@L8#fd#M3(IRItg+X>LElp{AyX-S zrt&VE=MON}_`=h$cK0-(9dL1fMY@%(eRhiXc{w?Y1~qswXa&^GKakGggN z0m+?WF7?Aqws;;o@iiEE`jVP#9gvIOPh7=9H-=uO-F%OHXx&U+ENs~%Qlz~r+_X6r zGS#&a62X`QiaqUr^3A!w1=a%F9Tn{(78KvJ*LnDuflfg`fD&&q8xEivpAMJJVWY6; z3G`W=^-U3cBNncCcHQihVr`^`vPk1w>z;dXsrD``({#%fKFgyO&+q*80dc;{Lx5$O z@*f?kzz5IG@6aeX4yJqKf;%;6dZS;JIf%i$JGLSl3E?Pz>+NI$@N`vqJYr3ot@CG* z3|(}}`P}9yM(yDjE+AsxD2RbMW?my2|z&la=GcD~ooK3yfEg1q>y)i(}!Wxc`w6qQ~FTj4cT zwHwW1Z@jpFx9fSUpAYv``FxZ?mMxV3?X#?(0G+PK*QI<9rSLY}7h9%EjzV={Ry(fo z!H4q7!CbJ|_^;<$z3<`)`1I${g1K9dNj*99L%}F=CLb@)V-jywC#CC~)}i!%Qwe7- zyPn@%>FZGFo>x%5w@~OA1c~d=DZW)^AUvqB9oUJF2Nu& z4>rK~MGW{hU5-C5rrs1p4&s_5K}|oqa%iiQK9`NaZ6PHHaB?jKg*N;zRI^0oRYj$p zQ0&v;%M6`LHvu`3xiw0?VKxs9#-4qD zDc)lve77{wvtAcRO+_Mw;;jl~tPiU`Y2%*7b?rjk7)1Qwv1F|M>v>cEf$^dDT0nww z{8S(B?^69NqgV^QpOJfSmw!J%%0TyrDjYPyI*(*$n76VsmPUN1kH5X<_lB43+%4Yv z=E2hUM!&j+zEmKua2}PHT7}?mBzlW~DHLE_oagg|~hXu0>^^i{zv6UWq>vF~f^+iOPF6B+K~R^4jR-EBIEole}$Mj8(^l30T)u zYNEk2b!MS${)6|ky2l)=N4S_1OJ+vSAwHn?$OY30%YCR~R@zuAWlM|p-2J9|Usyez zk-|{^IBTK!KF(!uadxlgT0Y>~ezYLQAdJ#!bhWFRJZpWnaS?le(Ng=%5Mr-`CHXSa z@2Urp_I(ah6nK{YCAbVAx%~M;^JI$1R=9kA5~4LeZhhfp@SYZ`rw;YdM^8iH2C(QS zLQ2;_b7XHP@;z*HExm>6qRHNku|L7U(;B>|0|tCOnmgt2zy6cX1Qx-hMSmx>z}Z@= zYVeG{p1p5aZkFeNfJ%F(l2lNW5MRj;Deh$vlhYS~_U}iU1p4et|EYhc}-~T>Fzgzj%`TXRAHohNh`r2NK*y7|GQhq(%(cku@{Me5fGvDh= z(+rQz=Q=&3pTI#TYM3mM@oP3M%w0!*UsnfzJ2GTa!RI6Oa9@bX{ZqJD;T&jH zzMxR@Y=F!Hjvs&7?AvR1-pe;&R&dJnWf(!Az9&wzjalY>srLQ7W2ZpY0{x{u`stw0 zE>xNHnIVNUiar)%0n&uVVPS)%AYURq_lzIKa{`2OZ|{J&3cndpihlJD+lx`z&bR%9 z`F3`H){B1-56?U*`*uBW2t0liMZwvp4xg@%&rka73?64`1uza=Q5DL0TyPa!C^(`y z+H3yR(!l|N^ih&I(E&*wK+O>9&TLU5$3PtOFz?pFr>aSwcH9mGU1t@oNh|c8;ppqF`4&X&NiheH@NpY{$05fAI|TnnrMd9@>d zSjTo>KIUM`jXV&~b>3QJjUn~(qwTpHW9)waG-;2C2CkhkN`tdqzs}!zk0dB;aVqb8 zZ>m$geik6%a@wsQ;^A8`Q3Kbv8f}rDK*Q(qX&~9X-JO2uTm^Rw<7;m20gV3Umqx&R zyJ&W+m=}3-ADVvBUzPs`#NjM+RF$fKD*7XaehD@Tb&r%bCezsinu^vwyi@DJO3VA5 z9I}IR4=i7Wfk|=JVae&0?xn;Ui`2}>Oj@5!y>*tfi^`s249{a>Wz#DQfWDSzF4|yj{aq%DEAu4H5!dZpQb2{nAGQ z6@le2GHsExa4hk%1aslL0p%eNpHMw{5)HU;ZG3|>Gak$^C%^an^Cmo3rYFsq>bYi$ zQtr#iIV=nmd)Mf;y`4RKylh!Hw3Q#Mf+T4= z!)~g^CzKMn)_W;L#qf^pWtaY%Yx6VEMd@Iu7%y-DeEY^wfDa{i`ZgQ^1@$_mUC8F` z_l0kB`@z$5`tc^rTZ9%VS<}$D_%LAtgHqT_-x|tte}i^k2mlflU-M{x=uUuziet2q zs|cURT>hey3PVnU)7}Gj+HntpUUeqnawVfqn*%TqcCSrsdm|b1FD0;Oe7PqLjVtaE*eKfh=!gK2I(Nz+E1E=}3 zW!elB9njzvw{sJlkbX_ZQpS1)Cp-UXMm~N|Nbgg^$Zd7R>G4y47FOK&d1XOM_Ez<1 z6CgOC0aU`@ZT~}w0olgL+Xi&6Hbxt~9U+9W5MnO1pYZCARA$hxS*^}C>3jCx2_s_R zy-Bhztj{g^X3Qk9cKd>$M8qB?#XqEmenV|se4fw6rk<^kBFpivA9T(?Ffk)Y_%?kO zz7An%WA~glAu&6D@$4!JRmSjy@z!B@f>b-;+j!XNN7!vWGt_J5{sWIbLLEc7P_a*cQtUaQw10&%T+*e$pvSQpt`W(U0`H?xlh` zzPntsjqUi(TY3FloG~iyV)2adNeNxUJVX^h6RX8Cv!hUd3h*VBgp=JF4ke`=T8A%# z=Xqe*57CG9y(bk4m6Zn3{p-PmRb?f)enQh{g{MCc&(4>4&nYPk(JB7jn1P^>2Np?Z zk_R(%g^yGlrXucZ+a~Yvy8^xegT1p$;T++_&aa3OxB{S%C;~b@e1BiwyYcKVkbQ-I zV14y%#?h{SXXEQ2fyO2#-M~FkOu!aFydMHC66M`hyTXD&X%fnW7r;}$nbG~@dgY1_ z@9nO^!9z%on`bE5^8se+j#L6CS#=Ttu+OtW83p*9ir*H(&qMAN%6FdhSk>f&l4G+t 
zK$OsCVRo5HQ-6XQTejHN`83sYaR52wzQg#mHg+k0SS0#-H@#*fVFxTZ@UnT(qf&*U zBt0l{_JV|bc9x9$4rxk;A%+h*_%0S6Siiedb`*TzU=;B^%58q^@-j?IKevADi8Q$0 zcA(N9F%^nntZnoWn1tN7qQ);WH7&%}k1!My_{4d`0DteS+nvd2*h8^;cT=1Mth!!_Fq-V>G zO$LkD``>4j+9Z#;8^09K{YG92WUKDwj~lZxd`y36EV4MSAsLKUITAqH{0$8{in5C* zC4$mlVDw#I@_NQ8KH$5Z%rY%91(v|nVe;O8#~UbMcUu^f02}OVNpJSKq@n_dzF~$W znW-{?lR5ApC2sctLJ8AK4qe#7e#uab*;GO#MivsT%>)`|Kkgz`-uMl_~3fwnb?A zADrG@*W~`dx@a@osIHaI6(wJ#WITI+t*-0C`dK~bUfv%Au<H`Q)`EOP>! zz2|cAgfTzb2_#GATqe`r=FsY?5PBM{8W`jiGf-?uqTbkgp#OC9K`Iig1;W!2#Nap#W;$HI~@>Nu98N>LH8|v^p`aN%&-T-IY#>N@y;}g`6rxDNsX8tdsE; zQ0UZ1bCn`FA0wXe^a?g)$7XHKN?jqCqwGkmT3vN7D+s_7M`;7a4X=#>n3oa); zZ)D!F;u9X=u!Ppc_BXTt5NBCy#0=uG#-YM@m9+ykm#lD{c|00Vdk2gAeB!{3FXdB# zqvy|@ACGinNI6;3zKjG>4$7V?nbUGgp^cQ-98EV-Mv{J8cdA>$a(iDudP6Kjg>sD4QZx5(qkGxvQ1 zhk`PJH!6TZN_t~P{GrzKyZ&2wG#MwsW;&Hu1dg7Wq{4j@A0zI62g@jV8saE=WTi&4 z41H|LVLU$OP^Bj9aTT_BFUs((ks2#L^szuv3;AAVQs7?>*4pZO!LeV6?0meD3c11> zk0MS;Yx1x6%M0^OLtbbq3=-fw@E;7@9zAma&xD2N`{I-YjI+VyLw&|v$K^%dcnB~? zIH`Z}8wxCKukj9l_-HpFx%b-;_xTAcEivn<XyzrNq(!_UWlNZ1u(z5_xuH4 zhI9q&K;UeT;Hyrfn^eM39l?o6LG|f_lw=IK3d{i+LIItxNak<8GlT z8y3sQN#9N%+%UN!U|x)fv&Efc&Aa8Z&t4gA?>~21nJNYTGp9uGr0_%}$6Ek11Pi`a zv(Vq~upj{L%cJMeq6oGJ4adlR(J4PK$P*x0t(v%(Af#-OE5W%I({BMNtk#2UI!+gY zL6l|UzNsO9?XV7+!Q(N`dl3|cX@t|Sd*fxU9vunyzGWjtc|w|RU@r0-y`{Cu8fW&f zwg*`R6*5U(1py(>_&wn+t3zx4pfHbs74et)K>YG4Ms1z)Ri}X1BNuEGc-JRwq>w=Q z$cB(4*Zp7r{@s}MeGI+p!Gq%*_@zk;gTwv-cRKlh2WYt8n=Kd{=xYOflyZi6wecWBjk2bX5f*pUfZEB=JkRt*|s}-jI8E5H-lgO8BSYSu>o^4pXCgg+R3@XmIaNt2V zu;-HsS&y4A`r8C)asHaW(q*^ZM+jkDR-HHaJa$offRuj~w{vzg`rZKI9NhEYUn%V> z9Glsxq102&scW?O`01u2Yn3iuun!A=rXHcdiJ1UETjTf~CReaylMC!X-|)G**_1+r z{8p7`!@6?wgINN>stV80kNT0@%Cf*Q4fhona?ODL z6T)8)_lOmTy#{8IV50h^3B4GUvOb^Zw{9gWA@&wYM2Kt8KT%>eq?%RIBdR5fL${Ws z4P}z5&Z^fzOaThyU**&OxFJJ-KB!AHklfZQ##%4$vx0bD=CjAQ&xRP8Li80;RQlW; z|M+I@Fh4hC_edj|Nfd=uQh;7xAZjk;SNB@yhZ70EdMLJ&5czvSpi>l_3KzD<6COH_ zJyY+8UAXLWqUw=votPufVyKArkdXiv0R^fX<(C1MK3+W>;y)G4dmul5(m8Mt3a#-Q?5+MTbN#atae_=1Dy+$I zHT3{vTxa&AgB{>#GYlG7%5!ySKVrb?RTw1u>lL8@q1V0(;W;Pdy*jOhUF-3L#Cj&o zIa_q|TkgEIgewKQm@Rt^!9mh@Yk`c_I@i7NIS0_#LC$#nBTnakDysNz8_tl-vl3Kg z=6mX$V(n+8qwij((R9z?9>ag#=spo;0PzmFQx6?JSkLnJnSuzT#Q|c|XOyV@$d{J?2{i#L`4b_;zWWt5)8reXMqLb-fo{lcT~&RRj8?`L~kI4x1tejh;& zfdirM$8_n;k`<+J*SmWDJQ>T6Q%Wi?I9x-!C+rxGMzWq-9=vaWZZ3`H>t31)moZu@yL3e+){|9a ziuC7l!bgz~A?c)Hrsc$vTPJgPqP!%)&g;p1Os;n00ysET+!{e&(78Jx*Q>+?HQCr= zYMtv|#xX0+FAw-Uf9M+c5P1d*RAdg*UuMWj!C)8j6F*wG_rMI*+%t?L2WNKO^crTVc`h39spg9+ z!$8C&w4+}#2l$G5&Jv5*?wJ2*6ce}J*9=qGo^vW{CoQ!Hf5O&aO$==^ns9ET01FG< zVEz3JfG43J(vRYqH!}*aCyTx|#1vo&{OvnxN(OTE4egV>DahQTx2!B_B!bTzx1K^# z2&(k{Fve0@;s)sUtIy7Jx?T3nJ9_TkNwpF8y$MSJQNW4Vb26hi$`y#?Mh)@UniZzHk{d zqN|sic+-1?UIV`TBbz=dLd6=K#VTn)?##wc;C_-bjgj@|=K9~-A-!f5CEZQUGX#2) zmV9Q!e@E|H4Y9?&8r%7b@M;2z$7k}*4|lljjrEcT2iWNdrOQ+qh5HEl6l+iMX)uxK zUfOA7=WG3y!fsS$nU5I%A-~OI@^v09@9N}v-}eTr09ArX?@oKbh>esbO;)%&$?>RU0aVx{p^h35Fy6|O^ zf1?(%@Xn>P11Vgp-0j^H*z8PiW`M%lV~4cOyM-Pea*zGIh7m-IEL`dt1gFAB`w*~T z<@`j%p>dS=f*9K{Ff@|vb?XRJN+g96D-`tYz^~YSn1H{tI)>T9556BvZ=DAG%)?&` zQC@A1gc;$dpXB8_Yf0rrU&7}phnMC{f65PF-HcCJ#bAAfx%9 zr2kTz%Yw7I(n9Olok40QzWwcVJ8JWTHYYs96U&LOt^pjsiFu7M7DLY7>R6V2jg=bJ z4<;c{ZpjC>cW?7qC%}O?ow$YQTHieuFM%o(pn}{TPPUiU>NcPBs1N(o(=}BQe;YVj zf%p9nme0;lwF!=4gUbH*4KH2DO<)J%L9 zWRxq4_EX|P9T-uTHSfIajW~YTR9O&tSG<=wjL})%c-Nf?Vh|+utH!u)L)d)GNdy&8 z8keTcy;w0tgr-7e%45Ynjb5mef5!aaod_&F`V60uFlp}ynwgxE3XmIkz29*e5qIos z@D=P)HnbB9xG?`-?jWBmoGWi_Qi3{piZBX3+LVT}6>zaus2d58qnI-vHcERh?B_J| zno6D#^MXe08dkoSn2>IWuW5R|%xpQZt%B(JWdBef0PJ>+TRzM?H&^D#ej+$;ll+_9?FGbq zsj8?XgZNy&Un49Y?s}^?qM0TpnAH1`G3F_JwrT 
zs~DLH0-ilx7}&SD{9^ki9g#&a^mw9o`rPx`i}Hc1V0#T%+5*!!q@ELzZklrwCY@^! zeud0+%*3m~KW-T0#=p9G-9zwa#+JvZuR{m1@*R|$=%@T?K7<{le-ob@A=|L|JA#Vz zxLfQY%O1b!K)=_hM|CZg8u$6nZ|euo8Q(gZ1WoN~!sx1fkj6NE4<2T;ZmYrZ;~9i9 zSrF6CA24g+e%t(7d&c~3kZoxv1)9ha{p(7CFH$P$Mi%UM%+f8jt|rOTVr~#7He-)3 z!yZI=@I#=VCXsA5e-HK7MMDxOCNk4So7zbKG8&6ZGK?PwE^_U3m6UV!&_ea=Sj#|3 zdy3_b@+lNz!?qwjiK&pC1**PCgynUg{p>bvka1ZnDRs!lOXK>H2JrX*4HiLHeRme% zhx86%hGYiK5y-zK)ViXmHrnYhA2)bbaCPU(t2Le_#;uu(xLnwGiURFhg=TA z?A40Sp_gFIo1bYTmH7T%5`3n9HB6P?Z^sxaVD$~cA1;%owl}P?g&SfWwi+>aZe$lk z`AIlhe;Ry`ue^k^FW8{MNX~oi9AX4>p-R9O*_J77`I#Z$SCJ_Vh_5F-n9GYsE5NZNcoDouwU%Gs>jNLvyrFIzeC}h&K%dDGb0KX>^TEZW0&upZAGv`;W)>n>nAAW z9Y0teJdxOW{)HI6qjPRm-B!#(7YGy*yPVkpnKi8|)g2qP(y-sl$OXw~R?X{p+)fWv z3Qc=HaJUNJ#%1h1a`3q6JiuIbf6<_Ne_0aW&8G9$RUE1Iiu&j0`OpCNqoA9kv&`*d zEy&{zR?^+Eem;cFp{dEuKdXi6dHma7jS({mv|w2@QKM|!sH274+K8eWRKkaj5&<F8cmpE0KKpf`-Pewcz*1vM;i)H<1n>fAXPu+Dx#Zp3eo|{Gg@} z0+*?`RF?K2uz5hr7VyHcB(#9GpANEmYqO+|DN1NU?ytl=|>0~?k zc9*3GcZLM}lfG6ILKrzM-2Zh-uixC@W$z4(H z)l4}h!qyT@{(~H*)N}+)3WpI}JHrDF2PjPj(Qg`QLVwf>-)zhgJ-zLXS6nz0wpMCj ziiq`so=zffH*ONE?F8ZhsohOc8l+zf*NBS}_mOQ2pY43D)K|##$}kz?e+7V%)YSVq zZ7iZ_rA1ofOSKw2GpTr~A8(hY z&z#le_xkmVLO>)1S+VaUe-b-+S^Cdx=FhHz_03As&2B7q$OiUnJ`V3A+FSaa_{1`OxDKT2$w5Wa-Z=%TLtNlKB`^Fx1*dMCVIQ>BAdWzulgt$LVG88;Nf2q1VG7`;*pe{bckE?3U zSEKA)|pv#3qaT{K=aIpzqZ;I#&*+{b%~F zwDWf8GVXBPnj~2a@dcTxn~yg}U>);+&4@wa;MK9NSayw9jMEh>#bBrRTzf}F9B<)V z$MdbuXwlh-f14Yqg|O!@!Cf>Ldf^!ydo@TQGv?!`pAc6ZafI=i#G4MQOyjS$kvB4+ z#?MwRkDqoWE>FQOi2F_?nC%l?5pA$!Wkm7?vM|rEbwAC~Et-E5G9ikv7-=3L zd(ykRgfs_UYI++s*woB94Wcs1#_Z42LGDTqgicToe|&Reh2WEG&vDa^EOD)XE)rqiIfK5GtNv~MRL&)Ubr##4W zLr5WYVO9NgzIVre+N^@8pCj%>h}EjeT7V zd3$)wN~rEOVcmRHwWYY#hzE@LvF?GkMACA(%*WL3IEAreD>Cd_rcK@0wfI6cxQDwk z=Y`}dCjh$=Uop(?U4`>SYE(s2Jp^}#)RmV3f8c#Vh6D~y;`GYMw?K3hJamtw)L3rc zueNWB1A2Nd#o@yzzZSUe!>5L$4<*C{0tdk*54F5W_Aca5M@_xni-?f0Vju#LtEu2`7$M~uJBBkY&bh?t_KXR4_9y7j|V=J*k@Tlg^9aAy$EO~140f9 ze{RjsNFW~*!Wu@#Xn9$;Vl^KyaW)VY0*a)skHc|s@IH(nx3%kgVx>#4kD=R5$)tJ9 zwvfd(!oYm2?F{{#=dbK+VvRpT7=+9lcsh8MEu?-$zZhFE(omiT^P!v&6+9n`4lrrw z#IH*#u=M?GzN@b;6toch+uXy?dMX)gf6kHRm!~ec%fPUqB>8mjbSFR zM!yF)d|xqMf8hOOqzA3(%gB;5oI7Et2VaRZsEHo#+V@bj^ew{o$QS#q58;`quYkZj zSxMOv@o%dHu06p}{qG*ev0oU?l*jLjaZ&FEIY&YCoOPiHQWCq4I_J{Xdp?Fje-P{; zX|Yc3%NiDLc>UUb-9~7pe{QWK>fVGe0OP3Z6C(AdEs$58LE8;jn)lm0XiWksrw*EkMlKDpe4TMT9i;GOa_aP6HL3 z);=5sjR8IMK*U!x3K|XJjS`l?e*koN3zhMUK(5*p&Zjfc^rKnp;eb`dvnn)tu}!l8 zn+i}e8aLfw$h&_JYW|&DJMo`yIQ$+yNu~n0f;Lq!>F1*ja!Mt$e1*L0$pN|IT|UWt zTV#Z}QxbUwa|RL*Jp-3+f6twu#&KR4 zy8{Q;F`q16}$#IV_y01IJZx9T5UMKT&%Ozr+ina6EpUBd!v)fvlPT+uLsA{5M8e?5cMsK3`ko#HOt_?!c|)fA1`}am@Y+xU9Clm zw9V=G1)HDf3%SZ0T)Dxt*Ty|3a6t|;u-n3RfC>9fnoil!a*pRd0(Ek}KrA|}7pwX!4rY!moJF5-dn|y@GJ!V}eABavn3< zSQxM2*YD*6@l0ab2?21gO_?6F$9?AH@anrRO__u5tou|>SHX2GO1=d!h2D@uhCa^| zFg||Cc3gQ|e_s-87hyfRr(No#e-j)3^M065=R+4_3r&>rbF)pD*SAo5Y7m^Q#$;ZV zs$1~KfXNYKQE7w4;kyJl3qFz7gkU{vZ(*;YydLlTZsZ^;UtZs+eLqH@BS1Mz=K61J z?(UNL4y<$FQ2n+fIIF!W?Fpu@y&0$;2-Y7LA|`^-f4u2XFn7<;*;H)gAEOm;kS6#$ zlz)LhT$YP5;Oh}|YUYCvcw@p$j{RmpW0sKqZ|JsfkP zodb8KfBEAJ_acNh^ydkYQ||S{nOt)q$pf4n4=678Xmx6mXNeSk*uEkKURs!>c8^eY zd{fQt`FDPBfcjH{`bw}t*^L`&elm=q=9da1Op=Ws3{6{AZ6*qniLgi|H46b?5fW(* zHHsL5tpQEF&Zv@3ZoVy)fmq7J?HS<5f9$c0f1T2y?j&a{SsDFo!L%CWtab=Z77G$7 zUjp9MYsX1?Fz8iME!h1Mk*_9JFy8K~l;=Blp+1jK`EFitZM!2d!*nQ{g?}B@VIlA; z)izY&lKs9rat-0hCo*5~Jb%JD8v=t0jfI!PJ{$k@g%%JDQ#?-kB>Uj#8#DP0n!9^O zf1%$W+bAun!*-7Zd$pVhwj2po5G@d&1zmbB#wCc|%&(PCPU61O*vsCM2!Lq;8nw@R z4W+s2CtTpW(uMc&hzRVhF)um(#d%@kvGpwSI0>B4(-CqJCwa)=5!uU-T6HGCCm@gIag3g zJph3cI*6k+s7jBeo08XIm>-;c6eL{)22eWw&USjR$ES8-jc~~zA4R@pkTp-oEF*_0 
zsn@1{=C+X&Z|#y&UKbZQea!cAf06feCEYATE7El}U=Oa-qJm`B`Y5d&BD|73EKPcb zMRT9X5c|D(k#{@PnC>0|zvq}ifYp+pPbv!FU8!T*ANkHX z>O0o2@z^sH1N;+FU_gxX$7w2f7#blm?$4}|EwhC4Cy1r9Uq(37^S44ssqB3B8?*V3 zQynaoRSMuCn+SG_mt5vl5CgfcI4o5Qoq>@e-{QGjONB;cj{}z zlk%`+f)>yD`5GYZ{&^a4XO#~V#lh7z0wiC7rs3GW@}Sat^QX;H5&gn+q67cbEn0iq zn9jp&SL4V#gI1g<&Okg##K)&7qWS>AY@XWH?{ulQ1*ux&0fyl%5}zk?LqjDrUW72_ zX2hoqVm3SDtOVl{e*+}B{%}a|W+gO)XdD>l8$I@NO9UavPydX5P>g@;CbTE+)RGqR z0Xx+r`alIJy#aqJS3uPjmL0jn_=}DCPCnrg&B1vK;eIo^!jx1?gHQSf4LQWMBikS? z?gnn-2!6N#b!kY%XNSWstV0gixs~>=+*2xGVyWslrTd#Vf3Ke5ek?vkj$;a-BsC*l zw2V;$d?x&A!coR_;&VwY@zI}E^JCJ+4V^<+p|r#qkg~jvr~G?ZKN_fYgD5ojJMYUi zv_<+N1)ySVe}J0Za7K`7BA45647Ly$yIi>VKw_*L9eD>UifZfNdftXbx_|CN?a@3& zhzfj|_JCeuf5E;Fzli}Tf*`j>hkCEi8iD7#q+R+(Nyd?`p~%Q};|TTeJlSrNj|{m* zJ?}vgTz%OX#P(Sr@pu4^vm9831M~Dd zAC2zgG#o4ZFmoG9S)ZGsbR`ogbRBpt38!?JO8j6we*$m1fY)}z30cOB+fT{D<NPVWo>ZPdzcD^jqmz|d84Ala8FTq(xW8^kJN$r1eP?~gQQB>J2yZ)* zz4Ox)z-bnT-1Mv4s7D%)>M<@U$aq6YUT2bY{^?;6T*$<`UyJ$3qxNA7Xt9Bz8jEV* zL;M6N6%0AB-dEax)YcT-MDBNe#IZ!+R;{;U>(|L$-B1_&LvEft!-XmsP&D(Srxq2K=9AA*S6?R2B&Vw59krkE;7a z^QmBg-0r*b*qxzJX*?K&I=M!?L7MofJv&~P%!aQ=*pvaqPh@^q_XCxF4-Gib^=%ft zf6E{oH>U^t#o_6-45%mfPTbGtQ_Ln)9fQ;{Go_Gi5#QtWa32^3>$T)AV`KmqN^wOP z_uycmbA#a8XZ)N5HC@t;lMwlgK7g4Ep99;1do2HDZ6f1*GHySMrkk<&HA&6R5N}7OcbdcZ83BhIX0JsuJt>TsLzx9HvNS%w4}jKn z2v}TS+N?S)@NUBcBt@mLqfCfVEk?uQG=EiVztX}J-Qk2lr-KQ3$2#Dl2V9dxf3XRc zsY3V_&J{I)aB1ibEiGI~+(XALt`I%QhIK^-zy0e7YsZ-(9F;IhQk1-Q29c@?tAy1C6Ah z+Mwr8L0Bn2%&}$noSQ#5{SEkyWF3b}xt~v>4-c$lj`Fp^wZoSu`@F(De+k{;U3?Nt zHz75&1M__{IR-SE>VR+7;_GmgU;CnIOz8EappT;D>Pjv6t>nzvT?V~_}4m-ZL!hJyE z@VkNo*4>@FkYyGMlWhOv=MFL7h>PtxWH_yEdE`VbY&~VETM6an)<_bh*y|T;THUHY z>5d8a%k5mxHGBw(!&Kvmd_Qf!T?8trrQMwEwkzy0@FR1M(mr@ zq`(q3K`S|^1*Ww7e-Z(6$8oK7mUtj;pf)BZd?0^XMMnFsx{m5@HKqsev5D*FIJk`A zM9$k>{R|PtUbiX3Cn0GJy2>WMw)T57FJDVC29ePg4_x?r`Z=KDH{oVrLe5+t3Jc80 z{q^gAwt6qVNH}vy05JGM-&~Mg4t#p$w{?Mud3YhJv*`4Ze-NAwKECwlUxOy+i}57l zBO8QwO|0smGvV)ehT78t3KKMV#5#axu70uUK)9n1qX1rV$!xz%5=iE)YKf!qMT2kw zp2PHD@5NzJ$?aW_-PQbFz)k~S{`l)9X-3G35E1g3EQ&2u3 zSp>B2wDJI5SV+9|x%HB!@Y2@~yDj4jBtTG@8yyUvF^B@_cc<}G`H<6ve~~f;d~`!N zLJvaSkoKKys~PgVlx)KrikFV>l`fQDTYa5cMCUUte+%_XxvKz)B}9}urz?$A;N7IR zE|8{`LY!HY+2x^m>d!(l`kwPXE?Ng{-?Ic#6Jdflrgtpw{oM)f(+}xGbDYeA)SS{% zR{Mny?SYEIL^yK)onPYk1dzUa9Ts?y-*NbXf?w2{GdLSj%;x}}3n6Z{1b!f#0$~aS z*%@J>)(%&)#%L5;j=r2hAx=#bR=)>?zil5e|x^3$g$VAHFL}wsoo{Wu+m+wZ9k=5 z7)w%4eHh{99J7zCPti^fp5d-9)FZLAl!sq-iG&S4zatK;cnEQr*mQWel6G^!{)lj@ z3o-6}F?`GPun{aMa@D$mo0~~y21u7jbSE6g)c6Yp421i>?Y92dI0P?$@Ric0d92+p ze||a>Luih<{Ro@5&@5tUQELV;J0r`CQD0#l>M=t$4VWJ6<7wy3NPWqS74C)#gW1z3 z-b_Rr^S;uuhU*)!Y*#F^E2I7t03AIUVCp`f@)$ov)|lg?pN$Hxj@#D?-oDhUJJb-% z+T{$9B>>w*04C~!I5t=8Kncd3L4;2Yf1Dgu>gDe+BmL{_cddBuEd{3S5g)`^bkzVu z1*Qmjv9uk-&Q6MsX7KaM_Mmy-ZHgW`7@~dOFmaG~&iOQFnzdy(wwu*FY~V;qB6TFG z^}4zx9&&5p3nh`^Sx8%rmJoieDxxV#N)Ki|G% z=3WIB_IgEgd9bw0e)=QurE^)Qe>fNzMGvKbv6@{bwQj66P}Y!C??Jqva$GN&siA>e zqw-HPGzR-6q2XJHQ70w<$g=7r4hzR?dil$8qI7!r=R>Dv=xYKq`u0GVqU<*6C40OZX1TC(N|Hd7^BE{ZTY+_ z*Ldgf%vvWR{5m|Q(GC!*0_+aUv5`ldRx#NR-fZESDUum~Cm_5HtG4%z8MV3!M^Gf9 z9h(ws69Ve7J2)u+;33r{f6-5rGJnqddK*0Cbbw>CQXi}nbcJJYS!Tj?7N}Cz#yEz< zyvGOQjS^x>`Q71>`3_Lcii3h=dbuF!ORHyd&1z}1 zJ0!UPg!AOxy6oZ$E9JU~5M+cbkqiwy6EyF!q|0!S7_m&nbLc>he=B@g@W%}v@&?~^ z=KKacBpCi^>}e9fWqqllVDBFsdKXPad9YkDJ1?I>pz!EP+=Ige0_=vtjAY~O!_yO5 zap`NdOX=9I`3{|~hCR;$*fGN1kXhWpzJPdZG=vGTV@gFh#P4|b)lmdE6YmANL!BL# z5x7pDBnd{BVuUbtf3k6o_v1%|er~iVd-0>7VeN6~LqY;nAyGKknr}N(re~tI%gS(GXinRo&Hceq$SzcNm4Q{AJ;t1`{_j;ZXdr84)J(Up;--eEkJ?=J*i$i z?6nz*iYkvb?5AoFM*LD=B5(~({IvPEv`S5or+RAn)I3>0g;nn*95gF~!{&XzhUMrOsUH 
z2{wM-d8u$vl4WOOE1Vh$MtuBTkj({*vQVDwPQ!rHLUBvZw&ah#WOQB&$sz3NOgON{ zFt0s$W*8I#Yd~zRcaI!};zB)U6=r{`iE4ka?SpS3WYa2svYV0pE*vq0EB4!T3q|0cic<954t4&QpLyLWEH7TzP+O z`N(;|6Fd~5l6}7-0H5o60FXlrCKm(x(Hvw9mAibsS>*F7Cy$z7B1iB%=x*Rx2C=e& z2JY)MoN;>od=kR7Cn$RRI*!F-rZy;%@61R@;c!hdELf&~|EQV)6{|@mAh9$__~A9> zF&?T)1m3q|!v0-e4*3Blbw6Dh8`pntw*;yK;IONe-N2Uq39TtDA+@3AGPb~sUu(bw zSm)SY|LoDW3}$d}uYj9xO|zg|hBa>1wKh|^lQQX}1Bj2qBBAj|W3R{S;2#dQX3vbI zBwYRV#2pi+3pR(?Nl6~dn`;U$PGA=f0@OAyZZP&|1=@l)bwFkW!1Bt@+|+-^7|8Vd z;ynSH(iB@9U^9;fl9LH)GeRPB^kjl}T^8!w7z9W6q?YMTYWSfdS#(-=*Dsi9zK2+*WU?z44?cr`5xp++M1j9g1_+y8FU$va| zh%TY`Z|7Yf+owp2LlM1)B}sp`hT0nexHpOcyk4NDlbGYN$)^Ru(DwAEZnvj{rJB&f+UH3}*YajaouR zvgQw~;{cU;V?Pi9OnmK8XbMnFcrX1mchG+63@}PP3|KgB+(Fb5J~n^EA=fjdKl|l4 zLDMa8vJ$+Qs(C;gfsc`IH@iS*+U5b~RM2kxNRi3`WMGe`t*dM`kBxaaAkLkgY=+_w#D(5;zd!j9%9NqC3P8pCLg|ua)_LE zbr10`WYg9zSO8M>=&+{>UN6$Y90y=2EV8Dq0U z6*-H~F*H8O${69TCOTP_A$@1DTn4#HV^2v1SDt|Wa7w*T;U0wy#Vsqd(+Ak<^(Bwr z^nfM`TpPV06^$vf$mEbO_rK<%P_64*b za!G?x#A;x(NP6BN0e)$PG7)gBfDYR(ztXXQ4eJ5AexyGRU58Gu-AIfuPuKtz1|Ty# z7a-wr3(O$Exdsg*FbCec4_Nm9-_>wx_K!danNBoWm5OQ80>lm(i4JoKYuLx*| zrK4c7DlKUxvX5oA#}fLu*Tsj3Z;g}qfTqZN<;PDfp>%)q=?x3s%=kILuG2xgd6^pG z#mPqxLM>T+$BYEI#ostQU{<6&re0W|Q#P>M(-p%WoJzbwxa;lBjWCJrd<2=zRWF7I z0etFF2f!e;gOtST=M@Avw|*+qgDH;tumIHD4bV+-S^X&i4Kz-f(pO!jf=T@Wd%)7o zyJ+(DrBi=v^_}`I!mc<`DN5XDohE-PZXZB7B#o-2%--4bJ+W+g02mv~q9&#AvVk#X z=*|2VHz+LZT^iGE@DpQvM~b2T+~)#}TxZ_-f`h!f=GjzO^z1D%%KGy|Nx1So9-fbbmX)}LO-!wP>vhW!iT$Kyb&%UwN=(Qfj`=s^7F z2NsS*_1tx?Bfy6vuq5#$j548*YP;o`Z}|kM^%+|_h>#4Z0-P{&^L^-ETO1SxsQ$64 zosK?)+W*zcS*XYQdCA>Wdu9tr-IKz9usF=;9FSnNzXqAyy&~1&7S&IXl z$`gO6Qj@+f_XIo$tcvxg*BaM;w6OyEG)So zIaUEu*R6nrV{yemvArLX6_pf)|21~*VBa`j@R6H#y01T}9%a4Vt2A=sraMUdp8?C? zQ?8ir3t^bPI9^?;9QI4hHayPE`CG(fab^7DZlR zh#Y4f$7cDouHOUvTmVmX0&O2=<~~nM>m!O64=8l6cWPKJ-(lH{&Th zF&A_Q*sbdy8V7xCv+sbSMo_b0S`~WTa&dPbC<3FLyPZCM95^9h$0dU?=|etdiq8R< zL?n|6CLh;C*RoMraL)zik6odANKSuPV1?37uFCuS!x7Twh&~DM|n$B-o&6{zg>(0GzZx9QkkvVo?OtHj0z*6?hDHwt2u&Wp%Kc{mcxY--^4avvih1mhVmvcg&ENpNA`kl7v0#-9AoMVLo)NZ z-O%I*>r|KygH6f+0AX%mEp7MSGa=a*aB$@JLz3t50OIcN>%&VY+u?Z+V%eq`v%e`l zsxV9dGamqW*Vu5JkDPf%ZG3;Y8I!%=GP+#NagOK(vq+hZD zY-4bfiQShTOy%rVzB%3rY&&6dX7FI>j5Ne&Opjlz7E;y~fIUlMxh_A8xIj|-#yvty zqxEAg&=FFS%H8bChqxA9n6HF*(NU$CfRG_^({&Pz>!v%bec*`<$Y6i=3vFB;gP1@W zK+)afTWjY#j$Mco1L;(Yq^SQm9uHt{t=MAkyB(@fm%BqN*x;+zRb#V#(2z z8*k8_3m$yZ08d-b#HsZ%HX?16g7sFv-zu1NrXr!iaJPO5(7@lU3wnq4$|3E`E7o5Q ziC?e-ZC|{bZcpgXJxqUlAJz_sSXcP*G2VEyQ#dIDkocbl*^!qi1J$tiLsKkD=~>3? 
zm}V#!>1ap2+175&4L;~xeQr=DV;t)x6*zFcY_U(;a%X?Q!$>nixK1pGFB!YWU`zvi z>+kof#4Xmh*sQoRKSbF2yx8BHlx?_#u{HOBMg2;J!>5O5iSK{0wp)=8z!_w%eeDKX z$9i<@eMkH7qDuVW>u=H2j`#f)^zgBm^_`Bx1i5oBh|1O)1w$cl2RtFv(!Ed)&{Y(i zd{c;T9>6C;?Jl2_fj`i~LJs^&zghkkUqA51xSi?DH{SgFKDoveL1i553MYR|d{x2%dsO`KctbBl5J%muREGi%$upKG*oSQhnubFit5%uMrUvVd zEKhTUtUJ2z83nIJ-n+lc-30y)N=60XXVh`~GDSNG4+!lrmhY%Ak$b<^=}DJ%;RTO& zq#jrZg*RJI`^yR~%PVkaK^WJmDc34sxi7A&qB5*-nlyjRc_mfm*FW#@`wi#y>4x^2 zMuCbZU{2!{H+;DpzGSFCS11fs&sz0@7+GJ>Gw$bDlig6)Z|gHf&e%V%=||ahR^T zYo&h@mc7sNuz7+WX5wHv!{rHt2kYU2oK;Lp~5T zkvx+ye!y2u1R@7>D>Mg*^k@STYX3WCi1rEj0um>Ff%e$>mAFI%`{akyx`#^U&^o=VlTJC1n&Y9s@V%z+3mci5 zel_gu$tZ!wkNN7094fn_s1+CN+u-zT_QV58PbEhNJ7fW}%aVHpwXq@Kft!%Rwk3ae zcPn|802qVO{=*L$Satq@7lda35)?o8`eD<=4S-!eT>PWM#mc9SZZ79vF$%m70JYVA z|LZ3Hpb^q)sr0K6sSUWo-wlW(fP76+>w45TPTNogZ8ZXs@Pw$^Y^u?lAi>hV&G~i% znD2pNKeT{wU|v6@B$={fdW|$?a?5}EtpG$Jfoe8I-Gh3Og|MMeA_qcApKplRGyoMc z0Y(WrgR^Yx>5N>ke}Exzlgs93edH!mIHrpSQTHI%y2#&K#)|51jPG~29f}3$7#RL@ z+)UWNfWZ1;d(%7o6ew?qq}cbL+S~0{mZbzaD zpZUwwzy&;Ee&gKyr*`g|YqNpilFZ_Y-95I|P0!&l_rT@cvy#eQ=t@3L&l;Fdhaj}J ziMhhLpdV%8o#k&D;MlNNFK~bQ?;=BhKa6caK>40!8rxojsrn8yn6MhQ0+P9g3v;sG z;CqO0zhxoaaPpE#!FLC|E`V=URGC4tCsinn(slKnR=*&QvcWijGhS8}NW60cr=j zR(Tt~z;e73QVWo!69_7c53Fn#`JT}W{R!_^T!z@Ce>P2bO`0@WXzu}*Stc&Ztm*e$X5jh{Pn$E|-tz^AYToYk**w4`allh#2uW}CrbRF?3ocNgf&A;%NvsaB@| zY(j;uy5M?)NVFc0Yj>|NI-(&c;tvi#|&`P=9={Fr}G+jz%G1}>`>`l^36 z1CWe&M4PYWc>9s-47-d&=KBS>Q*GwIP&jb&tB~ka@T2u`uJ9HJFD6=aun=uNS%Ln_ zNvt>=;o8gvbK&$;A%h{OsTnD)3;YM>g;EY&|3V9TOc@GZf$gbJq!$Tq_%(=?x!j@8 z3cYKtgmnXVDyM&&=Z*E`VDG*=0t0PWVP}N*wf6}6C@mlmd^v{hj+;Mxev<|tWCp_6 zu+mkg4K`nVLmRuT#t%AQ@pU0};wgWJHwS`1w)cd6hUQ#D`|zCuOAGr+w3#k~m>pAK zHE6@Owt!im4E$-0#;N4nCuL>w!=#@-cu^$00ef-`2Lyju>cB4nwr5ST`b*`rF{bA# zUBXF|dv%*4#c&}^^cnWsK^|MCc?n+r$j!yh$X1{flsay|`wKpLkV2z+nuJ_8Byy`K zag<#`Hh+z>mZ8%k90_pssK+wVL`5*LmjJDclBQ=)cPMrRDLj%AAW@f6bO%h zBKlEJF+M#=LAZ+nbybL~a#Oqi&C{av0XJ$mCPbN2Hm?=XyNz0c2EQG z&TO{GkkZk#)FlLTOLxZSRJ-H9uGWw0 z3o{W|i1mR!Fq-x=MG!`O7}onHgHV6!z&H-Sq66f;#UgI_1kA$US%)WZfXaD;=0r2o z<=r(ZEUqTKiOTxO?C6Y|HiQalXOY1&YG(k1uX>=ufCIFG#{=KV$uV}vGtTe#yLcbU zr=Q)=O94}Gt-bUT=9NRw^F81N`V&r3Ie>lWEqJaH*k^QPg+EQYf(J2n7=3@@5!h7S zx=U5y;FM}7Gd{@(@(q^|uydTsDa6P65Y?^!P~am#p%GIBDh}oiRFIUJYuWC@e~LPi zBe$_Fb|S_4BhHz?zDNx~eGN9$zn=D|{nN5wr0`wJ`izi~AY+R4AD#;)DVx(!9&UV6 zX~Q3RFrh%mhTSKY@0GL*U}=BvwiQO;faS}OoY?!CsRC!8 zv9sg7ir+tjlLCJO%BLXjBVtwU6_l-2BMr3Sbg7`C@XGVU`Q6bZ1HU~-2Uz*Xa1=&d z_zQSS_3L`M+0C9$W(#h0*AL^Rkmd;>#A_yFehrSlsOiIgWK)wE_p}$eSLAha4+r;K zcykiQ`ME#wwO464;ycnKsfTgTz_x_WQoV;QIVVdMj*s^^e3do)an&rHuYqaNtNFu(<41RekuRO}_Ye zA_F67@(+Qte+z%gLXC(&E%mpZQ8ZoPYNrspb@i=!djpK8^EZE`%toS8)bvzMAY}{l z6Z$J+c}I0tcmefi1cp9m7YlkUK;r`KlUxn2d*y5y7cu#P13$A0ApvshBiNij=*|Nm z`}C7;sE0iH=FY(eQ0pO7#IYCKQ+%zxO+u$$VY6zIk^;Uf*Jhj%;feSUpIi(f57C8u?r5JqV(ItR=_ryV1)jzwcW`Q1fI_~ zPN==x+v?~K@;=gh?fjygrb#(w!FctfOu+Mhso-5vvz;!lRV=USzxpWY1-MhlTOp5UhuB^t-X-zvYMRL0$!XPN?iR^0I+|uF?~Ti=(jf zL;K?GZom$Cjx4)yZ-5#Lb_Z99H{^P+2dIbJ&SZaMg7+wpX6H?u)I?k2F2om=%2cTu zUW=HHn-zEkz8#DLr|U7*Lgu8O3_I36bdSjfpz%|H*1{|L0RIXM)g~){i5fds05bR1 zO9K@~H5M@Q42n+!O?wT5N+3)c-fot#mpLFa^6}DpZilD1)Sw3IQ=4D_DV9E4aDY9L zQ8a)3{rx=cxw@t9BhsMiVl%yg(Lc8#+x}6jkl%wD(t`w8ARvlzdBK4!lI6@7+K6}* zD$V`v9h%DeK@2r-V(<+xFKnPh;nXnmp*N(KjM;RZ|7N)jun=w~bRU35m$8cVKUnV1 zZ3rYr2mgZ7Ux1Pma0)*u`W!k!MZG)fSuTHh%OttWz-mlh#+(9!lEpW3HIFklC!z#m z40BsnnEfoUnx$zDbS5%iE5$rqFC3eoVgMvp1Mq56^7_H3!2@-)R+94N|B4(*uM~vF zV`4-0#st_)IDy-%`6*ZM_|RJg1c#@mF%^tCAl;5;`3ICP#JAJfA~I${}ar35M^C;ev6e&63{m_$%`6Af{{+NK+Ck>XGa-$W3-kU~c5U3}BRdHk_2U z7|M$@`Zme$PmL@!+9nQtpP+-;9M^we9HM!`O1NnIvAax(^1c9oiZ8+yB}_kd3$%0_ 
z^&JH?hHu*YXQgcO#Gl*#UPss~VEeuX^n1T^`PD7LE0)<#-#f#Wlp_1ys!4RgreOPa z{DR@s=Y0)GGzLHLCD@2_6jJ*Ib;CU-tP^Yt@JRqjA6VKmmpO;%D<@3};RJv8)^Jb{ zLAq)seR0!%BA~uay53#jJrA9@y0bnKY?Zv`$pU5l*MQ&{NI{8`3tE=Oh7#Bf{`Dy) zFyMtS6z~kZ7U1j(f(&RSID=tY0cvOA!r;uy(W03OV_r~~lDjGn&KH?P8=ul2Fnohv)z zRU1VDv)3{rNkKn;p26ZY??c}AYTUxHQy!?T>E2(X4N4pL)hiU>aBMg19P(tsjgG!R+!cRB)r zpH)Ka?!ZCbZL!t&vCav^*iP4?Bl1WCY<^PNgw)7`?!oDBtPC|{2x9Qg zY#tz1AQ#|;hq$;5gX@0_+TTa8KXn_fx%c`tgB_5=u-XFo^-5YE&?Mn>e517wY7SVD zE-wg^$IGV75p9==DxSQ}oHrY~bUl*!&4CLCF#JCsK&YkBl78XUa>2{P1WMc;nDQaJC?%AzYW5X43;)0BtG!s_N-t| zgcjqSh5T<6X)(BoaiOz0>+Tt1trdW=r+JS=H>!wGKt>5`|#Ze^aKNVF_9l=Bfzo%Oj@A6 z2g@I88Q13lS8MlsuHQ}bkeRW>=z3KLbY+jN4XXyK*y*C&j39ujfv2evSQ_F0M8-!o zv$0KA;hedi_f-j7){BdY(d7qEqH;;>eD6W^3}aYGX_TJ64XF=*HK!Te*wIvvGo^X6 zP(z?Mgv0#PZ2^1`R9cxni_YylzH{i4O_%+S4*CqTQv35k^7q|?<%4nBq70V}`Fheu z%~+Ihzdb-nx1L82T$Mu3Y#HJQBz4Z%oP7`&D0vgmq+WOHx^YFVfdY*cf&NWM(M>+< zw<+Jm0=VsRju^9lnww?Pu70bo-c;QC;P}$1p!Ipcj6w#qC^Tf}U03I85=yU^;ayE2#(D5(=ZrKp2oj)u9f;~ z^~O>kslvCk&(c6>P5Fqn_2;!dMaq)*2Ow-hD6$UGFGYfXzA$msg2U4J{*9_!Ov;Pg z{438U7rQ4`*-!Mk@dF+vJoC$O0cD(FRKS25OqYM&Vw)YwljFMuw?DvCfOmqG+xGD)HT}bg1nh%WmK{uS432#p%5N zeO>_RKXV2^0(7C?u3-5^JM*wsv+P?Fe98+X3%*bn06DuB^z*_oCHG>cOvzOUn%Qz6 zAft)UM)4_aZZ;RY?6I5%=bSL513x_G94&i|s}-n|#^Uni5cTT;q&083WS1*J&X z+T3)4EukJmU%^9|0IT*XqR9|~K20kiPH+{(?Ts6jKJJ{dK42!`jb!V@d3@S-{P=Dk zV6VJcij2ot@;`>%bGHUnvzSwWlkt8MxhUM7n_N;&r(E!FbLCHxKOSG0O(s+#v-S6X z{;Jf_GwMYNA#n+3OYd6Xq(0+7$%{Z0Gk`gMcmZmXkhs>G=0VeIr{Uco8iJdv0U0JH z-nLN;?oz4Q;=sEalne2zlN8*Zv@-=Bx>Apx2mC0#_>l!K!I)?kA&FlM6s!x^{a(aA z^46(R;;T{;&hd#>nwi!+`vzj8gk zMry@&*eXNXS7X1mtJV2y{p@{LkWk^}Qb5nXzstO%cb#=~sGcmqg`3@f0R9Gk z&HImmE&DaN&I1>fUiIvpcu|-ss9WC_cksVBw&TMDv}NCbWoJd08sWOEYSt8$F+SP{ zTYAso>zG?tfejG`oRkRE6^=sg_CTM>Ew;F@bLp*gd%8RUP$PYH#W!ahGprht()Qow z2M*4F73lg`hJk9{xI@M};Ro1%xHrEW!^iqVB6)Bx#*aLgP{J>OKcU9kH`KT9#%h`b zFaqVr!`#f}R~RK4pZd(fPC|9^ba%)wZc-B9@eHfwp8CzV!E5#}$g^0B1w<{z4u`?o z?;$}_>CzNH76lI&+LW2mn*JB=3`yu*Zy>ZUu=+XQShF8P?*AUZ+g_@Fj||BG4R%Cy zHex}>)n&6hlb{)45=y|7LscWsNje#&+uJv;Kr;{g5W9iIsr%?l^!)5Vmw!c$eh}}` zd%8R@$MH1ps20lkJIbLueR48{hC{VXd22Rjf?E)T@x_&I+g(E+QI`Geq zph<_s48t}dL|+p^sm~UFj?dr6Bcg0Zz^3de*WxmMQe!HTUN3m4PN|BM!*3=v8?ZS+ z_Ad^xhKK@r;Z7$G4(jwCvrcfZEk2yyhSsiy8#LFbMB%>l=D^}ylHT+q^-v5J)8Vqh zu-<|?R(HTwc+sspmJ~GC3Ju?^6(Hwb=!FtAhPmCnij0R0WX2uP#F`JU-jmw;({+=2 zsa4SdUj3Ct6q7GuA?TVpg%3phz#sr08Ov3&HvoB!3Vtu=)}CI&p+?4i?KJ)(crYnD zR57+7MtpIbQ`Itmh|w-=9(@*K2tdZU8duD3W14)1?%9B}hrhB@)C zv@$2jITf}n^t(tldBB}7_VE!2Q`hr55BNPetFHW?v0pg9bGV-P4{_gkgE~+_ZQl@z zsU|SWBQo$^ON-Zf)O6@_teGaNOa%ZBbM$Lr?=-dO(6}0Z%9L-x9bSB7*M^<3Cs50M zO|GB57s0m4ke~va0^Yu4>+IARYy3O$lB{8{r530{bVKHslNotx;4Peuu>NEITf@p0nr}J$<_np6UHnv>$ynuZB+)&!ZbjJ z-)2LA{Q@i3&YsMBx0jC@Q8R>>OeobuYrlXe4e`i-A+qWp(Ib4OP8;1sFv{RLra<40jO<>!LgSD!qwAy5xtI!x}M z%M+NOIfG8p;NmpFk1-inabdSGjIUTGOp#`P?)RvG)yEf_D&D%gJ8vlwRJ9(pM>>tQ zg1N-72`dC*{TqYX@Zc33_%+|3&b;tegR&&(dKIk*`WA0r^3WE9keJb8z_BlV$I=?$ z2jVG(3T&obzvpZl-;~?!FYVj3I!Nz-Kzu%r!hw&D&aD3XU2i9V(Un2}S*nkFr>#JL zYV=Fzm!g11vTD4UecmjYfOXAdaG=+aXTLMcUTUe7H!64FU*!uhw5H$Nk$4N+4ChZ9T-~o7PZ`p(P*pfi@kWH4W8l=;OzjKm6?R|e>aQeIF>s0m$ESeV( zdp{Dbc^?5J8sVM3B))xc;9Ecg2=4Oy1^N4|=HJHmape3rwNPncTbI#DcF;b5W%%Ww zS2xGby@?6qyM3{4xgctw0^`zx#}*nsbL$Pc>^JE9MP@>K6zBS);_r;^fPirFn&6u^ z%=7aAGD_IkEnCQevSqXQ=`V>%VZWx^o&yDeMO}^;7Fv42W=28`^#r8-phzh^PVK7r zE9)G_Ym;pU&gVdL5km{iMV$mDa#K6fh&3n1o~cCz?!aOyZ^x?RvuN_ ztR7p{YiJkFdpi2dZk#eu z4tr_9?4(fuC*uHWz!Ay;duF(U3Je~oWyw6SrTjWP3te+z-r>AiWv7LEIM^pNjNCx^ zg{IiPLw11>yI3GhU_Lvyu&cbLR+04X$da8L5%8`*CkoV@iM8LMta zmmNi&MPR#JfJV{Bf47lekjmQu)=CE$NAw8Hs*6Y^=1~#DwzezkBb|^8h@iJGT0LR2 
zSjG?gZNcMq;mDCN-Z;iSVQqz&z46YYCxz>C0mXLqyrlPk;fwFfufP=Tegvl2v6Y^{ zM|>YgP%>vTjKdwYuHVQAjsQai>~lt=pjAUo$AFr@h5Q&r`Au-jC44LbyRLeKB`E?t z`)uJMEG%aU@gl}-5#ELk zERd!Ck`vT)Z6%e750^n3y6eZ7pJm}P=_+TX0(zJJODG%sDbL01SN1(_6(&qpl8D?- z<7eM@xPZ*w^baI$?0yvr&bE=q`KHvdETJjpmj?_3Y_eWgBXk>R*1GD@qSTQ2@$>$| zM{xarA3ERl?;pGAlY>4ZXl%Y9NGEsL0gEG37Y*isu~Rp?EHn(~_(4b${6?P%dFz-+ zEdYE5ac+tPrhNd?m3alskyKlRq%Ju*pM!bZrDmdGJsZMO5)POl1cH?Ep&t)`BedCx3G?1$_ za9sZ$B_KOorT>192a5mT-z<#oNOynF%)amFLG7uRxUgF5J*8bN z`YgVu?C>iC2DOqRcg6x|h=h?pF==`$f+ko%6LSdN+9$^==Ywp=8uQc%;+I%#J;ayg ziFXv`5l@SsE=pPR!~0;M>x9Ew*LKiPb2*V-5Zl)bD4TTfz4f~%CP`K|6O!vdrt%>*_z{a>e4Rkz2chwEtNwV#&v!R^ zh>4cu=KjtxLH8!R`&GVZzP=m!&=@tnfiZdQMis!4jMcAv!!2$O z&4U=Y9%p&nYY)b#5(f(;=}WDD&KptQu&#W&J4AlP&03RfE%yNNWt?cN;0i!@Ag>Vc zBu*1@X5=F=tc_FvF!IKsd;07PfPX(xq!&Q(2D6kvqh}AD^hxq>S+IsP3)!bR#?USE_4PBXE z0~1k36BH!ILNv3!5>yOGEa)v1>kf%$F>|iI*7LnR3dp0cR^f){sRAorPSmaCAW@mm zFAz?A7kc9#>(&tBgZTk+bcz9|yv{!`8~LpWivR&8gPbpya>ssuXb7-U32>ivUO*{7 zZ1zc(j05aq{N%0=w=G)<>f*)>qkAB{@yzQHzX`X1px_H00HGs36xY#)gF%%qOMH~ zwOI&mn=bN;0Z(W_+=xu(7M9&kS^hvIN|Sd9qCN!x)&6A4FD5R z0gN}<2sIHgf!LaP(r!bz>0$y+1|jmiTvemnfo!`m(&RUj1uP?xSq$RLXWTo?vM zCEsw4UcSVGNfzb}KVo1&!6W0`tpxAU7fc=sxbeVh=w+FI46m*BRi#g3>M?La5IJNX z_#Ul{Fv7GYi!}j2Je##aQG+Jn7J9W`yy~yUXk&8_t7&fn>mS-yIgcn#*$16z%9 zL!eRTgAqJcKnh_k>>1UevTb|%dI&beb+;;##M)Fy<$lAIPya&=X-mBu&S<^?wyGb) zn2)pod9MtAyIby1Z%}1&XtXMKudB7`JY5-7Eo-3yG36Om@wCWLrB~AhTtCr z>LabW^Fo}Bh*0d?zoaDiaMjM@@x;D()2|re`MnDevos+2h8Yo;KK+f(@*jh;GQ$Vz zlHGiM14O$Cp(8m!?;s#veT4n&Z)o6f+V|wxEnOvx)|Cgywi-IbyHj`=h|4;_(MmF7 zlGp-&hFf*(WPY{|)(QrW4sra?iGh}oe>G%$A%>sAf-l8MBd)LBf38LPIbR!Odx1#8 z@7)bqbaFe?l45+iIB)XSc;x!12!ybSwzdJ~)`*V@W^mV~Js$OP!LJE0t$qo50dvyP z`|;Vnq?T#1&%>(byF1-><}kDiCWKLo>;q|kc*@qCV}n6^DL$2LXa{^#UIzhS*A>=S z`fH8myPPTD8NNwI$#4M#*tKRos}Hk=_o&x_JAl4U!8 z_WS}OjT1fxlxqbGd@x@$}M-4=b?hpzNQ`*}FH4$G74uFl#LTcymMy5a8XM zoB^Exz1>LKe4X$6idsSt0c^yOLChR@cwjAH13TFJ-4KZDjmaSNSHRA_mxVIpmF=Eo zcgx86S+1vDxgsEtZ+{!p-b|1|2gQ9C}}{l7s5MmU=eJ@{FgM6M{d=p@?^= zDg5|PX}{WSv4LiH`vrd(bqas*fZq|03q;;;5}>IZAxKaC_V5<5PGGTCVZd^dAiZli zNYwZ@HZlOqIrg4?BknogLB3H7^^g|9`TDMT>}`jKLb4x!a0KHr zc?_C;4J?56D<=U??tBjwseC~gY<@|Vjf-4LBWK?vkSy+NIUi+xp<2f57vjMM)TjVG z=p|RM#o}I4$@b0poCj=$1hN4TN||N8wyz7bRtoqs$H|}bu{`UOM{a}xD1{Gn#9ALh zUX+OjHv_hl7+8YQEM?yzFli@$3fQ%2wpD1A!=T;do)`Fq+du5!AEfb*nD~3d{|R;c zM=1<%uYU-{O!)n;zwsXi@^`au_ZR&CdvH_xyZr+~;yge29ai%=240nqa~CiXCJEeK z)q%O}lJ~DGN590oWBqfxK^;Ybc+IZ{C~^-6y&iM`DfnsEP5@MGeuE=_Lwo}o0qY4T zW>(hy25kWAV?E*LobBJCyCRFZpSw*;_U_e3h)BB_ly|s~-LcTSmy)c$BB5w-IpJax zoGtS1CJCqy!G@Ub&}MtaSmqs&G&trQ0*}v^EdDiS>sS0L;43P;_P#11WT1#x?bsFx zS&o{21UCg_Y)tv>739@_n>S5`Zsra7$Fuw| ze?XWBz5g8Mw*8?a|GsGdzHWa%1Vxm@=SH2`$Fd}rJ780eL8FR)Oct=#3VxqJDR_VP z0LOu~^J<22OrsI17^$gW?-~qs!L?sDrv^rhUEkpgvgnk#muMP#*d^0m52PQFSZW#p zK+${ZB)XUVa7RAO{)5`tZ+@7K0H1}laMxX?z%^l??NL2T188C3#dQJ)d4Fwl{}SLJ z`&yyQEC<`$@J!c#;~05in9P23Lp}Im10EmEiU60d{Z+jdXc9tGGp3tq;J<5ksJ(2x zQ%|sn=gWyUd=N;+YeUkPIv;-Z&xO}g0u!8A0<}jQbx^7*ghK)eC3()+FVj*mB>5@>)i8rTz@q1xYMJP7!IB)ORZsUFhId0u>sEW|O% zaR(vi#X@UiKz3W=6$w^a%l~BV-If!_f^OXh@&yNjjW4Ip5(tn$fVcx;BoJ5_umwE* z{-y*NS6BD{$6k9y?24%FvH>B@L(a^RBS)Hyq5igPOv3z440}}jFZ`z?#R%Dp)ambc zTYcKHjH|nUPKBDA0|JWfcW88z=4-Xrn)bV5H~T)TzB%vJ8ib{$a$@f_>f3HSHI?}3 zd%Ii@%=UEgTG>@v`?1wf{E?#Axmxe%%PBOYSy)%-0CTP;4RaKS=1#5RJoJ4rb3+T_ zo;wsLgxyW0F|P{Q72ma*VvD$4j1oYc*@r{Rsi=*AFx%}~?)S+H&HjE3e*U6edPhO~ z))3B&(B2z5awhWo7IwZF3)=tN{>O85Z#@qwU(P6Lje5H`+tISGK5d>Fi#3W?-(%z3 zocZ*OqVfvG*iQtzPz)uDMkD&2+Uxe>ykReNM|I~f7A3U)-Q3m&+$n~;AphOZxO#LX_TOME;Wb;g{b_5F*n#d|;2Hs7^F z+Ib&*_F*3bwBZR%m#}C)J^bXO=k2{s%~J$_UBkl-`;_t7<2t=l7osKgPo&F7Mtdf< 
z(VA+`_jZ-y#?z};e_l>r&+R%Q_KT;t^S+;3)oIY%fUu$==s2%WG!3J&^PFoRqN=x1 z0l|^V(Q0~rv8(UZ=W%!c_Spa@cVKH|E$_AR6@FGJyr_;Vv(6-?+GyGqb}-&HtBGoV zRA01R*0zUv)^MB7fL5B*WK!+FMElyn_B#c6_3 z45!F9DKvF7jH_vk?S)Pb&*XAT_o=l8k4{6(E4b#qLs9}`9l2#6mg$y z{PmXtC1Nj(%ls|cZ1vUCTaa81!G)#_;j2AGhhq0qt2#5)n`dMU8dOBKWAD{Do+4c+ zFjvqC-mgSI)+geiq3iWr-DcK*h%Qc4HS|B`W>48J><|AXZM>5=YtB;VDNf!yCcHq@ z?3!yX<59TNKmu`2u_7JCCXUlXqpz)#+{wq?&t&K|n+Cyelp&UP3?cl!>-pwudbVE2 zYb~G905TSh-3FDG`5@N5bkszrS-1IGpH(}4dleWf+Z|7%(6ItL5^2MKn|saP=X!w@ zwv)|suOGep-twcJ@4G16*Wc>2m)oUtGBvBir#|hzJdS z(Q>C|=NZk0E=_gOwcfkchLSd3`)JeC(e!wUNZ%}d6bI;gHtM_$7wz}?G2C>5v)_hQy+|H4pXp^*ylZ#J!rIF3a|K@NFOFFJN$8Cm2kvjWrzyW2DUnKHlGXznhZA zcRUwAVf0!t-JR18H{Gt*P~KW&gnF88 z1lli~mhq))H0+}zqq|eNb^=>l%7Y!IIo&5O6}lQAhU`9VK3ZGyv>&R%d$RUsVTxrt z*uA>BcbuUqzZ+0vG<2zIP<{FyX8m<@EcAMVfaAQ~^;==5GRW)0>hL)`QkK}U(pJ>p zHBn=ok6QJ2&FD>khHHm*1ygWfr#Y@{D1^=4p8Kn4);*5ShtVs=F6*ayjoR53;aO`J zPEKN3?VQsk^)Qr5e6hX)1SB0!BM{q z-W|-y6M9pXPnPC=rcm_APR6XYUe&6q&&ab;)vGa+TA@X^vR3j?y{=ZDoeVt zYJQD>@7y=id-TZ&1cr`upApeJpTw5u+2P2G!|Z7@Sa*9u^|O57 z^|sm81}&=A_P#Tu^ICh@r0wT;J02cI^fBIlzF%mDIj!wd)XUpWI2O}I=!_Ci9c{NS z#u&8&op@>cA(XE}=XFMeK3&zv&f9JbQfLi#%GWdUKEpi)wa+`xemfy;qH6VeK#QaI z-smeholdyPlg}4**dDx5N3pH+_Kt~^>-*Gfwl|ZtcJ3k3(|ld&ADhZ9`s&2y$1WFt zmwKZ%4^J@C7IuqfT<=l8+V5H)L$~+wDXa_KK#h8Ro_>wr-0x0H1J_&UDq{j68MK|orYwHytueqiR z=a|Zw*~yQauTS1~-UyHsUS6A+6*B7gQrf6Gg zyD9zezR(6Qv{-hG;`3R%CN)8z8IMLst?S>GR(#%pVVZ%$*sSGAk> zv3U8)P)dI}+$wRy^bkl_wxKypl#V?SLsP#*2$X#JN?p(0Fuu%fNwSPaxpLUivzI{E z?w!Xztq~Lyn6xmBMlW01j&9q3&R0JVhfUM##iP07tua91*IEHYUR9xFf4v^i6+L|I zdzEGPW%za8#9_4SHo@Y$ZW6pihx))tdkarnoJjEXVBwzfhUU`%GyWKOwWxZ~FIn87 z!S1^LeWW|L+Zy!H=<+SoYK}h^^XOxE9DKHc5p|jueSS3iPsXLOO%A<(q|-;C;gjIb zd0Nam>iFHEEAv!;9xuQ`(^qY|Ur;+tp#@&xx;A-Q(}4MN(AwCmH*t39PFi(pG3Ez~dzuk0@=en$CT2F;ARgwS+w~M$V;@HCDo3PpAp(b(_!AjW_s=H`C*! 
zHvQ7Rdc&Yu|3s(1qrA4PufhJHj215Xzk4+8~x$YXs&i4fZeAmt((wEx#(C^VEntW>x>7 zQG;u087()#2?XNb6be-k@oqNm!`1j4>lz%Ti-SMQq?TkL_;c+}$_=oy@K@Z?Ja5lE6g|E>=-{aCrI~x=rzMif9 zCF@$l+Dq%5!n^!`C8DwBYwPLT{dUxR)n0{P_8{HR>SXV{yhSwR{ZbQQ()7Z9{?fm; z%MV5B%>enP`}DaE!?-tBSItOL;Jt|2zb3K5$Dux1kfUi*cE(9Oc0Y!@YIE^q4L0NN z_hmmGwt6V(8}BoE{jzsrL3e2PGN6L+42eF2YW#WmqIRQyxf-kQhgsmvs)>m}>S|kk zTg^94o3=E%v*`+Fd(5Zno#%}cB^pnA%cObn^l%$hm6MYWH29xlII5{71Oq;^L@`UW z{^+Cfk2WgDmoz_$1|MP36-6n!pn%?dBK#`5qL6^| zVzGCpW8V6Io)*1NX44Ov5034x$^=x-{E}DgX}2yY1%#)f%P0RNA9HK76|Lp;rMLt6 z{?U16j*_YAsdQ7~=l23K>TZj!CqXi zqW`4Ji3w;Z+p?5hPKvH3{J1Upsi+Fd`n3&$XcdWnBffu1hzpT4^X;-Q$uFGv*=osK zRn`;P)kG<}nDAq>=*rUJ)?!iI%9;Hr1cHLR+db(QeGu{2J#V(?T*fv)=iw~ojB||q z8y7g_9q#5J+MckF10AJEp;D8nxJfuNL}ZE>qlvPDa}H}=#eVLt!)JHhP~A(YmG24H z_KMekhTd~GHug4RlOk)VB2LFD+!qXM8e4aa}1C^5QTtTu{m%|d%GHqmYo9((#GC>0z?mLY&+OEp` zF78DOFUIx$m0aFmv@PyWi|XAxy!~Qogcs98a1?ms-Czn`t)kSAK42V8N zTM+?cwWE8FL?&%PDb$mA73wioc0ywCol!okindtWd&e*C*Ru17bQE*44yxTtNO6UK zYiY<7me0lguY;fGSp!pEqAFdnKd$e;xcKhfb@|u5*kk&R$j^_Ai~C$$PrgTv4OWf$ zxA=YW9_8#>By=v$#anX^mZ5R&yt_91u@cjQ!|>o$`=+@-g*k1 z7L^Un1XsuJxfc%fc={C6sHqo+3r)m-CI$Hek;B9DY6iWjxmHA%hTK;{UTm-R78qhg zN83bxe!^x)2%K5@{b-hUb z{6T-ha(%J~k&L~#*E!>TL{P=L<+A~!5(LG+Z~L_TF7|1t84BMiKXc*t?2z&^hTqd# zyY5yuG^P4(=!^ZL+UluD2Iw?@dPEMU$9iXMTok+Xv@PK2Yh=f1Ch4o$xSeDs9>%L0 z2M7WmLe0Z{@vMzI)Dzswj*hC(BylMMY;=T29y2}+$)%6Ezqx)AGs81z8oZnMb=MbO zW9ZlTzC?FbFEA@)+VwnV?n4C>f+R?+T5-@ui1#%}bbGXN6?KB>hSJr4mH6=P1iKK$ z53Q%~tl25lgg@GM)-_5~QtZd8swLjA^Q^y{felk(BaaJ~)N|!E?+I~;MG~u1A?PoZ zrCU9=&+S}4-^Vj%t(@2g#T-4@h&7_*a-55QhtJ(4;xkb>m$!Y%d*}eXWgg{qEcW5; zy5-!i@A!^~If_1iir>e4{`MJpO}?9bC%?Ox+dlhU{*HU7b5vZvC$tRCX4aLw zuMa0iRZ^wvuo7q;5z*vLQ#LQh9-DfEt|?1A8qG(xuCLxy8h34VKMSo>K2q{(-&DP+ zo}`YOh!yCk(_7_Bi$;d*k%@>s6fP@@zj9qMunlG4;l>{fDUWP_VO^ayN*S@PY1I^+ zOgF@5cZBEa=0$s2#5BxK;*j}PRPQjuej1u!U+%-mINKLfN%anLb^R)Ge1l{CU60R0 zpU$b5Do#^2<5}X-#OC_g*f{lje%!PC2!uA(6fdLA4k%Hat!*!a6B)Apxb`OPp?!tO z)7#OlqH%u7YpUme4a6yq=#z$OGqC>YVCbUJ=?z-0c4~2dqQiG^58_<;w&@8PupeRw zkh#;%L8)c-se9mV=ti~=rQ2rG{RHtEd(W5@^WvVu>;As(=hFT@V=Q0FGsLDjSNw84 zL)xL=7>mSPS%U*Tz*`@KTJX~LIA-AcDAn#HG)T+bfdYO!RZ5Bb{KG>(gEmYbw-CJJ}&(TVLAEkyL0#PHD6uXXf zEsF0O-uIMT7ufaUOc=K`ph4y!9V#MuTgGlzcsXbX8VBA;8cl_@OV>qRMAD1*Q?ZkO zagKZrk62)Jxca2!`7X~KC7#u-RiLSA>s?daZ?SHq7A@MX0hGMlbG!7RE%qN8(c&|f z`;U3MU+>3%=Wl6ym>;gs0CoXMjLJM!RZ+$HWIe?WW1Xt6iQeT5ZdVLmQ}nJT0`1d- zCx~A}YA0A3aslCgmnsAM$LF~FG=W&m$ImQ% zo5cfs1;{T`UXAMpdP#^;FDbejA@4z`oY8ZNGjsm*ynIbLOI1Z=fgT6MM!L%X7tF1KJ51$4h)oW3Ioa$FDtX?N z(%vb<3Igpj(GJLz%Kdue_0rydUC)=ou1UY;X7{B(al%Kr`5A?sllvwb|Lhz5i!+_$ z?lvxzKi}=*)pp-}$oqN1b*_HP%@*9|fBtO`$YNIW4{H_K8{hV9ZQSk*^>_6Yj~vq- z+oyDBd9H>-7Q#HyIhaww}ig2ZZn^<7Q6G=U0Z zwDLB5-lJba7e92Sz=hCH_SlaNItl47gQ`tmZ+*Jf@WuRAb>K%U%gY$|h!g_WFUCI7 z9BnH7#W;-}Pq($fp$nIWcoZc9z_ZSo0*p(5$aFsPbKUC5sF~Qt5OfRns~zQ(nyPPW zT~Qr5tDF#?8Sc}X_QPd=zbG^won6d0F4o&QE{IB z@)hy%$}_)MuhQ?sFD%a2!XGT=hy6FC`^vBLj^r*K>O1Srr{cxEiJI-jDOocX4a0vj z$HX0NEpik!hbh!r8k|e`3o_Q`w_r_oO+ zZ_woR(6Wd-@R`66LfeU5Rf*HoR#PMt9m7gmJ~pDGiOg1%;HcC4vRCXAk{XwWYRB!7 zJW|*vQF*Qf*L^~NLW(tBjMM7oA4p%`{S_B;hL>~0xEC=Sc2z0-Yxiu$%-=N*D`N5_ z;7l;6Av}1xA8&06s`Wf`H7_6FB#p;jJOjKo4ZmU9FVjyw4v)Q)2LmNu%f8lD!b9RG zix@lszg0&xToYCHM_@ZGdv17$O9|W7@NH$Zicitwq}qyqQT%Sjk!mJ^Z8U~YPql;e z2?HgIho+wgKx3?J*TKuh)uqMnFm@>gRt)eHu2Ki5Q_;N|mHrb+pPzC1Su;E*<8g~! 
zZ}$8ahu-~LUH+bRTE-xS9V+cl5t|lk@&{kI#s*YZ9q2Y62ZX z2-HzwGiUvG&;6nya_GhJ7cBp^fta?Xgwm!#%x;WK7R2jNd=d}htAbk%)65ADin59$ z(CTl08_kN2m#$$TO-}Pq zm;2F{>#SxJaf?oD>&kQYaV#W;^>apnSCzK1*oU|CsC?}t_o!Q9l#F+2TjiR1ecn=O&BlyNf`PI2EsH+Glg5Km=>&RaL$8 z_VR!RqqK}+2%my$PT|L5t4gfzd|NB!^WV?;!k*cG z+rqah<2+uA&V^s@OYG<#cX6$#IKy#{C^m^kn{l^2$#ZP{2?elttI%w8bU7?>K$N}` zZmi*fj;!6p$UOs2<|hmY$%=S7L2eAZ47(hR+#wr6MROm?m%-pLur|r5C+&258{eD{2?AMM*GffZ8uCw={`NtBs zvB(h#=OA{Ta6JVOdsXWXy-&*nx4B3RrW`N1PHukZFKo9MgI~VUEoLarWPyLmSU>nS zCEndVa`31qQLxUi$}2xXBQuW=-PHSVS9#!_gHsR8dKuHuUwv%>2E>cuHbJq#*lZKuHQCv! zkpnQEvZdvR(aaPh+A)sjqfq8V7#G>l-B^k_-SBfkUIJ+eUl!yuE)RYoYv5{kL+zjO z#=K?Gp9oA`QDZl9*0833#~+G+xeB9>((+mJYYQbbh;o04Nw^<_ic0T(KYMvUmQVct z@AqRp#Bta20^eqRwTT{c#M-VX`4-89vA|w)uPv0&INGvFKW}`IJ8)DH4&0o3aAhU^ed?Q}RKmW#2iHnx{ z>eM3-qaVar<}*eHz*_c@&nV`Jz5J(NU!2J!V%TDT_hWKAg`K=(PB&lkdS(>+|9WPZ zF|2;|FH20Lm>2rwM6v3BFIs-iE?c*DZ7>6O63g;D?i-v*+ExJ$i|faEfqx`BK-an^ zeY8cRsu(9>X*8kl4t0?7DCw|nopm)uHSTB$lLi4%-wd2o_4K8!(Qi-FHv=$A{BZmh zVj?9Y2#VMr%#k8`T_scotLmqLje=t3S-u#XabwsPtqJ%7R<`ng)Lcb#y2!1GBTklI zFQ3O}HXn;7dGKCAUWh3!FN_kDLt+2};jGdWSH`w*B-kFJG{ zwh~RnQ1SF0acXmaZ>E9keQmnF(giI4?5l3OJKXfgakdDxQ-4ag4RTQJKG+C860(`^ zUj{y!bw)}~slK>zwq@C_W+Lu&M0M($cBTL3k!b}37GBS!4ae9|SKUi892)+bxN;O< zM0GN2Vf)>+vQmbFvz;z3`NxRdfQacWoVU8X&m3%ZjI{xE%j7u>Ee#u^SHc!c**4T%#`yU7@_nd2^n%e7|Cae~iBxF)yiGpI13tpD6Q9pRox+qA zc;~4A2yIlRK8hX&! z1@Jw_jhtSQ13c!2>5pOqUxYjho!Yu-2eau8F5XhZ-{yCn5z4Nre{8#H+HZX69Uh+V z!v|Q#=4j+Dtm>vskMS&AV{Nqcyvn6) z)wr~O`dSXiI@0m~55Q<%hd~@Om#QnA#=DFYlkS|D6iR>iq~$$KAL9xJFs0j!Xb)wc zrz`d?fdgS4E+|+@e7-#AZ}#)LA1U-6+I3g@*nf=a9aDeInVw3_lYcYLg^ypHQ3Yla zyLx~4HAKmLX>8smqaZ!$xsD6z$~9mTuM~EFQ}th1Q}#4!dSIaW*d317h!(ATX}snT zX3@GGgCmGB0_!IK%yaXxcuhsm{*vRAm_;neb*OAgbzu4PB^OKp6Oz8YhybDPG0hbi9` zSQZ{TKQGktp@TkwJswnv2MSjl%z?Vq*mGvYI>>0Z@qr3EAbnBRS&3bG2Seg_GL}+~ zRbn4$-?A5V5bacO`#SrR4u?M9)n?7r@e$=U2#mZj_yAWxsK3U3ugX2GyWQ1jsldi( z4Kt^-9$IEbVMue&E9Q&|@nj5n3g97@e|Xv`u9o<}OEko!^kkaCQ_ zl)-^`kUf0?-zl-o+deOI8aH35unm#i&wcbK9>>2t!}a|I|};+RE#Z*%1u z+;f_@@hHYr@=FR|NMh_oF0IHJfBmfY2j8p2HHtWuxc%4Ox?-J(ITdli+CE+9m0Ve( zqvD&kM(Qfo3^T26YZ-zIVy9H`R{=6bei-3OYibhr$gT2>oEhkx8mH7=(h|m2rpag! 
zy=k%61qW#IesFu&?wp#t@MK4vJEnbVRk0nGf+gss`AEAo;SwU7swL9re^Im{P5rRR znm2r^kW=A12=OTY4hVU9UEjn{F%`nYw6%f@BKcR3=Rac|cGwU!RLIHuNqQN-mZs(v#?m_@MpT}73(c+KoR4X{?e7hbmQbmzR%TGg9pnPOWL5TZ*_Yw0-rKR z`GD4bQG5U6z@@7w1sP{4*rw$d&p`oES90=*RJou#(0Mj z%_Fzr$(^v?!SVuXR!0_g?dErVnsCRb3UZcS>07u!5V)&~SDYWjQOsF8L8N)>kw?AR zsK1>F_D_E0V^kLt{$lIMt;7Fe4u~S;9+0?yf~N*S{R!TikX%Hw%2H;AY7+|Bu|%K}a91 zQJw)FxuOeQPsZ^Ce6kpauI3y5xrJwWAMf#8jX$P|C&!x@!K-p6D-(8(8vLiuGE?qJsiEe%R~R z^(?tGH~YQEc9fX@SJik@&F^H2Tt|@Nudg&Wp$H7qW0Uq)`S6gV^xO{R93jZFa>kSL} z*?U(m=E^Soho3ysU;MMec82F$Y$R>4%ppm8O59qGeffP_cTIQYJY;SI1H*cX}1L_D_#Z!o7&FODrvJUD%JG}ts0 znp(@X3051)`;aaI^qm8CpZ8c#%!Lk?Se>bH`{nANe<(=Q^F8;y`5$(H^_;!y3e|e7 zDdq|O!Ob4;UAd2&av!@$(Eg`AF5-b3e(>A>Ezie5aw;EuIdb5^)EEmmueb5}2oZTa z;NTW(xzev<6DbM^&Mo``Z=@y}k^-JniNkx$^{|f%n^yQuSDPl+@pT^$+qSpl=RoPh zl@#}Qf9kf5^$#J}alr$b6nUV_508d*HPp-8;^T9dn1}-2%=44~c@mEtiR;>v-}#7( z$XH2Yt-sds$*vXtg8X|f{fn;PbvUHbM*iLdoIe`?H$m!`b6N*xljp`QUR_S4UF5`- z@}x7qKMrm2v3Z3Ha9?+R5EbiIo;}*hPo07Pf3BZ+P0pXDpOVA74Zca}#KnQ9sl;T9 zh<5M98kLJLulh8wT?~t}f!E^zz6f(5?Z!_m!hLks6(3rWCwM-O|HF;Dt-0@OEON6k z(x15by-BQ84*PKjt{l(Bdggj-V#x2;*UER5gD^y%$&(yee6?HL(%*~E22HbxTTVcW zf5a{BZH(9Wz9(1O-d`Ak%t`6eCad5Cw;1*s=l)vL(uUx_7jsnN>crpIbLtlcGYtv{ z$2%_Qf1exkOK+QznA|5FS9>UFZy zIp0gaSS1(d*LuY`iHLF013~`%*V^YZe;zp4JI6adf`RW3WsPx4Y^b5)tKKZ|j- z_mw1b|2WT!xHyx%7Zj?#V?iWdm2vT=z->$3%RR5~XFOc^O%M2DkvlE?up58Gq|rxf z+;+bLout_Gnc^I@yT^yNX^NWGxqvNU|KLKjoFqjJ9T5XR^<#>$2aWR!oQCeQe}1TT z$Mug64VtNTG;}(OPe*ypS$SWMYH!tn^6b{$2=IYzr6IdS(vG!+-jKXIANP1GE_lCJjDf0_Zwx{1wYBpJY{@>s+r=Sr!li$h7^`(>qqqlkyQpgZ zmmLaHlHMUSHURkvej+5oV3^G~FWKWZe_h8r$8|GrblbEU>QG-%717Z+r(0V+XyjN- z_ymcyfhWGsUfK%wHD93Ne@o9|{mcD|b48oH?Yr8jx?iQz9O&>V2Tt5lo~ubu@%@3b zs_v6>D}W@ify=OVHG4$<)OGYQ{k*tAWh<)W&uAwp>su5g6`|KlKg(!{h0K$TlZ4** z63_qbS6^!wuKD6WIX1WPx}C|7IhE4CE_v7mPAcu*|8l)wc~%cRfBoxqZdfX|saV6` zt!#w250wXvWv7<+RnFD6$P9Qa*hr7uhG&{d>!2oIui8nhG@Wx}2oJQx8E|~F*>p9c zD)NpCDAjnR^vMM39)JqsdiI%)exy@LsyuFef4@9Y@9^iOJ zJxMJQ#s_$8yCTS1PUQJi4e6LkT?@EtVIL$$@Fzct{6H~Bx0nE~&J|-TIRj;`yW|Gk zxz>g6Fyi&nkKnrCX{hPmId5`Ljm0>s2df+i!KR@}YXe?;f6_)7iE#Z5A8^u@F;U_U zS)1D8;!BL>Q>3&Kt3{qm7d|7dt!aBftx(I*Jb09zHwVX(*c!bWB#%Inc*3MJ3SWB) zSXi#R!6n?Zw|Z`=sqK1pPw5t*m#NU5Dc16D4yzL!0n#M#ezpqEB*`qe`zn~7tdTM! 
zMc?rSE^~mNf7}|fcjn|+CBNcZ#v$+m*Dyk)|AyT!{kJQ2UDya|H*{&6bJp6gnvudz zC#b4Gsx5pPora(eiL?CqFxKe{9gMm?!vZOJ66xXIuwvy25oO zw{9y*JnDcLvkuA4gM+>0h5v-#q1EBkOFxWjYx;$*;HU4W*Gt{ESMnK%8Q|!$7v-8Q z;`O@ZW4z@OcOmaGyXYlP)VSvM@M-^7@zn!wAi@`xzH%gaPbKzM`dYuR7|GYT<_{EF zOc4Wsf7d)>;}1BBjPc#IUh9F$&XttDR1G5}H>jQ(D?b0=N7bldkbYE{c=6~%B=r@j zXk?J9yP(I2Kh(bEQLg?F%dRtN(prGEtr#{oJ%m4izr@-Pj3N%c;v>w7_-^i%xl8S*po zedpqRF>O!5j*XR|1Rq^?X!%1f22R=;%7K>`|K?BQ^9+7oa=iBv%u`1^f zHCe1|UFfvoo`_5wVQ>solg#tVOurha)eeBc$|<81LB zNX!qcB!u%qK0-+~<6Ip>#_Kp9f3S9*!~csvOk+wFUnYGO$(eh#Zs%-BtPC4gNYiXp?{6fI0Jm)&fFv^<7CO}rZ(`FV`r^uGFGIfe*@*UHneN< zJ6^^aQP^}}0skj1RK%1y9;E9*!Px^(%5QeRz>Zk#;4at=ByFAy`r003&4yfid3MD& zNyf7d>K^OBEY4NMbSikr=+E)1WAq*l9+awvf;EdTS+{i4ps^h}#M0@8dPH)=(h8 z-`J};I0kl8=G+s(9!=;?h*9oyyu+oy*SyBsC58&g(SoC;ezr%uf0_JDxqoF`F87-( z)zHBG;6GI5I7<$OIG^EY|l_T!v$3qQZ~Vez&_Mx9RX5lX=; zXv-We{n=zpf10~}lz(LiL$VV4lC&7FP>KoR4*Hk1QPH&rF_G(-mz_qk=^iSFpf+)pn~tpDAirsy>?eOj z6&LN(f2pa=sPU8IvySn&PKDlsXkkb2|73h z<_k6`bNUlYbG!J2foe?K)WdVYvwrTlugoFRf1df{^kR%qhC4l>*nUtZ>%&x0sCi{P z^-;JteViVThKlErHzXbrD1ko->aQagL`vejS;g*n{_#f`#y+-}+6Wnzp9`MO8 zap-Ivg?c{|9t`(+vbK6HV+~v8RLQh7WQ94tcjR&1U!Mow z>tE;i_BuYu4_EvD^I+-f|={#xbS1H z>wy>oRY#>#uT!5JjD4^)aBNHU7H#jKe|ALpL6epb;)tcbZW_UvSPb<%<-DR+m7EwI zY*-gF5iyv#l(bpZ+GlE9N?!bRY<{hcAHVaVUbEl@uBjs^xvCbgJ&7+=N2=6+d!fto zkQRCOheM}loO`{gwgbm=I{kY7F5>SGTS-^eI%3Ylm!9(WI=nPo%_pZQPMQVNf4#Tf zbSJP0;0C`@tJ!I{?6D%3cU4K-Q6v2zps~KeN{}qH6^d6!yn~r*TJ{K8Ff=6fKvQqTkl;C z?Df`RQq^N))u0X#Zm9^tibDA2fB2V(on7B+5b}g*m;iR?&)`Yl{c|JX3uBYG6rU*E z#$i4>y`Y70s|UqC`R%q=q_7zv5qMdBbp;ULf)d%RZ6^{8reT<8++sXoWzGIxXHFAwnf8=LAN?nm# zP3WE9ENiGX?bTcEoOA~rh1G0WRkcgBQBLq;k}BLLeViCGY9guwiqXJ+TJP;4Jj3|d zqJAdoqtM@jvyTl-ZKbi`oErs6>ujE**Us9ec0^y0fS(%qdfycKb3d)tSkn=8{h>G? z;G~>&w0ky&H{YN!AvwLVfA#EBuqE?jOZJJ=ka`V>-O*g&kd? zX(9CsO5f(j^OiO7!S7nZ!nWM7)1j?;7wQ9f+aI<@wPh;VR-JYHLOpH+>3VQlB(A9u- zG45p@(_?L1tY@g!e{W>{Bzuj9aoMgj%)wmnTNHGDsJn7~&Q3p`GA2Pg^=L+^S7bcj zS&Q;@f*#dRi!lRpJ6oqPqW({{e298YA$b)X;?xL!94h@&&?A;ICQ{Rb@zBl~ZO=Us z!{HCw3SuZ|g7Dsa^v+zcZ^3aXtrXuCbbTXAFFD@;x~c2Je{Zqj(!)P1bTRH@B%j+V z>kwp&68yv{|5n5OgVh$-DAuaX>B{#fHg#SHluFWN=d4daNYVCbAgCdE!7|v&9C=bE6xgf#X^8Gd5l&{U#ezELlXXO|@9gfa$M9Ef1OVhcMUhBH`K%CDNbpJom5|`>2o1{Qcf3?oR$3RVY=*cYGD>UBDdZw>) zq@F!@eu6*i>6WU5(xw?W|7PJgoZSjKA+B=qZFt!c*Bg;jgEtz_4DsilE&W9WVRrPj zwl2!@qGq4^9?3;j-w~e^gjbu?P{Y$}+0@N8z&{*iB~aUxyad*-l7RoZYWLg%#fVnq zHI<+me^22H7x?$6Ux74G?C*lF)>Vpn^=Fy;D!JRi`G=2u-?#Vm`rmWrnrFrjO$iDc zWbO$r8kngZ2XQtmS?ApYo5m+VLDnO_uC@VH@ka~ppoVVYK(NZl_tI#BWO2cvAa5`C z+$2Smze=tl_GgQF9moIKn-7@(odbKX<-hu~4>bqBe2r_Z>-BZ8o`!1BIN+xq3!crPQTUvUAEJLA zoG5*7(<*InVta#EiF2c+DN_RuxUa3eQ!k~RyU31`=R^~(G2K8U|G|=<6(?iVQ{N!0 ze^Jbt+9xnq1)exQZvg4Kk=U_2Fw#~-NvNO3_i;$r!@1i6lkmIll=Ugcz26x`->hr$ z@#y`bxj|x)xG%%+jTSvE>zG>7aB=e4Gl@xOcS+jhjW~|c#mQS)rbScZf}pM5c@%4!NZZqrd@PsjZN8g?DX= zYyR&MXP;23{}WSO&*~x%cwOH=vBM8;TKY%W1ndL&^bZ$t9ms2=Ju~m25<0<^^Whom z%E|THXPY)x2XJz^KHV>fYG~M`U8l&uW>Xr|qM3m2aN_;B>yL)16ZaL4hv z%dwtqbJklCKghV!VPCj-5Hyw`ey|hbQ>lk`l6nXab#D)PA#!~Ze?4lK$sD1?V|dT3 z?KBXB@K8F>ZOgRLMp0IonbUtSe>m*82RQNYQ2o|3*=zhX{pi(b)?RP9=E`1n|R8%SHSs z^&W1%?|}wmQSg+ke>9t7is8FR z4C&yTTJE5|1nySGkU#r5jIDvAI8y3A)nQB>(I0BU$#J(Y)*yZ%6 z_M*U6ueAV9YcT5d&aF9E+Tez_prvrcmIvaT$vnho@KIa#scD}L!}uYVObi+vOFu8H zptF>+JUygD^+@`8Pw*-_e?M`l=Sia(}6zz18{`Irw{xHT#Je z7hL5n6sNX6l|0BnG%*L1@Wx-{_we0HPB;~|_5l1<@+IN^WtoyB6Kx$X;AZ-5;j-6S zsghuiPK_@3O3zS6namG}q>6snfg^GKR{d4K@>sWroutZpwzfaaf7ScDaqW(jkmBXW4m-wRLhA|o#1AG@HKJnR} zHgNT%iS81(Kli6@e|NIxzsQ4)cwVJR>O^P-zggC`OI+bPcapbJ>ebx!MX$K@&4ylm zvSPlkoSa*(_saLU`m=Q6Z;x_x8f@E)ovEVQduvU2ivl!$)fgWXTR>-0B60(*7%_RsOq8bHjTe|0fu)Q+kam$d{ad~hAD 
zHMJ~iM^-JmOxy3~O4h^S4=wSBW*KguVik2SWnO!7owuUq=<1K(YbAbZ$|c@!U+J17 zlyUR5zDv9LQ>DG66}%441#gG?f1sa$9JOb?>s9$C@p3s|ALDFJ-%_ z9Z;Z1jTpAtSi@(b6jQ#gt0)jt6n9(q8@UcRBp&s2Zs+9PzxkBLksJHCv4xMehMk9ODnLk(SPG(_*h zQCev6x!Fj~zsz;jN5hYNiXJKGjM?zEsV_uXDo3}fI;D308c%N=C$YO+e}L@BeLUMT zJTC1Be>!WAx_dt{-!-;=&~N?ar~c@K-s=s^^Z2e0`VM|eO}?7!+U-;NzB_?FkS8Y3 z+kdbxBXF`qClx(33tw4NFKuj|eag;+)73T!TH9~7r-|E1)R8&SY@Y_P4|u@_u4XlF z`@w3gXDixsR13sLMHPuQ7=wVJUGR;<599bt*CV-cW42K8e#4XM`N*OQ?>ee;;5J z`eILo?&w`#?BCbyzu7}{8^E>inp>sz`j39i56*bkfRlTp(27#J0ZC#EMQ%4$T3A|7;09nX?S%zLi0%Gc_#R#eqU`-ODgmD?z^fR zH2H#<{^wdn6dRv+32rvB=|@Vdf26mL`0U$3BP(LJQ*2eXG@jF2cJDCWqf|uIwJS~fGa%m>t*f!bJ{1%Iu0ngX=-(; zrxLU9o&161LJ=FZ;G7{ngvzkDiI2vGS`xS|CSeg;g|LWXAG+4TZy+UsAL*WT)<$b-pBjDhTsgEk<=SS>ynJ%N2nfiYV?p+Ck}z=gSnGL?-o83JOr8+ z<8%0Mboiy%Rd>wMe>)<0j-!$*JOO?&#}Y%3__c<{B>RinI$|_W`9Rl@<^V`qsPyaE zgH05Cqyp#s?Q`DsdAX-T-&5vu9Os3@L|H9sGc7kyRr@!v=tLBl!bPKHFaqs`3 zuW}Z(b@>|IXE|y8np2N+v4*x<62S+7Mtc0H0X2k zwFuw1_}FMUf9Ju-Oiq1wB@$zu%Dg8vQN(O;CXr+$*Nk8$1uCo$kH}?i`P3N__Tx{E z;I@+I2*KWNdJGL}meF20RrkWLF4j5{<}tGw>&>CBJByWgk`>aDnCU|p3MA7)~O+Z+o^nc-HPnS z>*g@PsM=Kb3&+=5wN64eg@R}F(9=N9-3_NN_>ZziR_5jYnZvy4naMsG54ix7*pvJS zx$WXwf4_VotuvxFG25J0kQ8;VI-+&~P(_C)=&Djj4BwvNgf{7&%)OGf8 zt1n*Mtye>rlcX$qw9bkeBq!f&r}WQtQjGV9-DL61MdsV@vA;s`fT9Vf8F{i2@w&t+krCJ67yQeS8`BWRXG9< zccl&-^)Zu5H*R0Vp86Phe%2dNkyioxz4~fWzqp%1<8MhKUy>cr4 z^ywbPv8dm8jQOs6uh4cZ&*`f_bM28(+U(`PMYp%=Xcrjg!$|5du)n78u4Dlq22W}# zeUlpnMsq5ss z`h998saZ%}kK%xp`(&Qdk1l%CCha5JV50iKLyL){Oy(8}E@`0QSG?nLN?!3ETkROw zyRngaV0!&Fx&-Kpq91d3p>||LT~w&Ae{AZNLYhbtE1Uq}&U(`#zk}WESvoZge*BJz zB59fry45y^ZS62u&u=!mbw!<7@D1*Ot3ckqAt>7ka;Xd1w7qqPq=3|gyy~*vbz(|i z=rP|a<3@=${Ae59W9X6(Q)(KS*tXl;+~RSg`2V_vZf@~=MKzQysYLXfv%1VOe+KV`o(AnH`cG{^=SBuEmwY35RE+0T>1+0sc`o>+ zKNv&w4~$###O}F(LdUJBd%m4Dj5|E9 zd>Xo2=u->4#>!D@;Zk>+EC#~>1&dqW%%hbPetBQ|i)Vg!_sg&LqBx^@elNUo(e60l zV?Cs_(^6--CUahl+wXJze=_U0SLv}YdH?4IPAmMF9DFiI16<--C-2i2hoL)cNxb&U zEnm;U+re523?mRT>A(MfkjIZ{pGh6ZE^a^Z#P~7f@h8VG62V0tAKViE>aG)X+mBL1 z?04<^qsHhjjo$(bE%Pdc|8du3AZ~1E+@GwUz10z-^YTxvqHC>ae^Ep9i`RCqG465W z#KRYnkXPc2p;e2r)7w6=nbgsyUj;bm80=;vb*`%&@#WI3b-6?Pn0`A46RzeZ{BFNY z$T7f^K!=_+Dz%aMY-{8Wf=sz&V?f0d=fjfmgBbp(YcywlwoF|(0bG_9*NizkYUmGt zcc6<4zeVpLQ?XJPf4ePt)KxmNqWyyZR{D||Bms1xO066xwlcJ}PoEQGL(z_`x09h$ ztUuEm0}UnFe`Wy}55BwezD92iu2qXO_P6#CYnRTm?ERV*WDBGJY#+@*u(u;bSS6b- zhoAUVWqVk(t(46U6r|(O;Y*_@kZ-V@qV8Mb8$Z2%ZgFR!e|_|O-Ak@E@p2Kb-+xa_ z+$)dmF^5$4O!&zY%f4Hs=HW)6{~kDr?k9s9+ThR51fI_ZXJt7wPw3f)uKnCf-BrU? zf*NPf)ebKkZLlip;|>EOamgX2nRTL2&a=n1l8s>dE)^x}4pa*GyqKCt+wRqN9UU!% zjW2t!xjpx?e?bq_o|}3mHSwF)Mt_1kfF@72bn`z7 zEM01jqfr8PRdm~@Dh{Pw>oJg}#KQGH$^f*GFU+dce=4KCuxcT6Je*!y7Fz!=9h!bp z_oxw&Yf~9>P0&#*aPWq#1(Di7l9y*$Lk!~@&0TsH;pY!+f;`matV4yP6eIK zkQ~DszP*W$P6Fls54?3^iPxMwX%-2q;M-u|3f%Rs?{L#DkhKzJ&hDxYBj*9UT%WP` zX#qg)f6Q)k-5!E)ACeQJd=wlFg*+PH+qUo{;dH>sE$XcUY#aJnf=;jK#wc{YqoxLx zmjM65d4%DM$z zS;W>wY^l>1=r#PMA4;*uess&^^Qj#rLdR>2e=b0b?M))2{^!0%AO zb>E#KZNCQIJ4#o?x{@c3|F#m;pCFaVe<&dyOP@yjHa*WQu5c_X18$k zFZ{BErsP`3#C+ln?G^_CgOZPMvctvT@Hv(F)vFehD?1La3Z1yS-XFe< z`Rws*vJUR#?Wl0-TS9URJ*BcJp8fh>_*8eTxuRY|?lDBdWuMlu&~# zTvfqAC4~qc@d#R2p|!<%;Gy zbtQg^zf|IlWD?+54mHRtG>G0%MVSI(q3zZN5X zWb7(datPXA@98YeCakmZ&O@Cu6{e;&TElaT4n-jv8u58RZ+0_4kMsN0v}3Y95)Tpq zy7D1T1bQo+{p5O0F_jwFEP6lV=MstuI*OL1)3@nOk}@E;wXrCe^QskQVwl2UOFnh^MJ!}-K3vFqNv7Uu|=DlI80O1Pk72oL@kB7 zw&7|^ekygATpKO|e!t`^(;-)CUB#JRIkai*jF$Gv#Rr1-`B>WJrGAL4r!L~p8yE0ze>xaHylZ-~l;_K}UIbk0 zW`BxtEb#-dXvqV)<*UjV5MTd}F&BIDT1Rjjf2p$~Yb$ZB$tmCiVt73K8TotSpBT*t$vLnM zbJQYlD(hTde^-?+a(QQBI>~ePOKo8G`>58Ex|;!YvW0fFjJ2>8)>d6%yJby%4tHz? 
z4kR&RaWFomeqEt)?vDq))J2#__-^v^UCM&_d{v`=I5`knk&2AGhU5-lEZeN>Tu=45 zN3A4AMcT+$YSm?aHwjkobqeilVo&lZavbFS-1W{rf2B_ISo$4je64@iX)beurA}&j z9z5vLJ?Q#8=qldM8mR*$d&T`vda1nzC2?rV6*`nWV`u-Pn+*H0c@w z-W>I|g|_M6-zRNQ`M#S5@Pl62T`O4;*4y*%*B-S=rAD~aT|qa4L-tP&+~c`(t;e2A z{R41Ff6iao*R1SI^Y8hXtBsc&<}xDBt>AB%*;|>0#%!k5F-pjb9 zOOHm`SAn`D3K3f_zT7Hft+OtdA8ae)Ci-W7e-PM8{EIcXM}wtYi@b-}(bhhla72I2 zYdx|?-cV?tG3TDaV%&FCtBmXJ^>eI^UtIF*InCJ?-1YTt{%a=()A`9$mwmTyy$SzS zFVU?7^kQ9s*R(@M(YGwW=NC1os3h_H_Ig<%b6BV z=}^Ns1#njnjv*S2CfCup#y!T^B;U8a!ycAe;)B7;JKAcvzyTm$Y0$^Hf9P}2Fjv7d z;HKo!a=Vh}`B%LsV_b(yjD74((!M^T7Z>Xgt^sE&Ns1%6>!Eil`(WO<=7mm&OjOY? z7`tZ{=fbJr9~5|2!GZW^eUQZbAAI&w3zNF;YtHbekMJ)})UCfLeWD)n_2ryA_UC!1 z7cbU`>W`d5JEx~S&2A=De}Zay+QK)}4e8s+`lE}!C6*>3nK`46UNZ`-o}zh+;Sj9w zEa;~yIcZYgMEX??0w9TZkCrr{l$_k*V3khb5z}(=7^ih=aut8pqc@>ELl`S_2gbUq+2SJtkn_aiFp>lkMjs$yzN&KQ#e~R(0Y7@13#K)Dj zQrKL3fShC3tp+Xaw3GcA>1U)h;m0Th0;IMi&P9e%_9up?ckNGA>Y;4u_u*Mp+3N1; z8^0^p0aSizQOI&;@#GD*VnDyH2VD!aAs%!s^w>Gi7$$Q3@M4wa^HXIhW97eG4|hGt z%}&u5UFM8>q_OJC z{7~XR^`$?$$V{0N{OOO5%QCXz$AFK~Wn1E{jAPknuvm*|*FACzZuh}5>5(hxtJ%uY z4q318tLY0^e6Jm#d$8{b``f7=M?E-8_@qqELL290|CHCdScY`fP>#wS=5hN?waBMe#@af5NPzwOm_DgI*H%KLd%Q5rPkd z5fOsW>uo9MZ;3u8I$0C1KSH}t?|W#b;TUH5FjIz+QS+u-*q4Zx|T`EEB8jZf_ukSl-c$`g1?l#FE_sHe7!1cu)}4M025sH2RB zbhL9?!!nnS`x3n4`-Ze>7I{Q@elIy`Mj( z^d$WB)ZMC`WuM3M{;Qog_QTbe+io=u?%;InfAkN>@Z6Ypc8za!^`yHxoq#V!)18K= zqh_t$xOJYc+vuzpeFe(-=)blr-f^j2F8B#8jpEe5{Ly57$yEB8`c!;Q_SL(;_98UN z1>g@U_~K_dqwEl|t+JkbxxhS9(SD%sAM*^Ew|h|Pp1Kl?lnw0kh1=2=u{xE_`9>1w ze+KTD3gsfw7I+-5n>j8PdWifS+<%6@!Ox4BpJ}Nw%n8}}c`kpp8s+DU>yfLms=N-w z$M^5>q*+wdRk8x<@W9Up>Tc9G`SX#GP4cGNE~x;1hUC*_hX^eNf8*yp?j9s1hOK

vruVEyp;Dh=FPN# z;r_^DasQ#bf0N!^0u!5fV(Q(0-ioP)zN<|?+(3>8z0qY|9-n_g28UujblAj5j<+77 zxr$sf*DF4+=<@ZoSRzv!TOWc3DnMQ%IX}3{0q+_Mv;(o|a z4RfO|f9}ZnXQxjE=j|)DME(rk&-*9kc=J3cZ?isD;>^aB$~=bbJ6O)&BxinmF5h3S zue#^H3QMjBy6GWBfeC^HM>iT7e`@o*uenN*>vjEqe^e;sK>-(+2`o8XqKmKrj z3o?^pj}t9lhe9gFU=f31oZIqx3K=UpMql^A{ZB#1`)0Wuigx+eKY=+lSfBFc7{4#B zT)XQm;sMC6zDI9J!c3f8NGR5T;!@ zYV>84<3pbt`etyXWH5aN;%}yqpg^u|X-cNh?`4SAi+akT792D6+3N+O8Ef0T_fDlc zSZHr6UQ)!B1zIW3vRhGcC*(IMa&7_BqgMFe*GIpa>&G2O9AouafcO5s{zkk22%I5+ zE0+p?)|IDDrLnBUmA6XefBCyIue|Onhsx8T@-nGx`%hM-VO8EMZ=aP*r=ov1zAH29 zjc!;$W!i5&e_!U6?nkRuxr{5_VXIoX3>xM~JpGb^Q8AAH*Z$x*9v~cx(I_#-c%=ix z-L#F-If&_SyjhWC`pVnJCZ4X;_Gq-q#cGs%0l86Vi~4GJ1s5_of3Ow!tJ&4Tx#*8k zSJcj2D9jGUSmE&??1O#Q>1$c114PC71%zf|3SlH1}AI{r&U zk!;3uoBpu!b2j(le;p3O-5#<2-$Nf zr!B7+kW?%`=N6wMvqvGl9Ou;*b_NZN$EJ|ilTGRv<6Uf?f7KlI4S5gsP&vlq(PnDC z3e}lIY^+P0<3L1Em^(hpb1RkcRIF1 z3wWfg>`z8O#=zJ0bzpt{V=qYXH}-i(F*wL^W{rCr7}CZHwuntaR4SFqITP$RI#bY4 zT+kR?VJ_snD&cgrWlV#02s7_`58eO1<^W=a$pf zTodG|SU-M;*LBm`^0~E5V<6UG+op?v9#A1*?_%ETvyRrhchJ#~!5Hy*3f!koM|7uy zZiC+s`Mp2ZkIZ}dn#uj~`tbTXBs17BX?Miye*>gTc{W3()6brJ5^|k6LHmSW!^Ydg zUR;ehc=d6y9?;R_Ge8=!BpE+1`)u2Gwg5#yy1!bM5e5aAetco(QS9l5@(u#N)AChkQ53qkitnxn&;d_9pdF86UZR z4W^3M!GAFPknYcPt^|SmDi)< zb!&A#fsx49m%Q#2ud4@a|Ihq$uF$I#@rtwf;(zCbJU`^V?Wj3x1LWztFO99zp%7wi zt9W%2)+08C>mIQ$vir`e7*p6npVF@cNe5jq-IlzG3ltoAFi7#p|f$!aFHmm*bljuXk2C1ZbX1 z<=>o>Aq9r)XL&X$A*zb}9$~(Ey~Ft5EIIlz-z;%nv>7f7YPaF#Uja<-mN5QxY$u{f(SJWbdzfyi(!M-*)W)RG{XwFkAf9pl&vSzN(Ytc4U?s-PO@AtF zs4!MR+*v6W`&;f^Y|<8LbF}u)*lQ!fmnzO;UbE4chn}Fyd1ZYFF=n62&*;UO7?htm zV6XXI$2%W4D{2<-j#{g={62XG{qdQB_~J7b^UrgJ{2l@gew12u_Y_1(A48t2{2iuX4Y8g$M_nBi|qT67LF-C zz>i=|=f%&4Jk!Y1f~5ar><6IP(&t$5UBh_J9%y@2uD=>{FW38^$_RTR{l&k`0q9e4 zA4GYnwC(s5B9Qi4`W1hHPss4%$qs(%HGZf!+BhQs`uJ8vS z3U^klReVkP`=8h3*arOU-QTM)Hp&2?yEvCp*&s)d6(BQkNAI8SlQG1!pLWAxM#1H49pt76ZbM6yqs9*fwZq4zF}{5`18GyU%l&tocjigM2Yc- z#Fu?p{d~VXOR4aco$>G2_&?gi((e50UMcp6ZGX1G`f|$Lh=JG-K{uHeJ|uhC=h^T_ z3jgu1`$s+tW4e{+3+t8sD)1^}ADdBJTb?_k?a%Y!OAtx#JA1mS_?{duTlyruGiSJ5 zi`WMR&F^d^?9E@U~L4d>O?xp+v{_iiB zt8I5bJGb;Gg%U_ISFUvpx3OFla9TE`{}*NNwV=4VB@I3h7f2vLT*`ZIK-?h#@(3Y7 z=IJLyX6@Ri`~3gR>^{|1nMq1N5w};j6m4cihP(a$VeZX#oz=Fju~&(5mB@*} zH9o&0g}Me)8*U<(fWgK#wgE?Z_q2gMi*K#9ch0Y)L|NKf-}E&0gi-X-i`H9%w+J2; z3~)Nf|2JLd)}$!9X5qgwF;_FeL?CCJiG86UAj(OR(;WzioJBz3*U!qH>#5jLyQ{l8 zJ1bLx?|s*L9=A@86?s}Uv;1GS&SYIx^o+t6#DGo^h5`ymr=oN-5RgWZ9^vY<_8BCT z)KhsXshpgg+Q$w5|9xw{@3;Ku6?a>IJLW&cy<2nQ+P0jI(Q4d2m zBtU>9#O)0WBZ087jco~j{rUP3U^3@iYwi{2Qae`enQ3E;#-(3cZ@u+{kxV}`ngSnQ zM$ki=>tc3nBT~(bB4%ID3%@r<<7Phmt|=5hdUQAePXF?5r_1syJ>eX<)fASHhdV#a?w z(PWwTnP@oa-=RPii=1+Hr-wY2cNKJ3`105SJGBk2VX#r3_ZQl0B z^?IA%HV}mRWzuNB`-Vt&fL~bq?VAl?3zS{GMKWYq#q%X#Qn?yy-sW zbdnb1yO?ZO(Z`Cy++_1QSF2r-4oDam?_uA5|A=P{ulWq;izL{&jLnqw8m91U4EmvA z-tRuRq>Z3_FVgA5#Qf@i6@;_>_IG!iKKyF+^JbUD4UR9P&l@)#4exfH{pvn&^1T~O zm=e+$c{i=M=mo5g;m^luwB}#$ZT8fsCe1`GH&Oe#`1jy zE?L$N^u|1C2tH82cP(WV|LcGKkN;!EwDiU2fB$dAZngSv|Kopuetm5J`*QKQ_&-;> z*H5yD)&KRh*gyYj{?AhdV4#8g!bnFA|J%cx688M5&=NJ5s5=8v7awq2A4AW3MK`Nj)(xkLEo z81>;H2u7A|+bXn2ZNOc`Hm0^w#BR_ski2YLOe?d0)PB=u_;F{gK8GWiTR|%}($eHU z^}KR7_YSkPH>%%u!}?I3XE1Ets3PZijFEEcJkNMB9(k^Qo?~$5ALV&ujgweqvHSCH zz2;-Oz|L#EF^VLDMXTioR64$YO zzLpMuT8G^mBa?kQ?SzW%qHYkh&2-j}6&#^r*Pd=$u`>;UyLZFtwX?2~Hkh&e$7?f_ zs`vPqAaRaj^@_hgPs_lV6m|D?z6T}p+O^NbR&TdYMoRsx_`bOfklHKijD+p`qrkR| z6q@tB{(hIQ8=U~DWcgVPoE0$EBlY-i%Nf~!b;Af|hWvLVM}|rKwHE6As`sTJKTir& z#OPU{jT?*6rnOK<@1xJ&@%&>an@ye4e&vk3aBg3#^CRo+H~cbYU6YAZvexNr!)sW( z8S8|!08@fFV|-usM6dD2i5T%D@3Cf-+HrQvevLSDYxbbce(^rs5yAzTo#BM>ErOYU 
zFT1QcHPZS`F9>j@aYr@McgJcL%&InfX(Iwtw^R@;2_IH&me}q%wXFk+vLmC57cb~V6GP0et*lM$pnOk#H5Ijvj&gRyEUF)Y_h z+}G+JGYSXdS&-q7R_Hq=6%{sq-pSBC! zk=IVzkBoiIUKzvzP=$A;mLdXGl8UleU0+4b}Le}}+Y zr5BrN-0LvqQ@^h08vgooHW;OUlm1;fOjK^VM|vLudl;o2wP*G*Xri06cMMe^T1lBL zUO&GZIc-M1(GP>z$&K3}3nKgQn9aVM{^+*VQJdsp+{o_-#_Uc7^_b_r4{U$dFO|vb z*4n)k>b-WeGaF+6?VXw`11I%ZZt(pk_nW&ScZxs!EcYyiwKQWtxHrYccFB8;^j>1uPVdRRjA<&FfyC?gyzMhG zu~*3cJz1S&@YWAn9WUC2Z4&yY4;$psV(ayd^l{KmZO{9e0_ouLexKSYK9Xa-xw z@SS~!se5Yb8N=wFYv1Z%Jxw$42lnXh#6rsEcRZnVZFK=mEFRhRCf#?u}rW18oDp|HLKkJy@MHr9g=tX%`G!FIt8giN8z^+;X|Q z;mO~(@q^;;#AgO#)B9EdX)5-Oa#DArd)V=iyH(+;;GN%1#qkUOk#+OhK6@a6O${05 zN)Tkx>$mLMK5fSOxdS0+J^Aw7H}c%HOUrYQ&bFw3_8&)o?H8sU@c~C~;*hg&?`}`F z$JIVX+d6HtrVgK3&VA=d+{oU{RcmaP{dyMoT_dDM%^Srh z8*67y7Pww$GvGT3y-_+)(cUoAyw7KRw~E?LBTW`Y4WfCtbGL7^zEvY9!C}!2I^bEU zq}poqO`O#C)N?@G$9f|fFYA5#6MyRcmh;WJi_eaK#rAK!yh)rk+o-hpfN!zj2^i+F z*~Y*2t!&3UD;_HJgGsCg?5bl~kI;{?*P3CpS9_#L)Q})qpEKfNdA?(uIRVe`(eFfL zFdS80Zn?_cD@)d)fN8|*{8M~+Is<&hMX$Z~pBz;mIL4kpC&drPHpZLr)4Pp4C-GpL z_({%x75A6t4D@|6SW94KV!+B@$@?lbPaJVBYV~VN%+obVhNh~QgJk2K+Txb3b#kNs#r$44#41_dd9#TdLa2%K#H-em|WYb~^?K;_%e&vx$= zn4aRz4odJgdTBK7hv457W>CTMn9VeA55w8Bo6R1>^*nWQuN}vd{6Tq^6l$?IUsBk# ztQQ+6>y>GLAwEZcPR<>7srt8kW_(qK$apz>Af`5U^zm#A+i!1np=pq1K{iHfEFKtt z9{q&@wxM+(4-nf;`~}M^@FNv9Ew9-1Rq=GwSUtqnLCyfDOUwgqg3m+txH!7$wO!C> z^=FM!?tk_9PQEW0b8o8rq>5EeF-g)UCWpDj&%HDguiXI9xBCiZ`EeAgK^-N{G-w-Y zw{wTT`mEgODSWiTQImFU&sIe|cMo)b&qu)`YA=^j=?w=P|2+y@j{{{pDhyC~_W?8g zk)wR(Nae2+G-M_O-lR7s=XCCg-8DZT?ztWHma90G`@YxrUFTi;oL`+G`OLq^#B1>f zKlhcdRq+7NseL&LU#B(F{z89kqxVhLNyiDi?`!Lcp8f~x$Q1C*w$NE|i~tdT#BtE* zj<}XDvxrem3)c)5+?U&MlO%SmnEl88Veed{x$mw0aeL#vuwIyc-{vl6Drv3T$8Dd_ zlnsMjIB3*k@>KFwXE`{AleXzMd&rl0hv3OM5c45)8nm5_b;aBjDP)hUe9|UXXI>Eu2&`@`3D2ab>1-L z@2_^?PgT+!BzNh+3|k*&n%Zjh@VLDvl*s(Z*p>-4&hta*&8^GXC8s4;+yn>IT6f&N zc5M@o8{`8cfDioX8>8=EM?qwi)suZT)El$P#GW0|n9_C+{}t+7N%r-B98X{3Y#m2e z_Mpyl9CZvWckSqniR&aTe|1Ku*hBWRvN4sPeA&;uhrSPVHSKsodjFWqX1%Ao1{ZSs@%%ZDkbN1SpY+Ul>IX%C*Ej8ByI|OeR1 zTfr>1Em@mwdPI3JB5!@@oAsWd{D^5#``xPZOLPQsg`bnuG|@e&)YWYwuJ_0VBRl3y-@LAn({=nh~46XU#OF_qdJyj@xp63S}wt`((d%s*|dF@!RIdvdj3(k9Lt* z@?4#B{3iQ)6(e74kUsy_Ij`)rU`dka>pO7D9d*2W%jG$LW`>^bW zZOq60dPq`k14kF+L+A&oPH)-RJ`A$lv>JB1?WfJIN*4VnKh*gW0bIEaGB zeh6mgMMaYl`>8)H;UUMZG~DDLZd6BoByFQ}OOvOPyKi(-uQeGv$H_W4I+XVMX)xFy z_M9<0cMj@*qaDQCPHGR0HTcWsvyZy^!j1NsNdmxrI>tJGpc#l7rS=1!dfN+vh|66! 
zhL-o{rrrTDak2gGDd(o`U&jda!vB6JBqq{!;Ec^_Tq+RXQ000RPI9s{h!<%)v-osE zy1LR}>SeymnSV8+Z8x^kWjmzEXQzyykI?~behkfj{-(cq?Dv@_p zb54fwo82{M-=38`5@W8JX{qr^C<6ehiLnnI zl$7C`Z#K{eF>q|yX^UYJGo#bo4r&FdYO0zFn5CXN-%9P2BmOvy``FtPuAPv#$nT4P zvrfi(9qeaS3a)B{5j0K4TMcUa-VMWp0`*$(=)PsuZY_%`;pDYok$X=ClcUA&CSJsu z*2&5Kd|sWW6R%00?rE&rpg1S_60wAH!e?(gBe5O9>sYbv2eclLCj4RV$0XY`$yGoP8w)zC~nRptr&wX=@P zXGg`@ala=y88LS3WOZV@% zRbHUxWXwz%l z9lJdswwBmL>~&?E`1{$H={32Eb)7D5E+=eXzpfLV;@#`_ zJSOyv%s8j{#5em6n3Y_uq<_eNGk5ga?L_xeBkRxlg+FV9_wqf*G`yin&Py+KYd9)e zqj9V6z*lxm`!Tm{Ch_fXuW$FOyaC`d^VL(QW*QF8qDbF>Bw+HGdO@jgBur1%UFJn;$}SxZ6jtby+Sf;$-o-BNEX?P(VZTDilxhB90mw} z#fV_g)Hqee&N0LN-diuCS+L8XAo?ouUh)^kWL$Fd+czsdda!SsO>6ZrowGK@+a9FJCk^dc!c$|EB_+#GFa3l9~58wN}c1!zmzdPCwZ;v zC&QZk;(&K0LoSZ7XoC%Zma2ok9M&!6uI=k?nhz7tW{qfWH#To1AQt$|#F(c`?(V_A z9V2nIBWlDRK)ZyOmBHONa%jOw(<$P*f^;Da<lcifv~N zgkqr2*=;}b<@>K5cCNzOPnh}6@7kQVLz4IVYwwp+K-)@@eGBU3^o?~G+)Wy5`^^SF zTUqhD3iH>FnJiRF-qGlTWn=%=jMdx}-}4D)C-vG)$a=Te`=fekO3e2~KGNZQ9@}Mc z9EZwcXk822HZ`q(Pk!DrdboBob?siA7zg)f6d@!D;yUU;7M-oXj@!U2+MRWw^Ia$y zQpOcLrN->89rs+-Bz|RmG@jy=v0fW(%QR-BZQi-IR^7=8Yd)NCEVruX{1NL(j8onH zJM~5m4w2H0f^GZNoD+^1Hl#mnk_o0XGxPk`3Y$+nFDkizVplwJ$L(N%&05${C9+32 zu-iT0z2uZ_tMLxz#~!_BEyz5X`TG6_KFIpj(RN6-@;=N`<2gQ~XTx{H{O0u;hI!cD zy6sTve=I?&q|$DdigRS_q}W2B&d<#mT$pC&zj?{aN7tCIvw+66^w;1?-E|I8buRENQ!i{%;{{VGu=nZC z8`mLk*vPEIXR88_#VtKnCnVUDk^tu0Y!zznd zt&zdXg2bxZx9lzUqIrZV@+$~!Fj)o=Df?A@-`;sqJ~Ek>4xYoA59>=_%vii<{5fkz zocfA?-U4}jH`>MQecWB=o)NU$S#|CW<52(VjEcF`q*U$ULE9nbC1^a8cta^d0=SUC z*yQzzQ{gw29q~eHoY=~KM2$k$=ZrmJBWMAkj^l@{pV&3l=Iq=2G512&@^oIFl|BoB z_i3d~0Cl=|N{OYA$27r<*vGy}t2{Vb*Gx@+lX$AT!8H)nnTPnvGT!}F>G%F_9o&QW z)eiRo_`O4XV5ZX+xnjVqS?<3PuNkreTrKW`wW@5aoSm~Bcvom!;_o$n+|7#04(M|% zf3zQ{o;i!h7i{7O3zai-$_=ZWzKV^K=_OVIQ^eN(cE0o(y4)3g_BN~`=4P0zQ2wY!ojcxM|p4kco`QQLMIl>-5w5bDIaD&L?+uO)aD| z+N2}*mez^-r}uPtWTLsx=x#;625K*NCu-88vV4(RJ(Ge2-Esc|yYlsF-}k#~uj_Au z9`&!UpYzm<)%(7}i4Me}7T4_F*lv@6a`<(AP{QT9w$&bMne)}0YU(`S7&hgFnLIIV3h@Uy* z&`Lj370bh-x#XrtP|=HG4?d}+XrS~mrCx+ zU%swC3w0g#daA;i1&4;uTm3N=0^8pY)FtWcJmJ*s758Cj^LY(wIg2AkNIvh~1)CNB zSLN?~?t*MOXbhF`Yk~%~R`G#<)jks2a`w3ovQOGC5-3&J5BqkyGjk+9f~5ne!#vJr zR+=w6*eYV8k-e!W2x0^#n)n{}UY@NRkdy>(MOe%jme0IR4EjFupGn||Z@mE+b!V{7 z4fp7^My5B%UhTbX=NZKrqqo_wrdb2~7&ds%QFCuFL8?=+xwN0)7=5UJwk`Qr^EW^7 zb2d-+SH}RtDH2{twQqme3>{-pTd(o~{j=oIl1Kj*b4o5D7@2$WEAl%0>YsMAI>Szu z-U|Mlzw!8zD^jDzmN7-1zZB^N%!n9gzc-$GaAI}?dkr5$c|*4haud}@>OP!@&%wBe zsdLx)JMySM_d{&!*X3M)XXu!Az6Kjav`u4%8u@t*gWOmYX6*yGP z2(xgM=T1AsTn59NTH!m@Js0~XRKot9*f;8Y)H{f~ukD-I|I!f6;F5L;KmU-`@a zCL#By(GiZq+Is~{00Rl20{@wx@a)Y%+qlIGxaZQDjLIe$efYK@#s+6S*)XeVtCJ0T z=5;K>FP_b^b5gW_#>+z0Jv$DlonM~asV#R*{iT~Nr3>xvZQJuGeoO6g__)=WXNNgi zokf^4rhH8@tzQ>B>$viCkKh|fm%Q8~cAR^JKflVyu;Y@Si7(Lc!qgV*{CfZKuZp^{ zuG1f8s#Dq{y>@}!u_p5!^-Z{!QiI>!v|BApy?>6!VV`M#hiNpsrPB+zj=La!lV^Rp zN$~x$267e+hSjQdAU4oog8rjg1i7{kho%iKHLHD63y@lGUU;?d;|=qMc}BrUHsG7o zwZTU?$DlUr2KZsfJrr)zxo+_j59qk?j0a$gf9h60{=dROrKSa+JLj+u^M{ZVUTr6u$2@tFyLie264@^kL1~ib&k;P1Kk|Sx7E$5e zRUYv7_6m6tv4;nbx`yz1bR1pz3vz-qm6*8wP^e{pe_7PAVQ3zd^X)YCy}{wyFYk1sl5T^*L8O*L#j|lNb?(YM%VNIv3hEs9Dr4!0v+b&+{Pp?d*M!qR1bD zYi&Gia}ML(Zgh(o*jS#x^XPVW*p|?qb{IWx8CknOnlFr^!R!}13aG8Ij@CGgpV}Gt zZMpA3Opmt*NpRiZis^G(?aTOo zw6C1osLEp(H^1z{@AjfTqgXh|wgrO#C^yUxO&0W`Sl@{lQ=O>>%;Ut-VCw0qk zY{t}h{Iy%W#bFWON8`7S$&5H`Bg#87c;%!ujJLKCnl>8SZF{{myY@+{niu? 
zwTE4D3xrO{xs9(n@9o@l8A#EOynEOrv0ZqRMZAoXpk)r=ZWv)pI4teuG$B6FeoyoB zKLYCbR$csF#@&+hE1U(%q2;-M&p5+rU9bVH1B~)>d@ua)pS+;7g_~BruBwj`L+}~} zSN_E}Yz99N|IP88_emuKh;@QxA;ej$YjC;aYRo@REwJ%fzs@y^T^yGXyoI%!}IQ8=b+>`o$-YH-B3?!a= z(0n)i?Cu8GGaSk6(c#95`sq9u_2nPLItY@2hilDK*?ec7Ktft3a@e>2Il4ly!IX9f* zu=Q(&qCl0`4r{-H-a_nu6n2ZemYT!wdF>UCRd7J>2%lbRAm^OuoY%o2sp<#%4%~S? zdmS&$;QottF(#z&JiWkpKkS5;8qT)D$y;`vFS+w6xBxmh)P$cH5_!JU=UTvH_vtgxH4rot{F(idVH;&{iru>*ej9K^Oh-{uw)>?oGkP<;<2d z_|B`F8ueKU&^OLgqjPgmrFTnBwqxBswupa!96Q>XOVH*8Yi^ZrL4xz^ zy|t>(&12wuu9>V%2Gl*5Ne?Xd9Z5eX6?k2%Wd0`kgQxdO*Dl(R!r=!?fZHZ#51TP~ zU`Bt>3dsR0Y+<8rx37${xDi=-Y_DEV6D~!v$?tjJfdf})PF)AS_e8B(+d%bn{++Ua z(4w%Pa2o%Ak~auerq5uv@P1!Au`N=o80om_xBFJbYV>0K;3#l?Ots4Bd>T=Yyb<^%qSABWO=!H39ErIX=h4CeSad%Um_lY|#{z5qHryBE286XSi zJo3bYH^E&uRsB1bx)vfumz-h~!at>;YpCyEzCwC_5^Qng_%nvC+Mob@W|PSTffbiM zoP3NQ=o}4VLpa^4cKapRf;md>+x5I-Qu9M3in`a^EgU9An)ec}hhJ8lV2+Y&oez<1X?ZQ*&4 zC(-$Ovi*u02(}M3KMsuv-@Eo6n75_hHEN}06gg0FV0~BqcOvzBv~9s4HD5rm3RzFi zi7n?Lbau#n1wBL@idk0;DE1==NgW(0TKdL+IB$!i&GYFxzvQLjhfX~Q!d)Sjf9)R^ zH-lmg4LM;<1!t|OKMBvk3hLdC>O2mDtcAVwtkf%dYqVFWwPBY8$E_3!hcyl!Digvx z`dP2B)FIT0u@(^@L;BgisY5^4g&Kf}Sxf5BU54*e{a@5ws#xjF8?9`?B|c;A^_j1K zxRnjWexI-n2P@qjI_(Lb>UDEcHVXWJ`$W^NHw+u(KhyqrG)lnOSx4&a;5gKX6BKmqyS7@kyXnk-ahb7A z8EQ)+L~~G#ANiWTC{JwW;CalHT*vty6uR$$wc=hxJ3;~aFsPF`wTD-A+vMbJU^r1xrUY}=YQ{tJt@>VkNs=yq4HI%%T>RV=HhC5bEF**VIW7y5^ZGpkgN{va0W$?o=RZs2Uhc$5z?V99Qz&V|!x%}9 z0WG28QDFZuNBGhUhoa)Ai!C{waq$Pi@BFc<_Cn5A&?G3ivriq;Gq(YMyi#l1VTvU4 z(up7Q-LL9X=Kh=d19dJ@EcFy}Yz;MU$vw}NDfc`^XbD@ub5ggWKjND6p?gJscySl3 z>2#)3{p^~9N}h1!eVH47G>bb<`r*RmhU?k~Bd0o9jM}5^f_~Qhs0g}~#v|_DcCAg2 zp%%bkPZ2MA?lCfW$ zKe4aB`R`NTTtoZvQI#*sy*zQ=s&gv+rn)XftfYO&iE~h4f1&+<(pXz*O5VJK)3_v- z^MZK8dE@`n-0($0%NeTAI&fJObK1b2si*Df)AGNpw7cF#_hwM+?m{KKUAwhY771AG zggoBvhRrytdtbXQwE^MB3wIBos&@2@`v7Q%iwjKwzN_@PK@PF-W$-QF-<`3Biq}Mc zZeM(dzH1j=zkKh1%q_XXF)n-g!yT>g!z&xFd6Nt25H~9oVvJ47(1c?*iXGM6^s58ociMQMHp3z#+RP_wcQEJrW zUvgh6p61m(%oa?; zWRT822o3wu3yYxk4yQ>mx-yG)sv0td&iYdzMFoDl4_qkOtpgN_OWzg)^cJRJ~DtF-S4!7w}`aGq!Tsy2}-;d|{hdE(za!}mkdD_UQ}sP*QQO?swt_?P}L3JT*@&0=!RY99iL zKT*9Xh+mwR*w5FB|EX(=@aa$WM%MSI*2sB(lkZ*d1|18YaHLBMl)>~En^w@%_1jeHx0RoLFTP*zsSne)?|}=m$C``abdD`sJ;B$1 zFPathlu6IxggC!G9auZ_zi_6(IW)J!RUb1&{6l_Gz><*s;wNr6t-`IZtQz6yNDMz^U!wbN!b-pFoBFQ9bR{O5-rUeZDb0UE+Lh%_qC z5xVvN+^+p6c8^)LaO#nv=y!~;Y5A^Ad$T&Pb)a@@7l*y%eHtqAZ`-PsQP1ERt21P) zt_Qt0bt4b_iCi#ChxF}%AF-Ku+KF(it64Rcw~{*H?=>*av$j>P8=_dV zod)*IvCvC%%KEcY|9l36erm&ilN_i$Is``NIb|^K_6vo^jtXD=xP5$FN?)>lmxC)6 zz60i>HjqV3|wM9><&MPkWPxka$v*7%ht622UJr*4U zdnd9zBUquXyPtaRA(P^5ws=OFVBVH8;3E=l#9l z{)w?1mHmN(=8ZjT&{y_-*rtx<$8&Qbejo(G5*$~89z?to1Z2R2D7F^w7S&P5MK@xM|Q^l_eURj+7FoPj~_juG>&l1N>p1WC{ z*ufj~yu=H{z$)!_OC}Rt=Qvk3FoYRymcf>Ohy=Mf5-l4XipI4q&t4XNmuaZTM%_kQhSpn4g@p%46j2p6c3F4tmx& z;H*?U$Wu*_KC;(;f#raq=KJ0z9}hu;#&OZ2xd)thM*kIfS%7;;spl7IO34O~S0k@PzVf9kh$tM!9U_pGu>(HZ{`Jde-#CuVZe zf~xlJ%-;)to(OZqP8ry++%^1PeC~6sGlr6T{(HpV-#hez}+;20NI9K>@?qtd()%@D}+6)*kj z`v&NL_(HAocW@v0j|~C{`ZQC!rw*+ET^G(jyiW6ejGhpGkKod&Cu(YRCGNNf%@>LT z2KpZz){vTSzQzuAqir!TiZ-E#^z6+zsp4w8J--pSXCwwHaS*oxFhic5VG zY_(0S8?5}Se6hnF7|n?nc+%6r2LTzj)KtfR!=yr5C*z7gu5pnQzIA!lYM)R3<&q!i zUX{P$5>_?nOAY9{_ecMn{{D&Q;DpnT)d=YkoDISQb;3jQ7db=6PSKlCNgf;^(WEr| z6zRN8EgJtMZOg7TCRqUcK?!0ASJQRDr-nHx?s3oDd%^G7E90Lzt?Iy<3D?n$I1>4A z`WOus+#+k57ES-~b@xRMt)G2`*_(C_W~fG0?Ka-1&({xUL%-k1ey#E=F(AUcX^EbR z@G}vL(e)*%p`B`Lr{3+0&PIYZf!J_tM&zTrRdM*}i9FR0_1E9U4vy&+q8-?O@)$rj z%u-@v>dW+#W1YW0==TJZq)y`f3QBu!aR#WBxb#HBx1M2e2x=R}2bwW(YIih6(}OLw zFsa$4aYuAl6a%@iqiw5P2I6}7ajBObq+X``GY-U%JdYvx@X2=Te3j3vb3w`B&%Ff| 
zZhY=n0&A_X$P0f>eAZ9?LM(%SpXPVrKmDfwexC3sc#v*DKU%tf6XJok^Qi~||ZWSG{1iFxK^zWxU<4gCun# z(OIG&1g#NO$}I9*H%0&2I{+c>x~>(%%@BTr)EjFp21n?6qmElhIrMpd77ZbE-gK|U zi?QSVcC6*<-b&pU^(@*!#jVz7T8WO1CqIvl0e1Q1KXtu8^cc#S@U`XqPc)u;wj1Xj z%^z*ZYF+sw{vKZ0InR3m=Iokd;~mbT@YUNrFm%@LmD()pQ>|V9)s|j!41+_xh_8j6 z7qpP>@QIQacf+I2UwZU^41@U>OezZyMpCkF#j_QDHr6TYHlilYzI5rUUeB_TtM(n@ zV%jm%wrA#=pA#3>6ok*99tOU5Ro|@QTe-`3*otdyA6@gJzD)eK7+_b&y$B7C5v$25N*s#)5&ehuxzF*3S0wLI^@QvD zA@#kV{^Uw`vC8k*bFq8W94d~Q)Y@NaL80xQ5#7If;jB5d>6M3%iw5ZXFl}1cuRXYx zq0oY%e*hcBXM^{DWUAdB?3(-}K%APd2dxYMR5J^qmVz!Jxuq4k<>&3e&VME=YpKQV5{2#q~AD$Nrg?Zy7cj#6gW zAkG69$jJ7TKy1-JAUB|g%GzR+T9Fks(JR6{D<5v4ug~CrVjH&~!yS3H|Ba0iJy)l8 z42&f-d8s!O0ehe0c>q7^tmi5*{F!@s)>h=~|Dj#^(`#DQJ$`zm&w3|6`aV?*^i#u< zI3~OFIafTIQ(sI-4ca+m#2)pDUD}YtgH1o(cfef>MruDeLG!y)l&+JGr$H32+(Y`; z*V+E6cWAwT!UK6TpeucefqUuLnE52+&{$89{a~> z9;YAL4sb7~h4-%Jf#BFZ!7a(Vr*PO$c*JGRIJ>%kuEc%N{K|7q_A))#b996CR#Mq~6HL}A_vl_pOi6#Aq)nfm3D^Bu<9CIQe`I8oc=4? zh}dI!b_1?a+fupYHz!>>{ohAI6Ey3-ZyEuAL$}iBF}35axgOu<^tq#3_bpm=26hwu zM9uf99b0Co`3dlPKKv2!_=)d>KH+A@pCA7$buq1rSN&IPpzt$JJ;~4QNnwZJai7kP zp8O)flY9ox7m28U zVe3;(pWp{dJ?Ns>IF-EijBkpMfNLQ&g%cM`_HH;F6Rcok!j4BHB7gN7$=qDmL#v(u zKtR90l)|BL)3pU9iIwPy5oN%i3M~)DIA`{?eO!sY9Q6e?6C8fBzNUj^OXY%ZV82i9O9a?I60t z_M3iK?9fp742})BQKFffb$xU}yQmLD!o@9e4{l<|XOjuqcGObnHKX71kb7wIZB6<* zsQrAm1ElMis71V5(7_?`|BLi!+3>`ORSbg`7au+WF^tsqg#APx>lh{N?Snu?Ds2^E zf6P8z_3~P2=hyROhkkhrU%dNLjQ>Mt#9s?nkNwRH;w6-Aex3TR+LcbC#B9}ltT=@i z&0PF$#Ra+OU~3%bVteqv;y=x0T6B!4(_gf=@6tDA%IAhphdu%}wE&BnS&NtLZWMXs z&4s;aXFCc*OTdM6G|Ao;l!tgBS7rXnfA{dyr}uy>x3KGw<-|eq_en}1A$=P)M}6An zeM)DT)l|qGc0cP|ld-Z)Xp_DTTi4Ka40n$4uWRk6H~_yy%zVn7D;?Aewh8C!%#$G= zCN42Nc-ZI|!%MsLd%!v}LLWm7_dgK*$k|eb^P%V=+4tLbhj|k5F@-nD#bd}hf3`#S za7j$NMzLhtQ0@s$auSpO^!fkH+mcCNF>1R!ysxw~T>J^N`fw%fxMuDCr6Q zDe>4cx%54E(0dlV{`Xq0#;~Yif1I?cfBZi+fJ!5Xxap*aebM2O`}4y=(Ej_PZ}g)J z$NJLWMLbQ+yk1z7LK@Wt?{$zfCg)&0#9)K;iuwd&fs`=mxIxBRz$I>nu_d}(&Vl;G zhF&+(zX=dfJ|;tJ9e)x72`>x%Q*7H0@1uh>FL+k#0d2d5J7OS08mCN|fAJSV&>&xp znN~V#7klm?Okyos4?uHN2dM+Y4=!Z>fk3siNlsV{M zc@`!l`eC2*@moX=B>G{|7G_U7s20#_Z67Yy-f$*{}eO(vcj~X>!+)WVD z=LbXIvufYF0ff{cfBH}MjX8Vn8@5G%M+x>})U(#k57L201ZMW}-326lraX5ow5Qi} zcTiXpd&i{isoYcQkY7@li3+iIehZyWI7`~ziQb-IfmILjSw~dkhKa^RbskmKiLcL~ z*hd|IUvvktb-V{Q?h4nI9<(uy0-2btJTw?U?{gKlZk7jLY`O$K%*-=W|2&or^78V`%3j%dToRgzp({%N3KN>cTgc z9t3o*IScV>I_h@@8=JYktRMOXs&L${GtRqA1F!9gG2x@)QgY8)KG7^!;pFn4VM}mw z9ruY=%oXqFf3N)dlcxKr2cN>sKk!wk$NX!4il;)+iG0+u)K7HuNWQyc(4x+F`MlD* ze8ynToW-kN!%NR-rK1SnTh|Y(*ZuSZ>DWhNF@CPn;E>mFPER=hk4*t5jvMF=o%`F) z`Py%sR{6Z0LZJA(=WXJ}yfJ@Z&FL5QS5E$kBi?;`f8!t?pI!>z z7$Ps+DVtiq!)2yNaO4gaJ1^WiJ@-JD4cJz9*mThPw5^~sMrn?I*Sv%iBfSO0srajJ z+Aqa^r%5kxZu5(8qZiR6w$*r)#7O8afj8JQcsO8c^uKf-o;f$&k*)Pl{0Irv-81or z#7l5-f1sL!%kp`gJL-kKPxrI_ZR|K`lxLnMbuZVP4CP9FPjYI^E$B zMQpimJO^5DdVNoImW#bU@n^65KTq00=NSbW#}>O^+30hO@uL&wRC|2kVIP^Z_DL=w z1~Q65?oHt@;Bh)IL?7DVgFQ7j=2o+T{3p+OfB!lv%E2IH7J+d6Y-#3vy_uTvch};Q zmj2R<_C?2(IT_4K5Nsf)BKnfOK2k?S%Y31>qO}P%88nS@+9)Wtv3Fd)6QDf<&Aypt z9s8IdK=4>}8J*UN9NKes3wIq9Pm^Z-NzaXl=}n(uk66-Gty81t_BL(*I@Qt8RQ%yJ zf7#sKGftyp&8p^pSzE1(QSoqYjrmUIuYfH+Mgy~9yX)qvO)odO8TS%=#DLEU1`8MG z=FV&ldz<=qH%y-0GMS@mWLb4;!SkZ+fJ>e6@&CX1{|R;o#a0thYOUi9-dp`XtsB!c z$+bia-tDZH@OjZ7TqpD&QeP#{`f8ZIiA-V@<5KoMaU80+<-WfZ4@=&X%R=Kebx(`x$VLyngbzSWozwyk;i!VMyv`WWyvCrtd6I-SB z5pB>29FKY|sWm#%V1iIBIje$Gw|B-(fL_tZUh3=8Uv}a(o&2cL1TPjH(CT+|e-86^ zm#!4^FngyLQ|>g{u1heIrnxEYC%AFMbfV?OSysVduz^=5`U?(q{GvS-eiwFLbpAW~ z_f)uTWzTdE#Fa*m)PuNdX-d-%T4lCsZVjvWLO+vQJep!~57fK=HCzW7GgKV&xFumt zT-!O42 zg!||;KM7s;t62;`y&Kc24@~>)b6+iZy&e9)hMAy4iLU^O>e57`aiQQ;_pbI(#|7km 
z7=^LnT}Ud^*7cpuXRjiYH-D)BohV)HB1Axt;O(KM-2})2LTtYAJiDP66sW;upZR3A zPz)}ppA`BD#20jyFdzsZ#fWUQN6=<1WDz8AlTS2Z;<1A~BFZM$$q-;9USI(&A1eCMe#opge)%)W?~YV041@4pU+*i#drjRpoW!&yiUxMI(Ez%@_yc z<_DGEE=JA&UI}=#&#C$$Mc5QX3{ud5F)m+HC2hn;H z4UJxpeA^K0sm5eh${y$2h0c-)gHm46p78iVgZJCaiBy``@~-#rTfvWH7$Sg+NOKj} z=k$Tc^r-N1cCVi_*9pDVj)L6X6M)Fc{=mHIXQiMRJ%rk(Q-9DOM*y!S{!=!3eayOg zcW?|xV{6O3jitvpIrSRrD?Igla|=w+4u6kwjE zyVd1vg}NZm)1pskmDcRm!TOf85T~a66$ai9D{dQb4gM`BkyAST$Ix&YaB42^7y2>= zrv&46aIR`yynpDqq1~ySXZxtIGv4gOlozv>Z7Mse@6)WP~pZ-!~d^}>8M z@?k1Lj2^%`WXZ+ad<5a@zZrTt&6zg8@0^g8;z6_OMlzjzYH-XZsGw}z*rCpGQM|!^ zh=&wC$b8|r^2XTJa$ZRvjUNWW8LwpBUwRpLk~svkQh&|y{ZW6?>c358r{W7}tC(q4}hWo?;H|Dbf$-TUmj%&CN94ZYi%Ek9^+N zF@C%FZGVvaS84tVKXO0xCWE5V#pr~i%WHD37#iD^2vnpAV5lAWz=C%j+6$|Ih}%6+ zmfMBCvMso(o_vtl7Xmq>hop^Gjv==iGwE*FL0wtKdFyRlxt@i*k2C#c$sD+q5yVKYxT4-B93x3scSC>ixND?gSbz2I>Ov zT^<4!7np)iTn13e-_fp8IXFZthOyRD&@8;{jnQ&k40=~zFUxb#<8(-GIi~rcH7;-V zHWxpEtm;cuKyLkI&64ikyInJ&imZ|TnW#zFIA2%Km?R)(;0Sa9z>YF%ffv1hf`1_R z@qbg)QFhWoikBKi)Ua54IiV5068?)jTK=(v))B&gXqRx_HF=ce zAu{<}Cm<-soTE1YDkPzu;CJOflTh8rb&C zA6hsT>fEwW9_{*(6i{;oauKQLs(aP0Dt~I0tsOj+*_zLcw6XWdBrX>l)P$SP?FY>2 zKMe?34o0|~VX$5#-Z=H^&R;-!5{gU{AE&x8Ye0BMc)Tds$)xvqJ8cht_?J$$xlG@? zbLt0n;dA^=&;^HG8s)&3JDJI2|H+RAHU&UAr@*~$pvEooFt3$kUhdPQrS9uShkr?n zo@qQlhh_iao`1V=fwM>~eSA;RJasaU&thoXfHGJxhEM~q?>8uo0DrcSCv9Ft1Z5_o zKK;!$=b+6O3BmogHDKD%oY{(MI@bRNnuq0u{mSIaOja|1yV-{PmhdK`;=xUd?lfuv z1ZS1g+04Da$YqWL%waCz4T6q0-G2@JQ`DR0N(3aF0FH{&71AgYd^WZCV{FP>Ad{w5 zZ$J4^K?4AlCDeQ3N`s0A=LvZx$BKA;e51BN^)?W4?%xXD7g3G!n&R_kB`3p`j&L}z zJqYmQolpe=E8jORqd#W!cG;@ z>y#jsDGzi9jpCl8*84+bUmP?*j%wyx3q-8r-<^9+?)`DK;ywKF3{Ifr`UQHbN{sn- zE15K{=_ZjNckFDmSRnu$>lJQIjV_&|Pr-BUGCP#>y4ljIIO$oE0Wt`OnM{G-J%Wj!I0 zoMje{o(_kf+usG;2Y=yF>)MFIy=^*g(8I_<7}%#^;e?jcQW}thlojBeCC0m_!D;s0 z-gqP)p-Fw?aG`{uPg0~Oma0CZRWf!ugx;q#x(|=26bKe~u#jUAXo%V$b ztb!oV$X2_QQpv?mh-PpxE_#6r%7^~5FblVcYeL2fcALpwSm*~f7!dq;cLv81q#d{1 zJAk7^x2+uo&rX^h*S6o%qva$jf5|AS9%%}4>HX7>o6Q`4=G_QpE@$E1QDEQ88dlD? 
zBKltTkXA8>KYziiJGvwezxL5*A~I^bj5yg_0TeNmrfQ(!MA{G^176b@c$_i(wtr z7Bw0PoPNqEbgoR8=s3W1q3oL)9duHqU-z&E%KSaO0W@Zw1}$`n5{DsdJJ zG^X?V9D=W7BZZ3=<;pHHv-ev^PBbk0OK?B)Kw$fqoZnW;en$r#DU z3{TVGR4s@nmH3zb;E|{Gien)}cPX_8**(B6A5PK5sq(C{H=*12_D~wzBU%zB?P1Oy z5r2;7sAt@_z7C@jZmRmHCg>Vh%yQ-F(KLAr*~^u(lI(rb*5igTiz%alyDl36@7s5# zvjl~Sw_M-HpK4D@!R47>aT7N7Va=g7_srRYD2fM!rpkU#I^0Ar;`ArZH zwD{Q=_4SSB-UEA`P`>6|6T)cT(!Q2L^KU!pA9hb(<{WTKQs&x^D>f2i$04;l<;91qNUiXs{%AmsP~W^kcT;c7G8` zU%a7`m~1WzrOM@Pfl|^GJV%cuc>%TXwB@4Q3=OOAmmh>+aU^E-<+u>L1q*>3VCT2L z)LuNq+!#xzZ;<`(yo@|qg9qaG)&j)(fJ3_b{d!btkR9KbmBY=vC+t*vhUMqri^B!7 zecZl{x?s*wzlJuHx2}glF(tk6^?!k(`_W0TvY9_Cj>@*xEC?Jba3AyIKINn?dPmS9 zI!{_XV5T<60e`@w=gwkU%F+jVjAaWkYQZRe$Y?HPc7gK!6`WT3Dl_onJ{BdhUlJuM z3ZNYhc_@o=se2aA`s1~$-EoAks)!vDJVNEN2mpADw{s`RoB*xFU~JDNL4OCc zuruVtF_Y(5ea25fpCeopK1bX~$mS+%62KL1t}ZyNqSrow8y8<%U0jGXj!04KFl~>v z@BBs;<~PTG{g-X;cYk7&U#!;UFTdin zm7LeV*Z+JJ84U*;5ZXkEfL2abxPmWeR(XzcoCi_Ne8^JE{vO}+lgdcwt}FgzExZPo z6$$m7j_?;$(at^BJ>V%TrFQB5idYT?hI#i58nWP_JQ-4jeaud&AF1An_2R1BV z013n*{xkE~L0JSpyc#z1XwBPVu6sd>AjiST1UUhYcm^un?SH-4?q)KT--iwsx-A6I zJUGvhB29Lt(H%fYQWT^lhM4#m7@fqAnGThBOkMUbno}#^=!kkw?_-B`$%6L)??#@j zSV5PCyc~+IORP^A&^K731r(CeH3fa!uZ|^~9*0(q_0}k$zMo@u2vvgb);EO6kg8YbAxre5!`Y;&0%%~ znP-MvV*viqsJp@MNO#y)G(TBWQya)9r{SBdQ`lXD>@|1$M|& zM(k__6V?-%z6qO#9`@Zzv{{P%ja01SwZ=5-%?XvX_t}m3nnR#O)6N-THhS#^6i@v0 z_2Qw94}X`!B#&M!F*iKihD2kYA#3@s$qa#?cwLA^^cy`OA>u3WX!PrrKCSEfeG z^f1b}O-k|MLb5lC)QFHX+i;fWK7Je62;_cU7l?E(G&C>q@UaUpv0I0;$(O(-5osB8-uh!EN8K{|?tf!IA5G=m0loD*&ecws|CXgt9Ux>Z z=B_Y2QXJKBPk(IMJ1p}tRi<{EKYeyB^-f;m`W3t4uRXXAP78}c`@Qb~js4uuri!l3zX>Wq~_ zx=LZk$)2T2Va#g=)86jl)P^U8@0A;GNRGJV9{wj=-S}ibGvOEMTLbOI6$B<6o8pZ< zGm~kY(H*;XhpM%umzfXqP2L~k^E+Lye}AerzPbnUsle>F5Tv3729KXwf<~L=EGcNc ze~l|jDyZi{IhAsVQ}TJUNGVWn_Yi_t62VB)o%2#+)^^duQ#W)8mXGpXFp3&z-Sb{b zwr3ua3A!Pz*-C5$?nl9QMij>jp3;uB++>3Cqv`%_lfv=^`42Awf(HJ=^-h!t{eQ3A zm1(+^0`^}sm^gOio@J>1XA*@{U{?83_csdnKNCXlMt=ar8n1<`^lzzrQj8dxT6JZnZ2O&1IPvYa6as{>s+;j)g=U2ua9>e@$b7uZGRx@Ftef%Ml5eg3@5#30^8JH{; zTYdTXA3G~n3`)f%Q&HaSeg`-v@LzvzE8oVC2-!+^*}HLYLi>JCbYS|l6&Me+$KJ>OhYw=Uz=plc0_(_MsQ3eEr50KX5mF8X^qj z08WeFO>w8I4JZ2`&-iNo84zW8X6f~1A(0kb;40E$%?9VlBn}36!CLMmzI1;ZM?mBP zd5^g(q5A?SNcJBPlWcY66;TNK>=pQRyq~=Z3T~XP5@C{v7w5ARp*K5iV5OuH?@&rm zO0@a`jeT!4kh4*fw9bK(@Eu_S+hDG&~9&^=m+`ePYU@U4YPm|7*Qkp3w4RV^p=;i~6z zS^E9}pmoC0W_SVO&)=1q`rYLGbiJbQ{jb*!`v+9alt+#Uw%Ko$N*S7zq#7yT&dlo- z3q!@jSI{h$UV8$>$^Z6_ztH}kZ8$PJlj`~lkLGA{9l?c0COa31Uo4R~?3 zS_BY>71 z-Y?x%NBXb^o@R9iR@DCPSqGimTPov|zJ;iLjh_)^d0mDj+TVW=N??sr-$%cb=ed&( z;G4>gD>i77SC|Ki^T^fWJ;zaz%_HfxVn;tP0bO= z1x9w*t74OY3N0#-c+%bp_59jBILQ1&&_1)DrYTl6BX<7W<^-F~A1thk zz9aaH7z;3PEkl2zHOh`YS$hX}8H7@nwX=?gs(^h%srIt_h|8-r07CosWS}v&@mPWY;c4GqxXi69YrW+>5g%n)?1elu z(Rm1G<@TwNIHcOq-r}R|)Y0Y`FSv8PEQ(_iVG0T22Bm+N{mrQD9Icu@Hq_nkV$lY2 zd75GI44^c|>PYaDe|#_aeA)skd%sV$S2E`dZu0_!nmEV6@(34UQeD@(;SYawzFtcA zAi>zFtr)&+x7bf1iF?>J$-~~v=jp;bElz2G+#uC;|CDXqL&#!Y@8G>1`RmR6JUh(U zscGf=ZPkATtZ{N9V{Ck}-S~7;g5%dAZq2Hy6Db;?h8A>X;o>ZtrEG?P4OD*}46Dr? 
z4tr)M6~KCBV;mr6go8nV3N2rjChVCivasnM#1pA6U=(}zF~KZKE^@v=+>n0#HCRg5 z@^-!1$nyVQeWK~jWC&I%`6r2xm<84a`;CkVu4#Xd+75Ks?4?k)>#}gH7KlT?+^B;v z4T%bLm_7_{Sc3~TP%f+(p^t%IxgfIRJA4mP=6Nip|BgAL!y1^Vw-9ROF!wLZAlJZz zh*N*$5qB6i1gn6d8@QP5LI3GQL@#EWMI7=SqVXbX_3`%Np!%Fr3f2Q<9=2Pd5mbgp z#SeeKi_j*9B`K9|M|J_Cofziq`{Maxu^q(_Fr4H4nbjbHWN03RhpX$$p??8XP zZXv4G_`^Lv1{F_n)pEO_EsD-D9Do>C!wqV=e{x?u250M`ktr0z#sR>`U@`tX z$qnaW#yRU@J0bH4jqPNbd};yCc5N);_(y*{-}KJ>zPL&=P#c`N8p_iznlph7>RSEZ zZ(;ZD&w|Mn6c~!3XZ)#AqTk@ud0l_QDkEj_9bF>zXUGk=zk@}!O?rD|dj%?!kSvRH zgd<4JIa_w1Wyt7vbRpF}zqq72g?E4IS*bNdtaLIdshclR(bVRl!q|bH@}BC;dIP27Q%}nFcHfB$X2lAkBU&Iak8^QbMw40z ze?39?lN(6WUc)j$Sy`7uzu1YQ;&We51I&UVrNR>HHt%G$(d8ijz1##06!s5D-Zyef z`Sekk{sds?%~*jZ2{Fw(u> zk8%Wi-|hKuvpK$w8W%tT>ku78c!b(#Wm(GtSMXvr-bzYwWw5m8EjK0yIs8Z*AAl2t zhM;^L_yF#{t+X5qSl(ZcvnB%gN-L)p$#I-%_;Zh$I9B#F_xZe?lOpaJ}THWdka=t~UV3NK_*)5qRRj2KE~gz%e zmlbLRW2>$fn|NS0=>2zc?vebCHA9~2{5WC!DL(iadoYmaFf&8+JHJ*dL$Jx8+x<8T zlRD$$B+p>Kw4nU3 zr8}I%DUg3fO2JYADx_)V?t;cc6$n)S%{@(6)A;(8>aX4@dSj2KOiFf`&Oh(I0KsS? zYIv=Dfje39M!}{6G|C2JsbJ;|l);FL;B8i#v90b3$~p7g5=_frG^hv_9gI=W9>&FOHO4XiYFRQk zV2XeFWq9QyXT#8v2OW2C9b88~5-z#={Ng|E+8i?qPzv8biMaVQiQNMj0J%Ns3DSPc zt$3w^T${pBU7iY1>nLce9bqv(`x#zcz))&S#}`T@Xe5LRgVMPgXwb)t@SHN3gCiQ= zwJfdk#;O}3yo2Vfl|3|&r7l%o_8et`qW6DrxjikAoS}v8<6iC?8NlW4&TjevFY~ja z(*c2}Jr^zYnTDcUYCFtzBcMd zP$RgzF}DQ-#*;o-zUzYl+J?S;$e0B=JMSL22x3s~c;o*|n2``Zg8n*BWRujvvRHpU z+4^KomqjJZHWCIdQOFT=nZHZ+z8ZBj!NnNJ24DZqyf?h&*9=_HI`>7I&>!=jQNgs##fW6$STqHiRztN9d2K~n9uczi zAi5IdxDQub71$~KL|_C-VV#$psVRTgL&&>8sWwUWHqxof_GqhzY!>tYfn%tzDCoPJbfW3#zpC^7B{k z9kSnHS&-@0YsT1F^*6tbfw|bPbjQ$&_D41;K>Wn5k_Z0tDOigqVL{as=fm4 z{tW2$2SG06eN(eeL+sdk-$K5_#fQ_P<=#ECDNUJ}NxEsfIaa zHC{O6Rb}=K+BF}LApMMWCUckijR7aX-vrt|-seeH?+LL&Q7;Cw+=hR!*t|#VM5_vC z8{l`o6GbdT0qac=Ko$>@m7gs&a;aeW%OD?gRB|1*q4W>19*|~u!wdki-UHiMv>@v7 zXvalgY+~-RWygySH3>wa$-B?qVe=%XLT(yF7t4E^z9ZU;?^KmqvsV_G5P4Mw8(!I8 zi0bn%ulvvuAaqpOIt_oE0D_3#q#2|xZGoen;8?ya-afXfjEeTa%kK8!gx>&X-+qNse> ztMRww26wgTPC&G=ObyC0AQM`{OoSKGwI4#^8W=b&DMiaB&a{7kv4~WwzTJXh;DENA zXS!YthfUB1D~=@RPqmo3Rd5vBG=S2Sy73wX<}^|O!m zeH`7E$kSgKr7YrgDTE^Qw6+-fT)>KhKC+37jE&GF80D)uaa$hP8{wZwZ_Xit;Tok- zLkXVH%j4sK3)z1xEP{rT7p*#f__&gm`N)_M@Yd?+%KOFi4EDvbC zd_zce85R$t|L9Vr3V>zh@)HFqY^(*nA9|xR^m8LJd|TZtf#Ui}(1*;wXu^CkG6gmI z@Ai~k3o?B9|*V#f(qvX}Ep`qH* z^xpzS{yWv7+M*bB8K+3AQO7ToOU`vP=Ph#6GCVwOgS#g%Fj`&{Q6EfVBM%plIPB4e zMnQPciKoDWLbc=ANeyzUMz2_WY%_OKKseHq3*U z7iRHy&{%C}$X3WB)>;Paua6Ug2+?Z^5B^sqcIuFJQ5a~m2e72hz+DDx3Jeo|8U&TW z<>tJ&BrDOk61I%O0z96-8b&X{$0N1eS<04&OgDe`y@zi(aNfHKf#(3h)eL$47wv8JwO_c#tMY) z(cNCGuMR+xaiMwS+bP}BCy)Pl1XWq8>(sLC2&3VrQSRu76n# zeJ8vPG*vwO=lEML`mc#zgkTTSg~XBIhLC??tb2)#jq_D%zgHrD>RGb}Qq^B7`MHK) zQ2e;T4S~i4k-r11CGg=ku$-=NF_ifFNxHE)9pPU7uy}*3=mPg&(GYG}xQYZM=GDumqZdECGjbh1Fym%!zQgJ3bY5Qzh6R^NcZB!i-!{9e|Yw8@68+0uWw z$|Th90xeuTEFcM;Q#MH6YyLZKn>FghH@m>K_zy&)qF?4oWW>STbBpRzPT^;(=?wG@ zt*p50^YTg-AOJW&hbM~~%EeQ8fsUmZefj2tEzUxzp`Xb7nNNJX!H$R)UiK%q&5(QH zk1N3A`=e|=7r;DWPL$VvpioLqZsUJMRY(HYpD8fRwXYQ8XmUdiiK#F<>}hvldIPLv z&`o=k`z;@#61@8vW2EZckik<7iJ*jWov|P^z=90{fbWaO#EB#gK#5OHLD@fu>B}xZ z?0%|}YLVx!YH;-Sw<|nDY}&z8z2GZNWz4l)$A8Leh4@3enaO%bL+ zVHRb~ogR>e8$qE{s2Njyk|OP7=(Q=N0%(YP9BLOvhnktPbF%_tU>?2u6_lcUXfvKz zO=xz1>O3X_5CZn9yK?z?Qk8BRxJsZT@-jULhdf78BZEZ~PIxOo*_>>v-3Wh%Uqw!K5>jJQv*ZuSgeK1F3YPx7T0|OMmav8IpMsYJw=dMj(h-dd- zoym|<{tU$57m5><#c4LfVb-M@Y$FnG=UD+d?x`_gA+5iX)SQ3Nl>EFn@yVZ@o{b(S zs>KJY(;_a>lma_`KQ5SisB{sg=~mBECL_IPUnfmL9?v~9)uF;w$#=$dAobjAvXDb1 zAXJ?)s&yR~4U++4Ia!6y-40!HJxvBQ7!tY7W;k4+c7J#XI#LjHE{|~DT+I?Q8gE`$ ztpp8-H#0YrvWI^?H^sUX8vPs7GyjE38Ks&)OA@YM+mg0qtr9x5X4P{A_fZINycqCq 
z$1tkM0Vns`EpPbxgw>DOCvVrdgT0x)fq(J)Ewbyf^`BLeq~AiZ@lX{OBen1LG_P&0}xA;FPdHZ2TS?|UC2-(24ZxI z?P0tFrmBB)&9E){bCFf0}RD=#vw=j?#4eVN!IDfBH@qwy!b{*9{q zQ3rUHej4P)Ab>xHt@-M(so5daMSQ*Cr-Of>ym5UDb_NuBsDLQ;GgOvA_?p1c6*c}Q zlpI2WmOlSj_Wz3u`yaOT|M`FazyA~F`oFVW0TT8+fk>%*}X+jP793i|2{(dkEg8v-ZJ{dA|ihLBkkMeeeWZT` z{wrzc`b^9>_(ufF!T+AnvkG5}9@Pw2ig}g&qu;*UfBn+`18e>d()@|>lZyZSEx@J! z_yLgO&oUnqBbz!<8wDsU_=5uIe_u=V@Bi@#f>Ah_^%Y>0 ze`M%?U*uE#F>=}q8}PFK?B?Ic6aRhach(_Q`HaDGWx)6Il>P(z`u}xlb9m~yJMp~Y ze?Pys|KB&+|NAO5g}J`;T)fhPniS$O}$Un|!@h*Zeg{~Wn>#*hU`}RDzNq0<0cY{~L;2?qDj`eq2JjFA_Pyn8A??#|Q>9!kI^{+Kk1xX_y&a<;@_|zqZg_aEc4)A1iXGn#S&uz0x>|E z@BpFQ%7c55n6;258^V8ze1iEK9!Spx!>;8CC^hDNJi+Za%}xiJuz#cVTPjG9x<9Ck`(gfh& zJo)yuVds$}4lo=Q@Oy6{5NYRV6*bjDh_xISPPdqTvur|9Mb+Y;rl^hBTYDG%-xXG`t`e~CN7VFJ zRa(vIOCYrXdwltlc}VY$XuJGE4Lt~sCu_BxRsxs%G}rvk+Pu52uyFdK`9 z(nkjSRz&3WcQ$Yy#p{Xmmr=qq_E!)2E~?!VZq%xcUV(pp?=|& z8IV-^iL-TpUM9g*35)l8m~p28TIS952haPE5-xwo8@gczl^~SUpy{CjP9`l=lIp%u ztqqx@*~y%QOMg)8qA?2cHxM*2=5~mCvW_rCR`*Yu96CDAnPGB727$+Bp1h|$t0&pG zE}E7ins?gzRn{W-_07a;AGwczL+3u3)k*I5YpWv6NDSep#O1ZK=RdYTxSx4XzGx7} z_{e{pNx)(hnx-=!wY@i51IXWS>MGzXs@pMUOV4cDycsZk98Md|bOQ0`*{9L_MI!~@ z9h5qjg!xsM07Lh(82<+=}0|bN>R{eT_toF)m%LKFaARbgs!Bn=@2010V@@CfG zBTEzlu=d{-{NTYFYO#G@U>1`cXhNiE{K8X}99sJg ze~;9eeJuv77T!OC&6Y5}YT|=yE&jq)grDG|j=U{CtQ1IP!}Jh7Q{ynA)9u(CrCFoXIu* zjwH;JZ9fbCF;S_dJ8DX1q_t<6H!gqH|A82+&sXt6Duo5aDZXB56$ntR+o9`m;i^l{=wgE0^6j_?be0 zB54<6uK*S8$D~WkIZ*2X!MZelk{N$U`#6ej=kdP_yMo2`nM+^Eyi=|B_MlN#>-qMb zt3pC5gA+Y$T^=pCy#*?K@IrqXf^7*be^7H8fwxxStb zn`a207{Mq?%6qhv7WVJ{_GzIL>?_ROqyGBHp1c5yy=gkK&Li)2 z@9_x~*e(#L-+Vs)GD|Df=h9tZnB?Wh{U<^EK7PNxS~L1lf}Rp25PyI0c(Qh#$|T{$ zjJ3n4a^*0QdQYI%oK)>UEPw&uv`#nk2?$uL0khA!AR|3BUqRx)M;nJty}i5m(jlPs z$l(az${G-$WRVjt{+n2~LomVn)+PBfn5brl*Za@W*k4MyS3c)4MY+t&8hrWNGt;Pf zpTL)CG^W3nv_d|S!FqpE3xXFmX10}krLZn^`I|&h?jmo8w6Q;SSBc95Lr|*7^h8p^ z8^b9X7uI)il%+je;}{^ALr!=_A3~4t&(11)3oKuOhpsPz2$sp9skIhj@&=vz@ot4D z?Cx!ZVkyB@Y})y@vJNV2pYP2Ok5Jbz;>FW_*V@49fK}Gf&@O*SNHf;@@A8p%UUmTY z4dDL#62fDY#}5x)Ol?YuY=KuZ1Vzvj?udl5GNu;p83MBVND^u049T`^B{nz2ThH*)BA{*p$iId^a;x0Y&5p!{k9 z6o_xYd{6jAzaS4UIzSwjfF+ire@xG|`I5`bi-wf)lDdDO%Gej39|m!;mI1hAl?Hgn z49WJB0VFP`#Kq4ae$c#NzTnTXRCd|d1)_e5b8kSyiC{_pbzk~DA|#^REo1-R5`{WJz|1vh>!T0G?qoJn6vp}6M=PFWW+<$p%o2tU)) z#dn(u^izLA=gfwk6-{OaG4Msy`PS=oMr()~rNqG{J0xNaY!rVv_Y}a{>CVA8JRJaV zZoh8aAs@edID{p!?dJwQ`$Hi0_F(UAlg7)jm&3;aamNhUXo~;pse5tv>-O=20j%kS zWPe#m?r|(o^MJLU#f611uC%RC4KQ}G@4Bk=m-v4`(EnTPsf;2hJ}Tw$x>^XUP_HSM zVnLepob6Zu;(*V2c7WvsSC5hcJgL*K_6%v*el?EF+lNJ!_CD$2doGx)5z~i`5~Jgh zfcb7N&*J$5z?U3<@5|mu9)&c#T8ROd92gdGyoxsU9rD5I-Ep+K>U$!*|mT4Ke+w> zJxUS%CZCpP4cYVlZpekfDeinSc`suSl}F`4e?%kQKFo8v{~XW^_-XIIxb2#;1A^K| zK-IiWeHs+N;L(-@GNxlQ7GtoJtoc#^G$G|}J@WCqKIryp7_sJ(vT?h93QhZa- zzqm3lG%SL z5S;)VOi$#a+>Lh&;K`wI15T%8$lg^^7|7&hz02VaF_}Tg=5Lvv4(Q5CT<0%|ebRXy zzGQ@np&`M04EM-xN$83C0(Kv0lEi_?#GyD5N33Rn1qECxqf=$eZ#rNl5fb*N;-cp; zzC#o-W1r}mhA9FIL5NcDe@(GSPk(=No;_Q&J~C!0aRklfNUzT-jJ0vzIEo=bQam@QzBq z@$VOD_J<5vKv>EK$}QXrk#lkSJ(FkhRT&oJSg8o+G5Y)q__y_2c|%MxfChg%mH6Bz zvYE(WsxYg3ApZaMAI>Dd76EVol3=0rB=Tl*laH;;rndntUfq>>h>xxWgj1FF!!81I zX>gN7#=vgV1+(aa5~CO2tQ2qThlslVPZccXs`x%V32=#D)4wE z#CX5l#rboY0O-@u*7f&OdYyl-9GWW7o~UVnkp2b0+0uh?gd71qD1QQ(+B%famEMQb zFL<(cl++VVdZ9pGl0-t|LgQxG~nVuAs)1^RpTogK!2ySKUk@vhRYQ629bYW{b>yspwLNq zu@CJM;Nf&5C0_9bFfX$ZSh)Bn4W#xJB5!!~WQhyZQ%Ea$L%Zp}a}o@Tf_33HSG4fq zjzhi?P$&#nS)hu*n>xPJYB;t6`O09pe}CfF`w46l2$JXiF9rB9>Pn0L=gFk@jnR&W z&++ds;THTbO^e>iF%W-70Oq6T5;lmm31cd1(Nnp3vI6&*r|pt{`Bb|A`3bdB{e2;3 zzCuFh?XPj@|CA^h@}>eVi1{UAK>dIEZ~6M3{-#c;Bk-fnK2Bu$-rGmtRz`| 
ziL(Ii8fT5Z>Bqu8PZIE;?t+qq13&M-)&^Vu8{6wvl(?>$5FMhu3zqU(tahy+LTF$44WE*qFY)*oAl+XlTjpg!Rb z30{bOd1t!T{$kHvFCvt#E$>eVWySqak1ZP#aP<;HhvlG(3r1&@F5Vqj3<=p2bAS&pz?Gt}-8g{eG9Plj*t0KRjA4rat0=l#Kt;Je;ctnvTAKuqWM~Mv%f99UHQ0Jk8%+T9 z;BOAg?ENq&0AYV~TipRx1^QlOHwbzhSE2riaE+(BVE3Ok_X1lA<6rB2zdj8NtbvH& z)wM~)gXDkB#~Tcw=P05PvczJ7XiWl>w{%O`Lz?EJ3(T(mzo^e zpV`B!CxpyV{fHl#AHriQdaw>k&j&(qldW_roo#=^(R45D$AabD~RC z3xUZfmhm+)j^U2gFNSS-G}Q`uv7kZ8mjr({zwuVtV#r7XID2)Qz4-_sBz^lTkQnjg z5H%G)RWReXK0!zTxjOi#)Br&T!x@ABz^P4v{o_qj*YzEN{B@{|OORzG-ugD%GXs_^ zY*~n%;e_Rk_g?kb88MpI{g$%)0vYO_swwDkhJ@(U7S@BFj;;Ny(N|78rOIROYgm7W zQ@>isw*K`(qCP8t7X}0#@mCJk5c#wG2H>qjU-G}U$#$%k3hGu}{|7|6V~Sh$y~zD* z79Zk@ycPb#91n21z5Y6&)Jfzgim}t+?8}0#4GLZhNdNwWyufeyK>M-yS1&_o({O=% zks&{^oRh}v=lgB=U$WP&4lDSKY95a{;oL5+ku$zlpArNMI`hg?W*SmK6v zms{y5R6dv$uOhvNnwO_&QQP*S4L7olLgNg}pa~t6te$#6k_>;MqJKl`CqZ9MK4JA- z-d@2f5;0(G=ViHv5^Z%lPwXC6GAYp?uVu_)pNPXMmsJxN+PF*2{L$!{5B7iFLTzXI zxnBCxbm1zD@u^Zhe2RTE$?IakpViUxroEvkL5Kf|-0@CDpmbSk^`g-)aADeZ47@iY z$tT@-0z9qaXMh5s%H8)^Z%H8YvsLgT?QbJVs3P`UsJAOVH+@31D|I{g8 zw$D}XqjZtO=KD>N4$#<9exrYR6RUl!xYkuy4a$L^mITgg;4XA$W4`O%Ja0@N-h9EX zXI)L;xQ`YWNrO88;Q+$DDJFAEH#1m| zNAJ}DBWhFIl+`*)en0T0zf|#eSuo_)86|A9&N+mY`JX`PqAQ)(7j}Pyf06j+>T!Dg z+yiEGs=|Jv7`@vIptlgZy^(*fIC%#GUUL@UCw$TTKECu3VpbCBapRKtsYBOn`lFr@ zuPWY}dr5lodW1d-sGe?tA4q3^fLSL(m4)mLgbpo_lBc=Y`LEqXtQRChC~VgEvs$ z3)F%%0|9D5fB-dL|11QVRr@seo_p4+Q?;`plTd_*yScd;N59v>iPDQprzz87n!q(u z&OAnA^bMTpTt15f6Hcnoc+~O0*cj3i9+_*>R7|=zGo^0kXbg((CY3By zcMPiCR84=$AIN0x8b)95`Nskpnt|LyPidyWbunz0YSs4ZIC^LC=l~P8Gq5qVqTl1! zVJ@?;^X}kkB7}2J7^_G)SmdXZ)7ssO08hQ~IHvUXIN2oK_@p|_@_OMBojhHyAyQ}h zFo6{aV7Qvwdjtb@{$kFhjDsBH}K)Zla(CB?#?IcSYy{+Paa12IH5) z+2sU|Px|>pxI(Vh2o-()1fH}`K&U(iZeMhxJC&@%;Jmy~Adwv;@(Jb}BWl|5i*`b> zejtBrO1Mnq;aWW9y7?;W%WMb11Mhe+l_NM2k(C7#=OoP`l0FB_;LHzd=sxeul|E}G zqu1gFg3(rAOZy46jkgXb`7V7y5%GewQ7YAjX6xoc-r94Xy6hu}lrF1FQjVZ`!7Z}# zu;2U6dHWva>+?D+uF+E|>srj|^@cgGPBwo=Gkcyl?{s$QC0M9sbL0z{X@E-jxUHHV z(iiBE3B)tGOmA;dHxQ?CzN4O5NF2dLDn{DDN;E zFN5=HMEh*VSrDn7QI@DfISBSUXC3sbUD`GA^ey#lQq)ysZl9%+??9B~9m-UvBaVJP z@2>58M{|c+;&x%5W$(GxExS6GqwSmx_K$eK&-JGFA^uZ$Su-*Gc(r*!%>uLkcyZuz zPY-q&=+|wv;BC1J{h9BoDa?P;OuyuJdJSC*TN%%yvv&k)dIr(luxi+(3$OAtx|I|8 zf$8_jqTOTYL=(>1Ok`GB0N1(|upM7d;b^lOH#&bjUuX4%qg0+n+cSk_N#ySz4b39q!ijV1}|zq%ePW8Uf0xJ+l&5Wq~w{&YqO_r)vL3B z>J8F(93RZ|Zhb$6`r^`SFjknDbU&5b$pYdh zyBi4gQUa%_f`pqkN2)Tds@5Qv8noD8@zC6v7Uf`6v zi>5}_T$!S6D7V-rG!3yLKF|X3ExIV_ioMsG{ab%srhcns*H=WXE}QLRG||ysMnUC0 zZf?y%<=efJNVdaiHNNG|331YyC~AQ0S^yr7OT&|G#VCpha) z0AFbNdW(Nzx2)?iMsx3z>jmrHW;Y2QK|q)8ur1w8yVEq^*f-I>ZWT9Jf;TpC#EG?F zV0%VMPTaHs2JGQ$TorpvOXLolu6?I6eH)agKr2xR$W7hDGQ@RFw=%t>-rqKUOHFxp zY*B|+ISED11{$xT*Tgf%;n7U;mCR~KQ{BeO!k>RY5NW$$9{O2iN?sQ4Aq%4p%67_q zJw}FZ%_(2(j2_CAJeSA)%6u99W&0jy8RRg}?1Y9$L3N);q>(5;jU9MB+XKo z-*yI?L626Jt|ITG^Tk3g%__J;m~wK3Qd+&ZEoal+Rq@ODaauw)Y|nJ5EZ~SOwAsEI zZR{xBPvXh= zXvfrM)>fS@CQD43+}qHYoSdan@79lU(u`2~&(CL0j!q}|9iDdYRD(d=%2zA+jh54K z4ECe8wwr4)QXW)cIR;%E*351}wIf<(O*o3{H5l_xwGYne(gZ8mxHj(-2XEta0&9Pp z4w}S%Z{;)0FwCOytG70O8sj4hMo3<7y6~Wqc{!-f7?dFB9sBf1In0Fd+*Wex%q;FG zf6lR59LAdEHnL){?==Nj1}Z#`@|;vOJgu8YSGcwv&c`MLMz*P6$7V5m33qJ{rOVTM zq?Kvnvj>xp#G7HTcd9g@L9d?U#A$zfPvL9%ZO^%F8{;?X(J(za?=p>^)1~Zj3CCph z?&4<9l7){%YIz6i{mR*9972bjwHh64jHH>M+a#v84yzPzzq{4_k-A>*>@7OLo_XHK z_t;(8fs;NM7|!@aR+ywa@aQurpXyRN2FYm(l*;RV`6VHFy_sP{< z^6+k=rC>XIEuOF7eklV;f|!AnDZ+C)-g{@5rct}YS~ENPq(&?+&u&vxwcxlhqes8x z@J3S}_2hp2wn6`ZHI;F8ftPrXUG8M*A%eL~q%%<3s>9K-&k}EQy0q=>dAoLsh`#%M zf-Cpro>ueq>*RM!72b62dbNLF%$)WS?pp!n$1Q`cuv~l02%T$~`arQ?Dm>PPaQ#m3iUsJ}2h_$96h(g=bs4;Y8ayIR 
zWmR5nFa2Jhmp+I8K!0rn>xaXX*Dz0TKSVw}VIRl)Vy+e@=BR*KE}GFx-cH*d+BmR8 zcUO>(+poChwTt@fK#8Rih8uBJ>BxK5>uMjKZfvML&(>(Eo4q(Q)VqGyyA0odUs~BH zh0r|=FD_@+Qx}i`fR2B3&?} zGw+!nZu^tG6c+aF8q}e3gURha+Gw0cYul#_o4DuxL9HTcWWaxbX`**??is4+S1(_R zdW=K9lIwI5Z#kpXeC^8R)RYqLAiSabeeZzgbkS?Eem!Mj0CvX)rmLg z#E(QEm$48;BIcCN%LU)guQWid_g>KbprT3!qgn3i3V|oeiOYTr%ip&kK90g}ZfGxY zKBDJKE2Pr<#e#o&D+umtS>BBjO^u6`bE)+-kXU*cQh3-s*z$=uio|Ep()a%7dy}50|Y@W92?zew7>{6`Cibiw|wkNzUM-Rp8 zga!MA2rYcbw(wjXbuUU!VIJ#nkIbnUu)-hjX>>y|t!zL_%L^0;KGiOuV)_Ixdo5U6 z>hz=7xHx5W-|S%flf0)Ux=8!!a=bcN`};D!aJzzk%owPh2bvW-qHv$hU?$;-AugjX zvwH6VY)d9psW;Gm;5MRvYt`J#Y6=QK6PReN7K{yoA-=rMrkmgmF@)oFR~6qJcNIya3t&^bmBKQn!*%%XWn7vyD);3 zPh+!8?zG+x)JszLX9f$8T5?J({#T| z57&Pn!er2r=}%>izAH4e;yB`cYVX9?JLa3LWK~8j9q_#+Y%t4o8@D!?#QK#@{nY?J zK)}CfQ%~0IE)?EK3L(oZ!i{hQ9nUPO4_8w%*XSX*_4_M_@ zE4PNI(V{@Ey?#z6U`AzPvwDn1+_Km|D$ez$k}mj$27kI{v_n2eK?2;3ICj@(S;ApR ze?&QYDQeeveM+x-QRNxPLgl>IPnbJC-y|s1IlXRo<*74P&jpy^PJsDm*tAU1U8H(z zVKtY3+4KE?h^R%Ho~nI#!C=Wa?Rg(u^QCORl@>PJ$;w^@@YA*rEr8~e52D~RKO+k{ z)(1*|dO25Rz9C*8<3&d&nf%8po{*+m((K+3$G9K=QT4r;nPSUOuSu0Gp zA91VbaqRY99`q6Syj4{+ojs{&Vel&zhGcsPrG)NQpHx?0U^Kuj|p29j~@KMHo%sFm*)7{P9qrTEys+(jCb+ zbLKY5%Y^>|ZQNNKCvqP^#e1|r#uTN0ckqf8R7iMRCzw-LOPMppMml0RUvOB4t(Kc5 zyCm{o@nvCJcVl`U!Sy5vrzr(_nZBXBmk{r$O*0<3U|lN^Wrn2p6(QR1lSK%7_shDw zt~qIXV`eOfCCf;*e0s937B~ao7W7w{q8D25f(b^ouKnnOiLm1`-z2j-x`I@HURSxW z6k2aG<}iven{FBaVK5>|TW$+JLp~k_6VFu}I`t=|k=>HhqdK*>T+Gha5~?_AP%M3R z>r|I=-HytOj=onMsF+ZIap_ctL-6LUC5Qq6BoX1-lQgq8by%reRm2s1w&zeUguj!_ z-dDK{rzC}XKW96ZL%WoV(k5AdP1Q3Ss;AVzzzh#9vD$?YJjY7o?S(g!d!k47Gf&xT zRc2deF=ZZR73AFza0onfgX$U?%+Bj_);`y$;|&-d>dq?1u8r@7b0C zsyb_7RB4=-f!i`LHSCmmvvV=59dor^kAtW56ewzUH6|7W7wa*7n}f!G85qmOZI<&d z`eARAs{>T*o$-rPuzYKI;D`kr(VnlP^H{*ipF&gVE*XLOOzT^bVk<6xz8>nrzoh80+J2)= z()wWDLLW0@m&0p(3N|C_Xp6>VJzCZGCflDcbyINs{l-34k8tz`vThauWfyq^?vJB! 
zd*cYx((P?@QttcRmcrz?tgPpv0CTNi;q-)-ML}_YIc%&4w(`*;Dsa}T4_3knuI zd`@$;v*i{{CW_yGn-EWRm4f!dv#XzDJ~@=o^Q70{S#U+?mGgMrwdWg#zCcE17q0?* zpT}`dCtH`*=o5Ueg3}`1>;&!dES{Uan8U)RpBFRZ1ii8Vx9h~;KdH@YIu?0|Fus;) zxwy4kCocuzjKs9|!+XKZUsChNQ}YuR1g8a$K@>6AUcJSC#eMMe_yXV=b+re}yf}x* zcPK2Mevr`&nzYj7xDmDyKU!+S!C)TkCbX&@b;Jx1lWk|26H28S^je%_l0=;vsKQqz z?abxPj0HQmg?ULoQ}SvBT~d2tMamWy%AnJL5N+0$zpy95vOoE?2#M{RG;w+8bKM@` z8VZxi%f01)bAzd&@yRqKm{9Tl8$30Cc~umY)-ySDBf{-^3=-wrO_LN2 zm@6h2Ede0Yy=Ajm;fR{Fm2Z%dpU0=Q=V6NMwqBadJikA;O&~*xAZ!E}noW9sbk@*j zki8lSbblT{lG@+Q)+Q%nB8gEz`ETB0px8>1zgA%8!MGEW)EHB^mx8`>fy_7Svgn)n zw138bFJ(I`O`CN!*>bqb7Ec&ZLwUD&h}KP4x?RhdRCxQN7jrG%?dT*Wn7ubc9kL?4tXWodU=zi- zReqFj7)!dU%0y&kyjwjsu24a{MXPj+DI#irVJLz$f}3aDQn!aqA(muU?U)T-$JcqD z`qTNu+in;q%lSoh8y|vR+^cDSux3w~_SR@B0-!Kp`8?4^=wvb;^@t(~Vxu$p@H)T7 zaOeOIkMZA`Ti(++>ge65l3EC%YUmq(Ldg7#(N+55IAbW?%|yXLZg<}&i!+2(Y5NQ) zy;9wBv+ObZNcoFpAaLBGR9X1goN!sSFx^_X<9-=ngk6CT?>gnS*KrI{AoeMVo7@FG z2*1)yxdjbos2|EqtEZTDHIK>cARQVECf|Jzpy+5Zg?|OFkr1i41)_WJabB;>$T#4yBLo1tcj{3Of zF9s)aqpHjwR=Mj?eZQPlUoKbUIEjuc9_&0d zJE|l0=Dtri(rp}HHwwHXpNckrHg3r4gSRbab1D`u!9oL>2F~{P!`R)u%{FhC+r0)S zbCznWq_YoaxN6GG`aL=|8|$#yCSh5NOE_-uRbrG=Xvp>UygQzgdC4qZ>&4^69{l;e zn4AvlejYwE{~@i9wHBG91)vOEMdu*e_f>KozvoX3C+yua@)X#b#V`4PSof^gf`O?| zmt1&sDJ|r%i{7w;eHa!KaVtz*te4DkrtkFQ&)&cp3ezkAPKzg-~V@qo~3=QtoZ)c0lJU5bGxvAaNq3&N~*}byWr6} zhY(ixm?^s~lqj28O*B5%=sozWoqdP(T{xLs_E#8{?E4Es)&wlfMp@U_ZE1HMkP( z5;||Wg0YX?j-vN}%jNYM#^=>|mr=J`Zsx1Sl#yl|tB>HME3*Z^RyO4X_NDWK`^uNk z(R$4;$C&L+r>A@w+FQs1Pgm-A*-8q`C*hwxpIL8Z>W5~(huWuBb zgl~)2$dF({lxepU2!{K!|4;yD+;aV_(+O|yHP8X;#pcL=bTMoyAH}W6#_z{LOVa6R z4dwMo{pNH?X2utDr@hQ4?6GHFJA#YDYd$^5PV6ve*n=G=2Ml8QF^oTY<$zTD{w}XO z^g`$jEame~neNgC%m~Xep~V_Q^6$Ebrb}P~SL0Q+&J&=5NA;}7+1q(RC9K`{emA3P z*!dMswgSX|VZZO&t0Ec?7!WQ%xe~3wgM>(yTefew=3;huTd}-L~pr zkCIDvYSuGhUh9p1Jvzp-DPXltFWV3e`0X~H)%rC#deUi&=@M`DzRs4Y-ed9gD&jwP0&dZv=JwP{;kJFjB) zbq5=u;;dOZ-NSZhem9SoWG#p!A+ogVUfHVecatuh`fZ)&SyWg2ScL~-FgrNs2;+l7 zUmc)-q3NWCx^O$cN<`S><@5Y;dAhOFUvJT6&x+dv-MAQQfNhkzpvI63uO{xn_VUx^ zS?^J4!j~Ly*r?WP!1;E+4uHTK;AiyQC(oAdvbN7`t zH~29fU93ndNbyfW(_9~IZ@IR&o^u|l?npg1N8=hB0p_rPikDtN9rCoIQcl*Uqv$@qcE~79JAmW-b{AAuw_=@dXX$h4m+FtLsNlOd{f>zQW3;kHz zrSBz6+)PLxY|dL>DYx5t(iSus}p|}eP27Dr3Y0y6qBhJ zm_3+I!7+5D*Vn_MoSo~t6}_7662l6+D#nHGZ;jVsXT^I=*#>4AZ$L-7)#1{p4-7PO z1M>=fF9^YwbMZb}-x`Lg%3yMWHRtGnK_w5wJ^xO}%x1gQW`UkVZh2I7f}lZvu8j`W z_12g+hTsJtMKsgU&<=j(?8gTPj<{#^FipeP3gAe676-V-Nc{UHDpr$2=vQ8&+ zHp8H8;a5se+1hTtT~NUISxnHm`FWKB=QKYGDg@#p%F;Y)qW_iA( zqLmL~p|sko*T;J{6|B>LW3!UiFEgCIZ)LX%$A?7S?(q+45$@{O?#9}BJ{zs*-Rnea zZA95&K3$B_^_x#;(kXjz?!-P3kwMNUR&CeqwRwz;eY7li>Y8`%)2ruz=v`_Gq7rx)bkYka5Hp_GQ4pXQTJKeZ+ zZxHHyTo%@eN1hKenAP-kT0^+r);qy+o=%y}7Fln-<)w3h4-2)!2=9ei!Z;LmFl9UI z)#G~kNc?j+DCR@9sc?S+0n1^4 ztFU5zQN+1_{>0FQfVt55rB|ZjwkYq9)}>a@*L78nn8g%rDouecJHHCHr%5`cr!Xs5 zR2rh-G+-X@A==PSjz=TK(Z?I@f}FW6$^#IGonK&tuV(ELHP_Zyvd2lFnr&iIo88&C zX2vP$!liam0zH`psEqkNZ^)-8Fx`%W^N$-d)BDVS9O?6#tzbLszj|B@%($oM#42H^ zzkIAXYPoPMwpbtXw{vWUbM^#-$$R``1m0Xbnu%hyeI2s zzI^nSBUA*KT+FXkkAZ4SnfURxD3e401Voh^aj|b0*cQ><>Ck0pAI0O*z1-0Kh<-85 z#bm&LrEM0z73=rKX~)HZ%axRcxf@j~8{6TvWd@4iEjTQbM%Afo1yi6mobvCAu{l6x zDTExx%0}xo8Q&%hFq_j0qqP*hdBU!=XOBz6TQq>{Dh%%CDhuoE^EO_MCei|u{P4`{o8R}5&hbSq7kud97qQk`|^==w$*kfJn| z3UDx(s9dHdPe(Y|vrTw{KE1FC6`IO!*81!!mnV{#Cx-4f#-g7-PTTbexPA|NaN4o= z_iheZsav$Brup9Og$1?AFo=A7ubOj=@Is|bWGSdAcI7*n)9?|bptxj;8}~Ug4`a-K zAU@-xw%(m-=DM2q2Bn#3Apy{A@r&qv-8`=w|IMCf<4NDfvkpoz%DlY7eeZQC>m3Cx zQrj1(m%Wmg(sDU{j8i{Kdc@FIYVF(28;&U4La9ZZ28iv$$N#p3{lZvRk|KtT&BJ0E zSjuqP>#9!2Kvaw6aXW%0{4}{d=BG)2E#(z7v>(VBA^r3!kIxL06RCkrkoazH(+Pz! 
z(&Yi3RM-5Lgp0nC*!2wK(LOXhDZozD6NUiy)94O$;C)*1K(D6pRb6ZZcx^R7pQ!Vq zl8!G;G_JF4-%sb)PS!?@P{{Gh*8_Get_^D(#o}ataSS;+ zv)C-kk_Kg2m@R$pZR|A({O>2XovgqwIxJ_9;6Co{J1fGm6lROYVP^ZOetG#0S5A-1 zdACoeB|q)A`)VdX(d3>V7w?{)=@<_lm!Xgsa4(R}=LXZw)%{7|?-hD?X!)4{P;(u` z6togv8wpZ&U@kJ2<-L`6uf~jjC%xnIOiBH!E*CX?Oj(qe_t_$cxOJBtw)d;%Eo$vn zPo}qteAq$AOPM?skVCkGlZf{T?TtO_;G+E{q-*@SpUg@J{R&mi2 z%E?B#E4PlKY)p&mlzRm_{(J^YrLjef4Zx4hfgeuHsKG$qd@?`rKGUs#3hSU~cX9Qm zluI110@LKdKXZEzmu`1|WVIEAD}j%^#e6%b zJV-3|$7ff+8la|Xt@i62e46(jb$v2j1HWS%YroL$%R}cGx0wce!QGgJ6%#Wi5^oGNH^w&QB6BE{j@)iDuih^ddt3-OUF~ysVakEf`U-g`~3$1T%CjUmew0T2V-8N@7Oyp8^eNC8?Fh|A@gh>pc zCg{N6vGzT=UBfUU^h3_mCC`St(ZjgF3Vbhc zzUcIBPQxI`mBZEuo{q75UN4hgz+l*7K2bK6$sTwbV-^N1tfw9jq18<|fc$u4Uf_n6 zpnzVzsUo}=yba-Vyy`X69bNW=JCFP{U!Pwx5ImV1HC*v zpS2CG^?GuD-H7te>-N=F#!#XU!-jBrF0>c}>Hhs8u+Vuy7@4l*#|}dbGjVdfy@0j5 zg?5H(!)CAMT8GUx#^&25)I(8g_8@TCjKEUnCY2*NuM28t;`nh@C|k4v#r-)y zGL@Tg!C-yT=t9$>hesb6(QbF{mX1P*Sy^L6u$xVp^N23XtBK#P|vA(!XBBo%3jhXCNvMs>^x&9kU2$bg-7G*zGB5n z-AvE->zisIb*<)}=dFZXFxC>2^MiKwTnxgEh_k3Gy5DuvOUt@VHk?|9>B&AO==dpUlEcM7Q)wH@z`U6E{m z%SdNce~TH+l$R)+ib_|#rlQTc=1y+wCN!dee+w^O_Rc@?270&2>?^mZ(4gt4F+5y@8;jV-XT)Uuf zXRKb~0T!!|({z+6qjlCg^cbZ3gtst4r-nyPqC`xTYWd6-IJ(yFD$sscGtaw!2RtN} zH>CH}-8P}De+%7LanbHPbZDk@Bktw>^iVSW`C`KazD3Cg1(Ph}%bi)mYl_+&&X2To z@)rj%{J~27VHKBf{Ti+Ow6CFHm@LDk8iAa8TTCcz&)bIztxS4VJ@v$1YR?wT7!Jd; zdd)sEc9V!Zwib>_0mCi88QWuj63F@HdXU$sht#XH03WpR8)ye=qa-~UvRPSFxt-^D z8QYuCv|6nW`kuE8;IrCVSm1C;j?`?ed+OQqxs(>?xXQWJ$iO2+dBC z9%vWeO624RHxXVUy_`jrHHTpeWjK1~OPOkJqd3$QUya#rd@Pv4+51NMf&Rp!ug_Fs}ZvVI! zb{0RO*FE33oV`+RVVxI$pk}9M^qr%Zj7zVi57o<@u<4~0%M$9p21{*VANq z^3^NI&4;!>>suFIEC)2|8%y)uNGX+Q>u;@j;5uXU zz&U~HzPhE(Z9+k&!t*ZS>pFv!+ck{K#d*)LKGaz5=#|ZX8N+A2KAxfu5uVM?F6cWi zZ}*`W!}+7=C(9j_vQIz`TsuU8G>2M9;sa(fcwaB?J(+e|NmI)s=n&3wK5^E; zLRu{0qr}B+)IFHs;bL@gd+gzG@;Jkrg;(ZSkX(BNYe)y(M&0t39&U>sqUzGl*=7M>iWN`L# zS{4@5Q8hK*gLvmdW2VgP)xB7nSsPG`X?KS^Hr!YbBm41`rgQDM13hMM-%g^v!vM`~ z##!SgPOLM*Q?`F*!G1PY3{i>V9z_On*lo?AuMOXc@o{49lMWA!og%q1Q>spR&t6^X# zVKpk<+*-4|Q#G!=zEl@gt$b<%uBWHNIl!AgoosvAdQTQ=t(Q~(aM);aE3VE|*TO9u z9YZZhZ9ceM7Sgt+RFMJ+>k857+E~J#q(s$!x7Aj+ZMVHB-i>?B+{?y$c$?eo24&Fp zyi;&8NaJjyg~O}xx)JmvlU9e;st3sJrplUTD=WM*gZx#+Xxk0)f@c`F9tz*^+TcUfl3cW95#Efu3L*skr7##g7! zrc-#W^RD{BnaV5YSxb)%f1Qrh^awS7t$K|bP|n!Yi1NZR*zFAddJ<@h7slHGu&>U? 
z>14Cvvstv?ZKQiD9J@E}-)FF~HmaOIx+SyE5BttIE)#dR9c%L84ax~5bis$Y2>D&m=Q=l5@Qkp9AEF;e|_R*{<;SC6v9CgY+uv$zmTvyZ_~IQy>Z( zwm+y4t6RiQ!<)BD7;ssn%H7t1w7RuT4nkPO!QvvBp{Z-HO%r-^LFa zpLdS4Z&bgf+$of)cAT(YoZEE>VyOFaAS&nG^ie$Ktb^1Y(@A-2?Q5c1i{nWWXlJvQ zp{T*1Pmu3SUpG{K;9R)zYUWUdG@{O{>2>X!P|eiS}{1I9y_X3AT2VaRcdX z(eG9uD`5lLZ%|H>pd3FXi#ct#XE-x4h&EBUh9$zq8f_-Kqqcbry&{^ zC*|?Fr228zFwz~97?lBudEYkDOfdswJ*UM&}Ubv$787$oA3k9-zqiQ1^#r>{BcZgQSN z87zzO*b*>wFcsNT!t=C6^!)L^@)2GoHFl10<<4ul#k?Ix1pV4)c9CIgX zu8VF|P|QzuAZZxI4!mF@_|)@&{`6+X2zF=X;>7IZ<_VwW`mpVvOOA@KQG483vv}im ztM}DySw@Dt68X8?DQI`o34`S9fMhs=Wd0stXE=Qp%e7F=Ur_mhJ1Tv;UYDcIW9~J3 z&gb2&D+gT)V@yncZBIteAf$V*+H#yg?LFoypQQcj+1`u(P0iEuo52lXy$#5FW4cGV zT_m8^lq~Ka`-lR^Ua!SdD@yldN3(C6TcwQsJj-FZw9Neckl7t|R};&$r1RV4GZo&j z`%|{P_m9S&#SsO49${5)43U%c-Lk-_=>e+9H|4tIn&W%yZ4@*_vp%94`7bLTm;|m61Z(G{5UcU7!kK{hJU8lgHm47 zYSImk1#5i0w3_ba0DL)fXFmZ4$%7*UYvHTlz%V#2ke?q6jtNe6Ku;9pjkS(%4Wq#^ zfrff}9(r~zgZanGpgiM$v&-PPFgPp_*NJHOe(n{2!VzG%kQfO+DqfKfI$bQU;GKD) zs3oT9m6UuxH{KD41#WOuz<+l53>+5lQVG%cWAT3~ekBtVU}RauUUXypF}w0&cV?mn z<5S(S2fS3mUdN+~gS0RHqhPg$RWaS+a*fKHSv|9aus72%knmruXTMl<#gJ}Ika`mB*q3`0gbi6ku*3-xw?}UPtatpQ$JGS!gc=6JG3G_% zsoASL{4S8djv+6vZt-sjB&!!#vgT?QPKNh?fv>ZA{M!x{wL5Mx(Ff0>MCTy#2U3?T z`YfOt+Ym9of>$>S;iKNJvRk=-ASp>{R4*-N#+Rb!2~Gq>Fn%qtg~U z=nr>N>igJ73dT})Qf_vi_l4p64)3ia&n!le#viN)-aC1}+u?ga(Do2fh}N&+-XIJ_PXT8ytf+#!W41{yi=7?6516&tf$+d$9_=YGTcK zv;K1)u0vdE$ayF4-zI;0>k|)VG~VemYERXk5$al{g~kZ?bTdV$d*jyr0hmBkGE26`0D+@O3JEa*_l znjM~#<_Psm$$;Lpeq`e;aXJ*>*m{cBi{&Jg*ru_zO&SELuZn&QW-^Fa0`{R4A0e+q z;`E)w>5kPSR)BOilz*QdGgY`DP9qMII8DyS@IJ|V!v2rH?`w#Ef8Vq6iHqM@G9%}W zJpUn{x8&a@;?S^1KI@j_dsSw)cI#f}p~}Ikk|3xL%V9%I{l%E!5Uo4~uF`~3ORf2Y zkwna1iB7>NExU1<|00XL{aLRNplu4-oNhcC*RnZ*4;`69gbhy zX0JMgzE2}Iivpj2)QZ?MK-{E&n+8gd*=N120$8IAcOfgB$2zN&Y|OB5Hj?X>(MX)N z@IE!fTB|2_1ljMy`@(r3XJNR%AZa(O)pyU3H9~y#!c6_HAJ;^N00Oa^h$3hE*sO7> z&C>eQAhDW4;X33q$=L3#f8I4%^H`GpSjm>Z1`Nm>@YvX)iNrVJQE30CjTZIxR z1!Fy1)H>LIXnW6;P>CQ!!{d95h8!`75?KBXD`G`uZxDTH*f}dwEU4ttI%jdXRAG4T zpZob|A1lN2|FZ{%XZ?wP-U-Mi$4MT{(j1NoH3k za#B1sl%Iwp9)N#b%=0vNdKSMcW_vJ(!#V10!-boFwU0A{1X3U32g*a6rQp8B@|g&AJ;TU)GFMqu&8Ba+{rMXb>m8ZY@^h&Z%A`AYoDC)tC>i_MTrBf2zSmDQrZGw zpqtuKO5oVFU{lCdp&b;6rtTNMa_N{;%6FGq+sX3OH#6mk1#>lhH4jABntEkrr zax7b4I|UT$kXvw5IO5rChkKD9u}IfNBnl76)w9SivHst4IjrrUI_0x&!(I4yj`+zL z{>~|P$Yb&K43G1SeM|1hAulC+8&?|SGGN}l@sPU*H^f@%VtAmUByf^(e2jiUprf;{ zIcF+vHx+%MZZ$S!bXzqM_63=Lhv^zdvB^*n`{d6*`Er z#2aE1*;9!)juYI&A|M!lNLgwGP7ps1itmQM!{CVInIRM}aldki??1WaCtpagHpAH* z&hv=#%uY$n-SD~WyN<&hYGPf;-AT?P`P$*nP^%1Q-6grnzpN|n4D7G#PyAG!p|&IY zGuIM*!KD<`DAEA+qQH98PPR)9cd3m1G@Ld34mi6=(Hk*U5%1)G1iv%tXt9Ku7L;He ziC!t5#A!(-ON}Y-L7yAHcA?8+do@T>+TqOe8UAfJh?R)1xXSNbTcO5NO|DURK5tZm zr%_LmTqTCwoeKGE8^{AgWOEplT# zZ{$tn{ENu3aQ-EKHp9V33`WdeBaWh00Na}EFOuhRON{;Vg~gQo+?+hO4no^{24#OfPawV8m_@k6|Zu@l!r z@|t`-xS+U`Tsw~wl?_d&B{ex-GZk-GM+vNS8)qN)8}#Z}TVY`h!r8Qo=Ol8+7&1VJ zn~AS;4s~=naxTa-47sElV(muELxE>a5{o3x-i#4kaUaBM{2#P6qVXByJJ{2b$Wo~P zN#1RY7l}cCBh_V*Od#(@JjV0ju($soV(BlI@r~2ue*B)V&)NGq4_IgNapW5$x1}XK zk0I~EA5!C7Epk?pkh8D*{r?(sUEiK;F3wC$RNt62vQ<_N+a?DLuKuiT6>&FJXlU$f z#2h?JE-9{9JBn?5&udL zmfeStIcl(WF*eZA1rN9nFOIs>b_8e-BSs0>LpV?5zN5K*xVQZrHHN^p4DiNpyqZC4 zso~-1ig?ATpLoU70<4cO>d&3_$t8w5d8l{EI+Jz#6VDV$Fo8RvP9pf;_j^Cj56>?D zx9|IZt&K>`C$-d{{ZF1F{*b)+Kjbk3hJ(EC=NaRkA+M7U_w?sylHcKzC-uMQm{V|+ ze(w@4Avg@;x6cb*d6p&L6Ug1!_}H`l2EvPYNT#rEWT&vjCHA+1+H=FXz~&m-4GLIp zv3QnbO-VgR&PE4AU%cjUP6sT8oKvQRzZBwsH;HHe%!%w9s(lx~1GH+$J;Sl#M%#)J zawG?__h0fSjl%2U*C%fQm}vXCyYMxbtis3fyg7UKB>F(?D726dk)NkaHew<6De@=s znn=#^^UgDm#9YK8;G^UWqQ0`LA%1@IC;aXx8Nw6JeTcmZyb9$x;s#0Xk$2%v9M1fI 
z=T0R1NG34Lxxzkzng#Zeotb1G&3>QbCms@vY4)2twdw@55y1*R@sQ*VpXb~U`-H^U z&)gGpS15_H$kI1(6 zStakAO2Ar=by$Uco>j`0WG!&F;QMHQk{7@Y>HapH3ZN0Gy>Tx#)P^Xy0c)~_25a2l zZh=W_4yrDBKDcu}XK%n0u83>eoa9X=!EIaY55zU#-#;PBMJn5Vw(=leoQio@-6v>9?|Bd;yG)`-I6C^9sHG6}jDh~UJYbOGLTW)A`*z9?*duH_qtJVaE z-H8N;{cm~T@caJc$hiAHXI?N9j6M#vE$-JJOzCsy{?yXsxs%$C#PdIR?7zhca_+x1 zQIFSR(fl3qC4B8rTYl>DpV;t!sh@r^e3x}7JZln*E1Yi|bBj2Kw*4Cuu)j!Lj;$mS zSs=aamJx*%etq~l0ljBg2z{YRa6UNr=;E9Kv+fd6T=`^1b5}y1owvj8ZdT9le12^-DX*f@8NM5e9t!OT~gn&wjg8QOAwTT zxe#Iye%GWx1u*1Tf8_=?k3xX#2cXF;pElSJa6ZR=;BjyL_qz3GzvtiD|8pOFpY`y( zeLUx3kAL$gk{6*S{o;^+$g^#z8K?Vt37= z9&2z)lUc-YyRm@fzexQK$rU_<7_k9&cmqayC$2ert+Ur5Hzx5FF&FW7$YF=v2RRMa z3AKTFT0xV>Mp1&8i@odlxM*+}bC5^ikz3&R6R}SJ7Dow=LYU)!h@&8v?|ahJLmW-9 zjzcW`U~t2E`EwU$BtQLG;~yLk`H?`&!0O04lDPZ9_Rx#@``-PYr=d>!;x7Lw2X;xX z3wIznKfqk@+Cx73eI2`AJI+%T9n?eUQ?Rhu6KxlJ9Pd4=;I@jZGzn#R_(W_YsmPxu z>%5xic4LwppF`(=gq#nK7a`$`El6XInB0j;Eb*;`i+JTAPGKLKSa9=G-xVqJZyyrx z&|p-@a@s`L%Ou{DB4)r05JSls2u0*>ykKLL`g68*JRsmO-fDt#qXB)`1ry(PY-sC| z^MQSn_yNgh$(h1k>0=L&J5WP^3eTU^guiu$i1>lKK{oDx@1+9Hr3AbSMh3QoK8^~R zZ@itKdc&HLdjS1Mxu*R1uX=+#&&dW@&rma@e?QA1rjeKjd<+=-CtiM^4SHxfa^dvv zTs9;%v@pau0@b8G^0nVHb}3s2`xCG$xu=pub2wW{6LkU%jNBWDbJ`gqi#)pmW{jG~ z2IP*9lQqGA-#AO4ddh`q2}#ULV(gZ->In(*dyeb4g2Xj)215%qJIOP2JX3?@DjJR% z1_tpOByYiMkv!;41K#xls-gF&4FtqH^3cf7<9Ua~E3*Wp9C(_xUytc2&*OUxG(2nSa zdU%a?>p;5gFVHGr@e$~t9UAfBc+^^Se0!6};G98$(lhrM(p?_=_=sPD6F^Q&;k9T& zsn5MOIbW4MK)@K<+Zpn4k?$y1^Ir39krtwzT)c-y?$j^NV!X*d0s8=){%qn?$?HGh zEWc-e{SQw6d9TC0NOI%<@&5Y@6CCn!VDKc5C!ZVEigif6{d+(Dp5-^pn${G9^Q)Sc z7(BVDc2-jQ?{m9T@#?7YhI9KjE)!+VjiU-EgoT`d8WZma#8g|gp#&qsW$6ztL-u5a z%M-X?&h*HCB3>wZ2Q>=2J5s1^|NGq})EBjX0mh1Oz_a^xO6p+b!&>^3b;8MLfcNz@ zo}Phh#%6sM#5?vr3rbwRp6H>6@xa>rjpKiNAH%wQdt~3fGr-Z6Pxi&KaUYT#5196h^iGESkKhC$1Fb6>j?hPwb+k<3x%bHtFZ@ND-mYMO z3m^|buk5Vlz)+G{LoR=FQc#XFo6>lOT;FG*kwo;Z8ne97$c-U;5-6w>vANW}!ul}+ zp<$A`g?;{GpYn~kF64#0@bD1GS#8X#k}-*p6tXlcVBA%6YHl~Yw`EQ&Cfz(0vqHU- zBsH8l{D9{tfM0!G?L2*)0}-WFp*aZ_p)3e_$2ovHLl%gb#wA4KP5$QTT%) z4*A{>hM52So2>T_*7c9v^4ohwEdA|80k`>C=l@b){=|j9aB`PC2gK%24$9;82X4rh zKk!dX@=vb8KlEn3?U<6|jtuf|gjLjqX|m>E^^7`-fH1J&OM*Rzz9PZ5k_35wX1$Sw ziKOzloBpl?B_;$L_CM5tk|O!`7?=R=D`1+wCVA+|s4ctwx4eX$Ra>=87qS+q6Iq~q zX+?qn&YpqA^FnX0xukKmt})o0L19~40Cx6#Uf5=eoEC2}CHxfRqQ7+w0u#)e!BN1y z_L~RRTVXh7Fj+ID+!6eL4%~a_ml0eY7GN?fsU$yCq;I`2#3b@;K4<42&l-JH z^vQ=BXgGI2_n3s-_1jPW>lve#B4_pIIf9X84QKPG_Oz#PwUun|SSoEHVD@e!^-a8~ z5MVCm49RPZ{bQ|R4ug0^>TCF{*1C|b48%JMDsO@z_;X!=9V8nYd%jwKN_im*4q0zi zbx?kRdr{-g%aK?-0SPhnPmzp5_VW`;Qp+Qy*h}35G@D(&w3hN7d}Z zjhDJ-2lD{`nu}3?6>>3~6+tJMNiz}Jxe9Enx=I+XcShIHLXhKSo-9Yn^B(`cN9INa4Df7LVef z>=1J5h_c(0R?01?zviR2l=}|G?Oy4T`o2G9;XUPx@1?KTbQiLu$a&~PsRJTMk}|9c zkm$^XN15#}w-sk7)d`h%J?JA%aKN}dyjgKB!S2~c?0Qg3Fhw%j0yekwE}640N)`!GxqkXBXA$nT~}`) z?72@Z2CHCpzd3w+&<$ zA@9RF`=;E2sqvui(}ApvF;GZ~3?$R$bqlp~B2=7(ceb28(i<1Ad{Mb^5pOFLfttke zCEu^?XQnx-cCk`gYudz)l;U5uIMgWWxrK1yWIBj2O#`?JI~@yU>D(@FgY2p5!InD zgD$1Oog(T)iY8Uu@7^8Z8^-l`KEo2fcB2Uu7*JCMBbr|hEfLZD4Wb|T>O;>vPL`cT z`Q=Vwc{G_PRp>*@DS=Xs?%@_S&=+v+_*${! 
zKWvdcr-qoaoJgXlk#&Z99Obts$7J1kKO5{0sdHG|kJ`Xr?#K^CU`f=-c7`%Kc*1_H zr7R$GJksYkfV+Y?okG=L-nW(iqL!&Iy<#)%!H_do<%74w148SPhb_+9*PTJ(oUNR} z*@o)vN0;~Q8N|C}3do0dma>EbTR6iu?yd!1L?)1L?4Jfe~{V8L-i6Oos%F(zrb=$9p zcYSdh^WS0uYYeWHp5r(DO|E<0RquQTOQJH|7wQ0~EgO!sh-VJ2uop~`FO>94KQS7$ zfF;_*)~3$C7ep#;QU}~6K~d^)EdMpcDLe$?L)Z9*QXrM^AaVETQoL5@b=44NdkT7p zLM_Jty4b-d_?kF)^P!KlhT(N`@y+aGe!hbq%7>kC^eKI8Lq0%cOf*XD5od>~8dc4^ zB2$t)-4y#p%W#K-&#!2d=Ej&S!j16gHBze*dVjoy@(Q|s4`q0lp`JUs6mPVGMTO)Zb^}V7vED$d4U2w8-L&Jh`&)f zwutWUd9#o$J;nd(BKEwoVU)s7-If;o7ZI9Ky-4{P+e?D4ruXX#da_D@b}@p$(GZ+T z^=JYMrvI$JJ%>C;>C<0<$+;|Nkden5`uxwMoX)^{M(?y^H#fua4N>4Lt7o@Mq^R6w8r4swAG$K~D#gWLo`>vI{=Hio z7aIHD2k=^7c3ZUJ4r8Tw=TflB8N8M zK&Q?=Ym(qYF_`StyJz74h6?*5;W+g`lScgDQDpCVPC7aq`VL#XiehK~FWe9CdbDy4 z^QHxgv!EWNE{Q9@`Xj`+6ovxs@bF2{CrYKZy-?vO>;rvR*7lg_1Gv(DA6PH<$ZIgK z3s20(&aTn4?vkcu$FtZ(9?;j>eu_ufS}*48i1RO0hRrh-G2`D@pf*$mi;GElLFHa+EQj-3A^1$WXsm%%vyI)Co;2o2B38d|PQ>ZDwXYmEM-3Y7L-#0Rcl>0~3hOUE zG~h#&HdPC0+T{FP`X86Q%J7Y9ErX*ZaTuUrvkjtf=`-=QTAu}s zX&Js);g_qbu)e)FvemQItFEm9b|VrcyO2|Pg=)U6~jUNJ`Fy> z*^h7kBKTG{_OVWgNQ=0dL<-mP=^!acl9h$AY0-PHlK0$F3$*`ZGW^3aUVTCpHd8sH zfNVBr^;zla8!Q8)&En%e20gFnCy(5WGkQ&q_%=;!6?yAIM!)dk053s$IsW*11bati zsT}I!yN#&dY+=_42?geMe!>aoZ@b%&IgM{4>gK8*3~7_=Jfm97eVbp8EV}yLD|PC_;^J6n$a`*Si3=OYfOwF5d3%(3$$LZ7R~+Y z`(Pu_I0KOVjo}i4YN{KaN^EvM78+H#_3pu~qaSE8cpuZd_G~8iUOCDlySpO%KRkqZ zjj#zO?CbtAwGM3PmR7C_<98m>Pw8a}?2>*cLR3nnmW+tCNs_o&|J=H?($C~3RT2#z z4cbqYWK%kG;nf%)7tt+`m?I9_b>WRf;cEI@CNqE%06+i0f~;JqQ9KuVz2=4MzAA3M znSEEh%wFg(`Mu=LXEwj8_^_9pB<7A^>F+}7gsS7JH|@eoy?Zdt!yyhJ^;oB-lEKSd z*#(32^QgP(qYheQG) z^l0Im(23LjK}?=|FCW%&X>!lE|6^*p`E7g{tXr*=-cR5;PubY3DH*YzhZFDTQfB3K zA>T(+W;?lj?g=Jx9tNGmbE#y<$paGAo+AJ!(O}DpD&nVKdJdoRp%qZixuaCw@{=u< zDFrF4rN^!_+Kf9leahW8Xk!vSJ9B5N?10e|28mrMi*aS6kuB?LWR7Q#dv{E9Us^;K6UDZwU`E;J}eDs=pqh?*%Sdk`BVA$81r z>!UV5bcA~*HePw#)@Rh0{`OoXYke+hkHq)BexgpLVx%(D@+|~*E0E=o?ysRz#<0rN zNcdgiP6rdnpPd%QY7P^YsbLZ+CFBv15&EiE61cN7VF%dH(8FSawvP*kR(8@%^MhAKr$JLf zy(y7^V4|_z5a6JmtWs8-3*}quHHgU7xsxTd3P7hTNNrxgYNLWODYFQMpj83RLC_wrsC0i<9&Zx6y&_3wC8t zV{d<`k4LUx*$^_JbL9M^y_0b)BB#W&FM2EkNNq1I4ec$4F-&`60Q+cx%WF?mJlE=u z<$m<&5PrT{$UiL#`#fn;TeBo+VBF)AUuoJ+%CayJvP5QuKd>p`!<<9#`MiX@WTr{2 zp>N#xRNOfxsWVg?$43HGw}rmu$E!n}t!8DtWcNckgLYv@=NG>|JSl9P$~N^XPS43- zB?YqjD!mhocV2t|5>!+#)zYTQQBxnSGw^FgV^v=E=h3V5NoRzhwBoZ1_NxubT7gD~ z?sN=IpSEu8c5)8Rt^j^BlTOhCx%@fVc9>AOriwk2Lp<)@UaE~+$UqW_Z-`J_lN%o2 zNw~K0`04R6p7Dy@Yv-N6V3_?c)qnIqJr;+5IbdFNc(@N>z?v$5Zt9$9WuQsH(N|o! 
zaWKnBN7cgkrO@GiKMU-f)1!}8GkvHx)BD}tgYPn0X3M>Vn5UV8pKLYRK1H7)VXmEa z1SVJ3AMmo1v#Y1vQQB$u(9Z_xxs*>bieDW#A~cp8`>Su)ez)Oct`A7Xm_)iA>xxc( zaXGP`Li#2Ft1(B?R3fP;A>3~$pd5o^t2YUi)dz8ge@V|@L8yL&A2Rhm|L%YW4uofU*HmCgHR`3S+$>0 zRQ1i3Z&ojeK+X^;8aitBC>#X)^`O{f(U1pGmhl(6NJ+h|f({ZaPfEamk<_E~d%p8Q zvw#ZY4zZFUKZmNheA7qJp&L`DulKv46yx3Sqc98-!L#3ggzIcM>fFyT?^3>^l*UXu z{!^9K+5nkB>Eew9(9M=q#QLnld0Fg2ej%2N)Rs{G?rgS$6fZH42K3w6 zg~z|sG~UhVSVsvnfaL0bv`>d3w7p(`5(RZp9AipY?HR(Kn=S?hjQ(M!hvJNEVvbcl0yNKN~LUB^6^Be%EsN6y7{7^96}d_ghwvkes2*-}0o zEX)SssVBU`F2z(u*$j`L?yzt$4@^b4X$?PPCdCPmYQ0%-XOGNwecFys0;FXi1J%7} z*+?oujTQ%4#?B8@+}joL4>+F>l8V{BO8cg{fm@QsCktQbHLfWoBu#%F-lY$ohWO2< zv-L>p;?&AF3-6p-8i+$ZDo@hZd(I@JltDXJY+YaL4Z=?$X{QUez9n2};*d1Qv-HDm@K#$v~2Tk`Zp$GC17t{DL+Z*WBBIGpq zQ_}rqf7!q6YlqyeWjv?eoAtYV4zJ%9eUjeM`oaUYTD}OBR8WOb1CsqM^Cr5imt?3r zq!UR+jk7{SjC`AuFC8Mt+nOZW9|}9CQn8~bPjxn_%YwePQeMXU(D;(y`MA(@OxdG8 z`kQ5)z!o0hIOAVKt;KxUZsChGpP$Q@Vki6{ynil?y-2m^w9rV*o-h?(70hjrS6e#C z@UHqd+fi$U+1d~y;MO;9^-B~OM_P_e-5{&I#;yu2FTveP;MWF(5!MLQh24(A7)f~X z9c2Z0BTyU?Mn9q(f605zjcG4M4jn2zU?e7gvLzDYC**9~^qjRPnK5#3Mblc-klZ@$ zc_BwvOs4zpsOmaN<6&IiyvkaJFrSykQ|YN3ZBPM)=mY}L=aoofq-9((3caT6N~ABu zk^YKR^~sZyKej7N45#blv4J!X?`KSj)X)4bLrtEj;C_L(lbXAekRpG3$f7FguGs!M zzV+$T_BPDmmWWO4{z^}*QgAV(_VwYT)jOawww<%EV0oj-o0`AS@B~Bae#`_lJ5m*T z(W}!5{pS@VsC|w8M`P=VDB><6_F?K{P5aw?Tf6^B3c~CM$+mg$I~G!RifqkRi|(mlIn1IlAUziaAPYCMr> zN}rZhu!29k6w&V|pN;)faYPr+^hrB5f+5=K!B~A0;P1HLE8IHiwld_7oE>@PE4}#b zo>7lc@HK^p#@r7&D%Zpg>*!@I1}W<87)st6!EPD4%l4+=eaJ_J~dq&~UMv94PDs0w)5e&mvBd1g9V2$RBk1 zB*+MYaH@>@YME^ z6NS;wY9|=W+6vTdG)Lu~D0d_pRxp2(btZ6Lb}=r>#ov3B(!W2rCwB!IvroajKc6~L zrKviR3AD%mT#my{i$_r;tU7qt#Q?UW2y`zThJxpVNa0)MUKqbf!g-j?XU*0MoXf-& zD&?j!X~aD%VnGSPi>`!V=r5UGet7BkGtCKn)v^_Ch+CU#Jmh(Q|9A8UlA~{zq_QzE zu>Vx7)1S1{q(sw z*mN?$`O~9tJ`eE$W#bj?4|`OAE+JZ@sN&izR;Ti_<2K(_)H4(V~fw% zxmNCxLMhj$x8E=XiBG3))>2=R7jF4_Wr%JppF<@+bGA-tFZ0U>bWW~P+G;ouTtqfi zLFXSYt`AmX2&cc@qZg0};DM@lz=6-n6SPn4o4TbSHZDmd?muLjUAksIX-x}4jua#z zN5n~UA@@z7b|LaeZ`YvkP8f8EapHE~yg)k$Nfbw~9DpR-s4imvd_pQjQug(F>s2^f zbKT0O$V7w_6&uLteEPyajcHiL@oMTKSca6^PP(_d5{+baic}}}dv!`%e z(7U3WQd&}rO3neOxI)FC55xHCY3k!sxb}5%J(e@CYseuxF;{diMT5L8+tk<|ya}1= zAZz(%oT8a!N7}*7TlnPu8C93OvJ=Bui&`F&rgX;eQ<{Gibkx(~v!dBP55_2zj*KMI zc&g=gNMOl@U`X6fy=XUCXJjag8mCocX`a@)w1yZ{S?~?u729#Q#yBv;Wg;iHxn~%^ zQ!yrEIN>MuKFJmliNfcJ5^ z@5~|vav=oB3V(!9rX@B?{4MTgetwFeerD(#N{Eo8VI&8Kq8Z&WR~}SFzv1I+pV=Wh zZ31Zk;CxLeO}L#ZSR=pukl>#tJU}N&ci%&Sx41`Eh_@0N9w~Up*eFbVEgQ*{pgzf5 zxZ;iQ&FvRZd8$74@l!&j$OwN#D<^-=a*pU(Z-fl@o8CN~u)doSCAd!9({;3AC+hD| z%@>Jy|H!v0507K-2CUIIcZbI=@iK$xW!Qb%0RfG|Ow}(u@Se~ggM$}Va`xd4v$_ij zNb-gqXyy+ivnpGz7)ScV!!C(>#3MED$6?RBVV|@)heM2FK*q2$P;t(f2K^Mjm|YOj zZ9dYDm#kD+(qa~vV~GOeidfQEe}P;z?_YgkKEk>gzpcLhVH3hSf2;`hHD@CK4VjX`aesb98g^+4~cO2eqOh? zX9vk>qNz)Z8ot}^RO^w&3pox-CV~YZg`$@cA|r+l+Pi;$eyyxCce54hygwZAN%bn8 zmTQh{!9($N&)#-0D9m^^X2+0GGq!OJ=vYNR&cqlE^h2zzTuqS%w8k59E(>FeBt9xT zW8?m5y=R)%dZ=_KV$)2Ol=zs-?q}=QFo-3SpK8+H)LqOdmcrmnPj$VD^8V&(G2}ml z8u9%cS=UvD)*<*x0;I4E(S==E>K~pC6rMB^Xgz@P+>U!gI@1%#YgL+H~XdtwEpdn&sB~jw0 ztkD$`XW~_u?zHs#zc9u_(D?;>Uw%H??H<(H~_wCTWt8xoW zPN@CAY(L7w>8G-ye>h0hA3AiX3r?OrI+v;Vk89g)X-w>xrZ(ah_8cH$-u{{a zjrJZk|K#;KVL=pob*8Zja=1)h7^58_WKUOcGL85c$^%Y@$5@r>8h@BMk}m-m$vscI7lk3d>$*l2G=3t+Nf# zg`JVe#xGaxq-S(~EUcoq6nR%EI{0|&8!G$r-SfMAr#hb2qtZVdUfk^;B} zh1oVpGlzS+Qongz3$RYvxB0ktl!Tz1^B+KRZpIi&Y@&iNYxDHrl?MFr8(A<_#7fAD zpaC81M}OF#a+mwAv-r3cP14}phj{idhg|O4xEW~S4&*#Kld#@qE0N>+I{Zw6vdv-R zSTi}3%}}TN(CVEhzl{&03@K??a(vI+6c8!(q!}d^I^5?2EK3$WnYuYo|I3YrH<4pl z`?#^kO8FZ8LZc)g@0~l?JLRow(~{hUQ5?H}tDZkH^{JOJ)>qb=dc@Xxyst$lJn$m? 
z@s77(*?z`FG#-%Y*hlF09ei5y_LoSPOLe)tvkbS!n*Krx@)ojIijLbLpA@+GiNy?+ zK^m2MMj~W@Eki=y4ZXzmRRnrE7w^W6i;Kb&I&qeb-?yLkd_#lRI|>5@M4l-d|$*W6-M2n zzE`0;g2bF7zq{N}kxF!iu3F@hcPG^mPUPW5Uji^CbqR^y5;sJba1;_JQ?K0ffxA!0 z7I8(PrYAO*kux3#e8F2(DIioBMJ{avWz6Owc^#c1n#%g}H$^)o8`*uitN!}E7FC+$ zhh|TR2KsIbW{oe)T=hFaHte^B)ZYA8BUgm9LKOZv>P*W&M>t}Y_^!(d5{tDWOXhM% zR>0CjDdd%G)0Q+Yp5J;10VDPkUVheG`Y<|Ilh}Z!UJ5xRAn9j;eTscWH*zz+u}UWU z2lSRgWyCJBjs4O{YPX2ujVCKh&}QE73&i{v`u29_`Y(0E;9w6k(H&4wQ5|nuG*;(^ zq$qj-<0@K27bBbio*%K%o-Jp*vS$w?0c^uowV3t=`{z9825;UA`)R?fZwf%ifnG;B ze3yvk+@ljHY`t0%y>*|EQjhQlBiDiJA@4LBnX^wP_%4BTsptvTp?>RER=mN_t8aCa zQ$$Ac@tl6eu!UqNaI|_4I8gZMu5(|H=uLy@PP&N2og+29OlcMO1kA*4v)xSsn1a~2 z@iW;wf-du4Cn|WvS{qZXIGX#F*tNtcn-%R}!)gM5LBgd*xHs`_7q=W@^h)U7#v0Aj7eZ=svq1w3%0%zrd zyW(REPDUu7@~5hB+EHkcJ9`1ANCxqjTh+Cc@89`ud)I_dus*^BvouNi+YR3kz13(Z ztp{@}7+d(nVCtEvT5>2jb8YNii%gR{g9>?leCt3=)pL~ZK@7bIe^fM0#bTWALBifp zPKI-NRGWYEh&jsTCUx+IUG`nV48MdUUc@89UD51qjiLk^9{7{KmJkhQDO@8d$9akP{psI#igZob;CE}&m?mE}Z@cVZ_!F9nrS_FKt zuZ`6F4zTXi69Y*UmdEtkXr+AN*LN72KB2Djq5*d-z%c2#fJ)WF8e8i`mDVr7#`>*R z9X()eJ;KL&=)Sj8@Z1L1Wt}IE@@7ei@V0%)+9hxPbK;i%nCb{ztBOqP8w`J}WVmH0 z83(PO;osQ0dV*`|XE@=OzzQpd2 z2wyHrXmN=Ex1lwD+)~+deotn^G2>mkx_Zb4_G6+2v zH`@U>)iE3>LQf2aw-~2yeVXp25yb2j@K)fS-T}KspNImF>r-Vj%EMvmy{sPzHRJY? zK*Y#+UTR0Cv`0EerJxLroNZzbj}nDpUXLv(l%?8Vaksl4q$tMuWb*}*gU*s=_Ngyc zjL7#+n6cCpS+0WP58j-X1w_q5yTKJ4ebH<`O;TT5oebZLjxTRqi%GOY{!xB2YwRyg z&F6CTNy0>sZ7$YAH^Gd0g#Qo`7k0=q;Q05aTz3uEDt-w9^ucGC!DfMBUt_x4w7Smy z4kh333v^(e9iH+>y58w*@`glX(gGTM6H&cHhS}wb`H?5ih^EM+;iE5iwcHm1?4P}- zpX%nkY@;1uQSP7e&JU)}O>l-&O&Q68qV~zG*-3_qgLm-5wr`5e@nMI659r4Yfbf5@ zsf~AM5UGR7lCOSbu<`NOw4`h$VR-*mT9GoCdX?nJaMFHFHF@?=#o$|H2+nH4Njr*i zo)3J7!ugg;FGbS|QBduHrt(X@h)&`vVUuMydy*OdTdQ>2-Ivf4iM8+3V?Qh_(tqjC zCKa}+8CB|rj-CJ4rhc6xMG@2p2mEIU13NcE3b;N4!1o*xjCFyVD?$&cW#nLoo|7g9 z6t-gPZ05h7QeOWaFQoiXe~I6IG=cuoWAP_Hy%v}p2VQ!A=! 
zYE`nMPoYFtX+!KEiINVVJr>6MDFSVUcp%+M3ojn+qzGLo|Cd8TJUFukX0Chl@Y-~a z;h0BP(%Ea$%YUHmSG8dSwZ8cIRpg{UcG#N~_0N@swjAzV$)EROXxEsKs07{6KesgN z9-(yK^q&Sv3(AlSDmNtEuL)5R{?hg0G}@Gg)E!(W)f{?S07%SEn`D-wLiN{CVT%7W z7w#uxsTE6`_OW>Bx+{Qt&dZ!ggj}{fPLHq^j^ygG5i}>gxqZ3N7;Q#wed}6~Ell&y z%YA+`8X4n^U(9Nx7Bt7o&OEo6p~wVBFQnv)-u(K=;dx^B@t3^jiQ~QOx*a^YJV2eD zB;vq5%YIwB*dk&(Rj{$BRQ}j7#CuBYMs=GB|Mnw+xDmHfoDVcAmGpa7@i-SZiZs-1 zzX{ixfJ^9CzbLfwpd=3#*mq|nKrXk-O$A;?D!K^gqIh#WF(hnK$8Q-OzqIFJ#)%Lh zoXe1Te*!s0`&0(3WBCb*TgPK7ij=aSdbt#jP<8x6$_)=vR^4N%X#CghUe{BU zXy!a2^D;#0xn8E4$ZS0B`Lb+PJm}YHeSG5|QTXd;?r6nFy?CkR2~p6qM_&TCF_IF82svKi2P8vlXA#o(j`0 z)+wrw>qFB=Xs#!Rk5PC2qzoU)=GhEP(8zr9Psop$xAA?F6f*c7tb9v;q-UN8LVq*< zb5UC@UAp@H_Y&HH568dk>UnDFw&{B)$HIV)t!1DEMi~6crcR-f+B7!!zQ z`698z5&!Y9n0ccaRUf5xr>E?&e6sV97VdJn}D6H3T`66L;A7POhRs=`ME}+&2(j|vh++tUssm)u}pRl8| z)*K=}^x9>WIKq&&QwarjDz9272Sc{+MSiYm@#!S0J|;4h(rVhWRC)jl@5S$``i#rP zC@mPSuqf*WsR;r~(krcAcvL$8tZ@*2*`bFQE&=Ao3nhk2>4PSy#Y8w!dRLZAJ1zga z4}Fd`+zvn=&i!e2Sai0qOro5Ke(?|K&HtdH?{hC1uQWbI?TMzywK&O#6!-;_*N+TN zg-)z^30~#~NMzSjsmpLVgZM@4Xs7wtKqbz*urxsY>17UKr2JtM zuKysz^hAxZqgM&F#YJq+5x=B%l(;h~BC3 zfzF_N8Q$*%5$QZ)SYUu7e&s`kabi7V(o#S?94hm{@r-VRIrMn#O7BQTEHR}NDEVU+ zP68M54gKPw>T?evvYAmwAc799~<7^B_7RxYNf15>R{)Pl-r~>a#@Y_(~$%d$yuz;gjNFgrs(^?Y~xtyK<8QR$Y7@0 zX_?=O>X-j`%@)$564IB6ClHR^45gXZ;e3s&!@;gP^{&NztzRZu*yFChtDP}<6PwBT zijMFc$GXZ#G`Z0=w5+~YYSnWb<&K2shlhW^EiEm48Hp^#oz9ba{o$A$ug3Zm`TtOl zB*zbYhnxY6P>%aM0tB~O=`CxE>C`h?IfT6%a+(t?vAQ_Aut7|Eyt!bxXy`WWEVq@7!A5vV6$SP1UE!=wr;v z8K!#ruH8T6>-0LREkzedXj$J@tu+0o6MxphHT`xOS4O%pwm|)TliLDlQ~UBZR42nL zAY+|FeGj`SU#N3-darbZfKPrui{o2E)B^`8E7w-sHbBh%#U2B}s%)9eTC!^>&@YV;# z;c-RO&AlGz7A`~Uv)vu^)OF7nR&$W?rjuLSmFSgJ?RAcda_@oj-I{b$vtJxRQv>&|MRuSL#%?q*H5oPw}~vtQ){^}^g?;7V~#9v zM~Zr6H$S;g_~m`NAAEr^1Pv+R0oH<>zQ&2OkraMR@G6f$tX!H)Xpe3C(D3}3!Kvee@YbvN$tCvD=# zu+=Ph1jIv$&aeLaAgu2%=Cr1eD_a($4V*-ftp|2+re+-wQHnJrZ+YRU0ZXZ(fc;e1 zrEPA{PfL3!E?~}?jfNiW&?HW&vvJ9B zY>kNE@!*dH!#(_b)df30>zS-_8sRbjws|#;;q3t`IO-@MoS5L*x2KyQX&-AK{hZtf z;5&Q~t!qt7ys9zWPI3bj(1d@{&2ulexEdtthPm$$1Q%8{DarUqygNC4AdhA z(NL-tE1#r6%Ve)@POSnz8IBKJzh_b^b?RvU6=2k&slcm+O@@FcX#d|`Q2Y@;poN>#Hf~$#=$-V|2z`<=MRsGBMS?>F$p+D0JJCjuzm!}$ougGOd||2p zMR&7f=?IW}P@iiz4EfUS)TmG*qqwZdSPTZOg&$UxkIpnwuJMO9jT1d{pDN|@1XX{2 z6Qe7AYHR`10PNy+{)|m)d8jZwOV1Cq>&tn@^1isMU{JBqpjV*)yP_l=N^Z;4x(L*o zUbLVNj=FeGcVRWC7Jj6(bwxw8*;Y^+ae8K@oH`BlGdqgIm#cH%>?GbO6AMG;D=O{^ zFho-pe7u`2NApA5I1TM%bTfoul6in^_(b2seP-A}1UzJQz_LUQ z6XpCAu(z4%gyGtsycQV}poc-!$GooLxbf+so=N;zRtvx3EacJ)7%*-09x;dfwVJ_< zf4GIFZ8f+2Pq2mfK>6dBDH{);d77=a;9IRp#}pr5BAbnld?=zXdY>25UqE^k6>==% z0x0|}8ret6O4tfNixBn2NU@w{lh;ckqCsE)drvHOf;#!V|9rVGQMZe&@-{lbX(NRa zPHdTR1h3r_{c4J&5)esTKCOjsj5|RrMR2o}niPli^2J zk4L7bc7FciTK$C`S8Rmg>2DvW7Rf)%tNt|F>z|dxOuJ=}S_&FXh`IJVdQkjkcf$2D z*uZTVxpwCN*~Z*3G=%HVQb^o{>XbbPV7U3}d$|MuaY|)>hhjdFF@9SA`Y^|GE{Vc7 zkX4=kU0ll`oe2&F^u25M+FLZ=It%CL8=Gv*OOJx%!}%okCMGwqOv`CbNBjP^#g{Vr zeB1Mrn2Vx8cjC0$a9jR(Oc3_09oh?jSnl$whYTRTBsJ;$f*Jt?XGsy{p<%$Wz*-Km zn!Ly1VrQKC9iBSh8x`zMkAnp#vN35s8H1xgEn+*?i|#w3XicA$N^#1Rj$@NQmuRsN zM~XW!RNGNe>6X-PHefl6#d}zK`!GzsZq?uh@T)f#o$>FBAMBU*2lSfu3&yTdh}%yq zwqlw5bE*g2Pm*rFF}hR_n*p2xI>-InzVM$;R0OCs=$N7KXC|9|1HPI+3e2<$FFI?G zyccoijx1N?v=wbE9&b@Qne`=*nVFfnY-2UU%U^{avs8PBYG~7<9hN7H9zsy}`GoE_ z^2uG&T1@!hDqzv7|JoD;HqIm&Ohd+oh?lu9BX5P(mfM9lNt;+o0J>#rb>7Gli`NTR z5{5o#_4H>pW}~+@g;5t&#i*8^Jkp$W)U;r}i}%?@Oi}V)hsGkSvS+8+NyQKFoMjUw zm#2&m{0v*F&8o%!ZZLrL3dDWuO;If{Y_CFIUCi6Ob=Xa2zGL;z-Uz_hjWoVTnvAl) zNC@M9_oCG@#*+&PB%Od*4ejo^NY?{sr9d0!0pLs(xVgz{{S$A2sUEAlCl&3tOhWiw z0;e4@7kFl%_1O#m%M!r4P+U*wLpG4A(rZH9H(ES-SqnEFt~9ux7`jNC?P9GYY_xRd 
z5Dlw4#eO^MVP0N6cZI~O?U^(Ywc)pD|g+U2Q_|oMWhnS)0ufiBR1!bQhwtJ7g7mqj1Rbnl! zHM71+j0FSZXQxr)YPq{YK29ptUlVwgnV$b`^Z5~JOy!ba-0{%2GmY=N;|3*VSxww) zuDT<;6jP2hZMNNtQQwlO_aYhRLn-ZBRZ(K%1#At{A*U6eO{1=GqMQq>v|UXlGue26UztuB}W@*UfdfNmFqN5 zIis1UzD$@D?!Ws%_K)Azlg00>UZqFJJ}!dfOT*Z3%7c+LT#r)(WO5vdJ`XQG3Fn`3 zF}d_a0i6`4@53$BEih->7;NyPu%9f7Ilpf>C0Om=U~O4HIZ(bQiKmNGF*YFuQ-W1H z*^9*!b@yd*iMOv{?rMpdUo)QF;Ec|9t~Fo8l}fBFCP^c~({rtSi~asEr=M}I`U(TE z_jQ&Ze0V?Y>febtlzi8JWlclt6+}}iApHP{2i~t)bKd;tUC?k`0P^?ce?RiJele`Z zBh+&FLKB}yxuPIthm%jzRp0kMee%w1u^;EEtOBIi_ve_4WT_A3RPQV6+**jU6_d*>o$RN zx28%f4cyk1LL?au8va$T;tVA~hfPq-2e7rUR>7^*P#52>sw4DpE2bo1s3A6CkI~WT zot7lKZ`hK9$7#UWhbVV{jT$6Aqj`{nDUr13ci0L#{J}BWqb#=m$!Gp`IVR2sZ2I?& zR2bnwI>z+ZS$Y%-48bof`2RX|2cp55SlxMAIgyk(Mx}Qhj{I0^q%{ z4hbkc?CH?O8v5_NuQ?^lQwb1X+UIN)h1%)}cLI`Wk_k zS&Q)Bx0*KJzHH1Fjc)y_!C&t~a5b+B#Yd=JLBXb-M_A4$7iU*(gd?(oydy{+suVa> zMN5?y4)dZv!yfuXC@#E4NWkINyPnU6J%RK%MS%KoX4 zbkA8Ky^^#@e2DR$$2B=MT~jYlmPYEFTlc~4dfWQh7Sm?Gc~z5i@U=P%;3Yl!`UET; z)cvXa*({OKX(G=N?XWgaLcnfVXRq%b3N4>Hm6ewc^|8jQ7$DJGp+H!%q3~pQI@H%1 z$A>EIcpFN<-PG5-Vt7sY-@s+i$FaR(u$O;?20|nLbiVbyo558X-NM9pPti2L>@X6h zIwoCl%NNt~jPnP>Ha;5uHz+&`%D7D#CYr7AQO%9cA>)@e;J|w>ee0zK%nM}CXR0tq z`AU6S@%Fn4j>DjuFTmF*!4=HvS4AAR%( zhW3X*Y|rHuycp5P``>tr-43>m+yso^>JlYWXYNWA zp88tx7TGLc`&xZy9qt-BoC~20kw({;vN_VZvPDf!hFkxv-8)9%xhGE6;XxD?Y(Q9T zzs|Da)7KnEnrXo(Bmy{p$QHw zK0r!T<)GB{(QzDe1n?Iu%?-Nm0ktrdCVEuR?qvJ(2;#eTHtcZh9GoAen~qUdy+^+t ze+2Hh8QzPq3v9(->__G6P|$haNdNH}S|{wr$T%0Fg0M@IzTN}GJyjDYvPV2$8uY@3 z?c;`mb6fCd!7UTlsIzUg0XPmEcEoka?#gZ5_;8gcPiH^Y{p90#V%i}vaaI+#Ff|r6 zd_4@4%qw*K34rH^mfZ}x6cLkwo$|1yo8lD^Bi)y2(!6+;qO6GKwMjGaHt;m^cx?qdh1OP&3wsWFh9SpZ8hv!dS3tq*vIBuz`-`c7qPZU)d&=O2PLGpR6 zDa8+P{P6Wo)ayRk0N_0T0xl)b_JYGT0i4~K1LU>i1Ullf^DyWFag|^}qPmuY*XmvU zv3+T>ojXui7e5)!boP^nCI5Qz1oNkU2y_<8>%u4P6)!74{2f*}E(i_j^L?V-xfwP* z1NesFy{svD4*y~ZF@XbgXBY%xrT7dEr-YyY;C}#6K(D_?+%V@6 zL}UK$K!^0jyvLaD-*+bH5a8waJgg71_sY+C@%KEeJM(#G9_QiSf6l|bL1LT5-U#QV zIB&)v|C~2XXd_O4ntzAzC4QHFen&&$JAZ!9=ih(*{pWmT{yqFT|L6Cg^M8K-IsfPP zX}%8J`#g>zCWZUL^?utFaS*Np>+^hGl+X8tw)mchHEh0r_j_LZbN|rY!+G(~{c}I( zt$yzR=e+mahmL&v_q+jvT*M(;jp5k&y%)r2Fj#wG-kHyTpMJ*Z{GNV*ekRVxbF#$0 z&QIo;`1IHLIMZ{H^Hp6@(>!T~-X@rX6|oBbRM;CzPT=X29J zrab+1E`xKqzt063$rArM_xio|)t_T#@%`)^=YQ{Y-m9?-oR77I8nXG?aX#iwoXaC0 zgU|kbu7bXQX^DTIJ74qZuXDxk=T3jLqk?{FvCH4>a+|MPK#cco7k>Y9{;xK{l7Y|u z>)d?4?$clAV(*OW`0HFeXUsm|eJ94e%)m=1J4kbGW?r*=SHal7VVyHdcMt#e-?5`Z zuyd|znsXk&4iyz*iQj7;jl2t+@a+dW1@Pd&b?;$+$5qTBAMg;rUw7OM{GFfIDrOiY zQRhS;{2Hbw-rvo$`A-@%V|DF?#4-A#4OcxjxM7ZkRIHQ(4{3lv;Ec#QE3OAaNhEH4 z{nxq3>$J{=%Oxr3Gn7JD$W$n|!u$0;7W>#ahvUa?1zx~hzkc>WTi!6&7cp|6qVwT= z;H1QVuaU(6#6K>;_F&Eimw@R3e}apQfJ3<=$$p#jtvZ|;8dNQ1X1SdCL<)FWnDFNJ z@n{5iqs%;xE|b^uan$eT<9s*kAH4#u2OfFG1AP5CfBvZ~6tQrxNOXQBdghz_`}H@K z{m1caPK5`5zedD0LqUGOMr*_Q1|Rt{6GJ_7(3RV-@BT>*O|{pz8G}~$iG8> zS#kjC8maQo;yQL;&c|;vzZtLboVS4H<261%ny&*tK}~7r@5TFI&%VXyaKx-z5+lRK z$``!v=lh{c@V!rDvGF~KBw(~T<806muB=GRuL&05QQi8d&&3E8Q53-aqo17z=-lFU zGV9gw7?0og;rdw)?ve)TNOL>Fm1vlMU(eSwe?Ryx*?j+q7Fe8*&kwWtKHT|pP#VbQ zcE&LH)o%M>Zl8XBxOm@Z=70S^d%oZ2hB(;$bzOjH&DSLv^Zns9pzpB#n15obNBaQo zG`~h^;QRSt5&)7?N__5o{`X^ zG4Few-~NQ}`(6*8C;FrDz0QGugYw4wT>t5Njeq*wpZ)%Ox*=7)J6K&XK0{EjmqWx*?d+~RZQ^Pwi6 z&DW2W5~zc@-~Nn;;m`i@X2mUQLss_hx*7f*7tkO3RX_gM>k#I{Z@v0k1;Zu4YTo@$ z8S^BLM~6#79dcf$_*uUUb!QaYFb+_Mol|$f-{X34zqoJIPv>!uaRGb;aA+6ceZWIN zdmziRMVvLy*RTl)AbT`_g9tEBW|*h&rI;Vh`TFtY_RLFkv4deHcb=Ed^TqfU7tQ)b zxTxgI*BvmpzB+8c9L-u$UmwjDMbFt+Z^gk+9y+qX5@hhoho1!Skv=$rb0deqRsgpL z?IpFHHKkH;ZRU%)A@c~|==q{tPKD>>d~uqEdNa7-I-8Jj4K^Ep+|4*Qxy3$t<7lss z;KXm9EXk8^RU(V>NtU{xwr^~8d$1Mv;Jm{lq;M|j4Hv3vV`?S1GLDhlC$fHhCLyjl 
zf$UTe7b5i(^kd^2AsCb|icKK~gt$;6@DcVs16yL#3J29;&A4UQ9Da@9 zr)lXqw-NUjfMEmfcq){{4!9GM1FZZLe~hif4nU*RFTNe)^~JJ(;yJ`$!`uT$_cKnv z#^!@h{)`=D>klU2TjPMW5B@BU*ij#uJj4<|eiREFJNC6FUo8}Up{@w=D&!JkFmIC3M;%g8tFnLfP-eN!gQD^ph&3I%y$bP~)G6M|WV|yuyp+ z(r*Pyd1@W7kdQ;9b{^G6^2QQe$Nun(7zu(g1cwbl^Cy1)Kf|NImO$gFe<40keD@E( zzy^d)mVg2)ipW#K88!Ifkd@FQ*xL>>;KVo39cxBLEvh`?x4_u_%j^BgD+8ku`e5*{ zk$)GtFW!8x$Bf;#_G$+_+648MumaFkFs(YtCL9ziNoG)clY=${`U7p}kQ*t1RYEWu zbcX|9hfcGTM%)hdY6Fjme>dbEAx}xoC5fpIc0A#e8}h*)F0<^NG-$Gpy8i}qLh_@Q zqVd3Gf_$HyvegK6JQ#1paNu`KR{a)z>ytOM(Pg($%i(@#Awuj;tZE_`{{8!lws@$hr}NC@(%lRix@5HLzNlv?{N(f!mn=F(?~E2f153G%YXCx|L^eQ z{r{ax{$Jmzguz#a*gCE`P*4t+5XOu|L%v|0qHBQP$P_@Opol}F;T-~+1e+cE9dtJp z1A=D6$0-!GgZ&xtX**<)Uqn8|V8sL`(HX*l9LI+>acc0{AfFN2M$`$!?cdMfHo*NL z8!bQ;@L8Zf-yg1rf7*O}fRXuRZZ$wq+5!FocWH3^x`JJ#J2=ky#Qx0-)Oq2!OmQ#; z0`h%4Kb%sMPsjaJ8lTMvgcLarULK5Lf_f8tpJs52Dg?Y+sTn>*i~{eAOl;ENdYS+s z4|XQppC{K>WBC8De;olh3n3kk1oHep?~^h->NPA@C-E%se=CZ>Wx&>l-gLoRLEaY} zjX16{n-$__f{x%Di{=X;H6))6gdG4aCC70+z&S9@or)E|AnTU`nEGek|HGdX zi9bX!BG-c);Ub@V@i%8Cjf<&!wf z0Rq+WV-)r8!M%m<-Q+v#(}1Nt40ycHP8tYg$;F88H9Q3VA0mr*8H*Vn1|SZcArPD| z@^5RxC%S9c1tZf;#c&ldu3ik}bslpq;iW^}<;+6O<~-pq&E|wi0ADqm3$s9VA>LL& zr~pj6f5ZJzzlQaTk^u`f1Bk%g3Bm>G3&T=o48k~_rH9P3;frPyGIMnPNH&Np7(f7Q zDp+u2R^u`L(BpA79?4^RI|eAPAHP7~HD1idxA*D=tnd0Vp6sVqgkW+FEzGc~)8%7+57 zf7F2&15FE<)TZNMn}dcO=c{-Itpm>?BG@daAT0v$Vi_Dj^JJ0lDR>sEt{DSfiq8j} zkt&^pEEyu`Kj+$Y5&?11APL8E(Z0@5hv}JONCgdB_#y7etA6POlLX%t4q1ukM5G1{ zNE%B-oKFWCotUIG+WQAUkHL51?Ev2oe;kY>i|+&bHeAn3;OP(FGluW+xPLN?b9NE% zE;lQX7;?R2xE^y$qz>Ie(YFN802=vc68L3==sTw#;IzPEWkeOM&Mf8*-^F!#!8kZe z<}`eto$KKNb!5^8zc=$2woX`#N54`lr2M=vcWn-jQ)? zGR|1zR*A|@G0PAslB@)bz{8v|*l&pZg<$bJkar6lGOuhH`4^0-m!oOzf6p_R|Nfdz zqmt`DlKy3k$oh56)D|4PJA>z?hy9Oz)5GtDTjb+8goDzFk(LQKzN)b|!6F4C*0Y`f z`QrUZ#DQyg%EVle=f=;S$>G`-_-clAh-+Hk^<(QeE=cMH-^&${7-9`-$`n~$kcuk` zIt}aaHYg>>xeh4Oy+9X)e@L(+RcFk|F=_nX#qaGvcw*i^$0R%-j=SIEX!;7&ZejX3 z?g)@8sH@;=7V8=)XtK^!Pix^zl-2`Dip)psQ3!_h(BXY`=q{`kGJY0HJ-{xIV~wC4 zy?}8=Wc2rEvVJkA_}*|0rTqM!tp7ESrm1-UBauV@ymoLeU^KAre=L+};`?N71U3l* zvJP=?$aHWDk-5b@GUyyla><|dD`Ki}9E5&$i0nshWh}HCcuXFQiFw5yLPkZ8QT&N9 zfO&^q14`&&Ts_Pyt}nH4QXaJed_gzNNkGO+bZX3JjQK^s=5E?Zq<$;)9ylK1IR80b z;kflV-gPZ&565F1f0sVTDIAZ7<8s!u96KC8k~kAPM7HOJ&qty^ z$i78@3u^BNRT?6S6jxlGq1kL&k$Ad+GxHAYmQ?X#(dCf7>1NT4-9R12f9KnhY&s zm6BXb!22PSf%iif=@uRqdA}U*2aA!sAM}Z3498Umzj9AbjnJE>9905kWL;d$>}5o>L$Ms@D&)*`!`8$67*PGc z)}Td8fGQ%tut#O@9o$ZW582*8cV@%6r~!xEc>Gt-H$hBl!d@WnZ4PVwd%oxY&3j-h z9JF8i+xPq&hgmuFH-OD4z{jS+#s$1jm16v-kew?Z###q%MM zl5EL1|64tQ=MhXYS75D8FgFE}p~FGnA(JjzR>Zm8L0*d~Y^gyPha z5Wg3Ye~!@_jxrt!qRhnh!1&pLj(SQ&zNMuhsEM}^Jay2UTtRd*zE8zD>70SFK|6ZL zo`1E*h;GIj``9ryno?WX_dXv6ASZzxfiXP{pGBh60_??qG4>8RD-fF_!@b<(fIXmD z6t15Ic$V*T1Z4e2#9sUtbAW8(@EjZ1ZA7-if2LqOBCm;E@h_em=W*zDH`qCBf&1~~ ze_}fU_kum}=kugQ7Oi04GU9{T5!pia9qWXN{xhFF0z?M{n*TVbW-7>TM9_pVhpxc0 zVSiywZ8K~bFe8c3p|DL$qE|k11$zMCGCjjug{?_`|N9sjTMwVDU+1zI8`kmFk6t3& ze|HHJPQl>uU0Z*@&y5$!zHgNZvSE<3eO! 
z?Sy9uoCR962p*BI7Hq=}ICTHvTuYh|f7|k}=h~31o9?BN_Zjv}MaJeod!HB9nt_NL zk@2kf9mCK5`{%*bb#%(VkqE5Me4+_bURA*Ur`NB`U(&$q#R zqKUlLfTIuR!Y)R90yUCPFmaHdf7#E^uono;_VBYpe*XU6_?+L7pJ6kQ^M{|k&-ug8 z;phC}XV?t`Ow%Cu4s}pF5FdlQf8caIi2+!{dC(6ezw$ZnFq{XyL(bcAI1lgsIS=nf zxNPAY;k*;hLtgKDUe{raSdsjUXa;^3KR;ujaL>=r8TtA9`_K6b`8oQWfB*UUbN=V& z&-tI9JMtWOce0PAguE9i;U~`lI|$DK{Y=i=qbDo$-uFD{F!KK1a9;g+e{neP@Ogjs za~}P9|Ic}EvWB)?{abFxm>YzM(2)==BEs8=2SwIDg54r+eC* zh)wDKI(Lb4+27|jtQNcPz9PDRYQu5q9Y`kGyIQet_e|JovGH~r*=Vs)& z-CyUzcgA!4Iv4W{=`;A9*z?j7lpu*IlGqHRrI7Xc0r?}r=~)m|;JhK&`Cs@0 za329x;eX(^@Ge6xuP40Ygg=;Jk0g!>GuR|~J>Xct4~_@jf3E1yac$s{c_y+k9`6vB z9lrnlQ!wCPC2?020g_XH;<3YLn8Ap%{yvVV=|GbI^BGh;4JqRN`79uW%HlbHJ|lh) ztm5y_NOAq;xXE0Q5BYt4@CjHv@*O;bP&+Ez<_x-rAzpta;8xVtBKm`M&>(L~DsRDX zPQJMzypZ70f5gaO?UI>>(s|_c?q<76W;h z{~PxeNYeU``;s^{@)kHB@#Ggd53qaOM*xJ5Ez#JSIgz{uK7#?NBz#4PW4q)t?%$K= zi!6AN5qvg(ug@2JkN4y=o^N=6;D%mgoZz*gTZ4sh#C>tU<8a><2!P1-=wIV|ObFW@ zx;vAQe?CumGXI~ggU|d9?=w7C^nEV87v=?@$++OXF^))4p^M!2Gsbhc{+3~9$j>mq z3FG%eo&e9CkWwsEMHBurq^9s(QGqG@f6lOOP@KpBqCwV)h3k@W`me5&0oPoS z^^Qa>o{QtAQ@IJ%!@&E9>3Hdy{C&n3{hj|(H z1J*m{5#Uq6AiHnIF90XCGldp=B=VGlJHZGqSHB z7mx+4Nf7~3vk1YU7bf%)xjFO`$;*?6K(SVW{fCEv9^H`l$3ub70Uj4TB+@4je;vv} z9hxu!DW4~KNTPQok{?Ce1++t%B|JFB4CHbf3!SGg5*l#GrZU-Q>Hg`Ub~?d_ZCfY?!*U+L0KPw zCGjL2o3Q=<=bDrNn}+-$^;_dH=Iwv|-KF&Tc%7cz?estW&;RHDMmm)KFXw%H`aixu zU9z(K4{h(BcK^Gxzt+y_k)8kBaIAiI<>^1Je-Z!3U)!FN zUZ~Tb#-~SWEkS^O652o;F$^Kq#%3o>%Y;zzlR*CaB?=0IBtdmmasf&S;G&nBbS+Ja zISPh0UmPtWzm8c7i-e^OL}`UiT2!MbJQ+v#g&4iNK!N3bqnelTc>R1iLPzmyYqr0I z$INmLdgxye<}~T)^|?6Te?n!#q9ga)lN(2Uh+$QkQhVMR>xtH%G#xzf&iQbCWjBYS zk5TaKB}aF1P`_}e^`oz>wSM4@8~O+W;LF$R_i8+jxk>Xv&tn0Rvh1$#p|=KfV2=I^ zWimNBOV>U{ho1nPyKYI9$P!zPQ5jn-ObU4-;EL`V1X^^?l`-mVJE5rB=+&=j0!r?S@0=zF=V}a|(`kf8^}~#BWPCE)XEDlo9QQTM2cr{abA} zSFvoIcHX>#$KuAJlDw(hrKPp0^UhNUm90XxnyYOuytCy4LkixqvGVPUTBY3nRq5f% zIB2)aqMD!8vQV|Nb~(81L@7_>^WCa~!^m9~3+3&VJ-=6jCYn9ado8J+b{^66{Vs!w zf9KNXRlf6kfV5X$rapA9cox$;dLHcnR!6*GzH#(L7OU0LK7i8PpFO*ntK3yOny*jK zNsHEZN)5z2Cn+dTob%G5uxlSde_1iBeLYE!^Kj9+9G30z(TS^FhONFrJAq63%)`s( zF?+b8eSANY+)s~Bn%;1)cQoG}&F(3pe+-JHM(XnLo;)sPKx+@}@z7pctE1i5`@{)z z@%i$%R#+THMi6b4-F31Wv7vVK?`WD{Kkl?5c(VhSF4nX3Ap+?VZas&KXDbLXfvdOZ zg*`8GR-vDk;Ud~s=6ewcdN^V?jUf81MN ze%H_Sila4cVzZcnUMkah{A|jKUCwBIZA|lOIz5#)AT4HDtlucx?H1a4GG7X4kNprT zT7+Df9Nu>cwcDGvac@QHbRc@uGJBiZR;5y4iB>N6o6t{lnZ%M^%mN?cZa23 z&t}U27Sm+x&{vlkKkgQFo?egW;3M?cbH<<5x1y}0o7(D^TVKxs1UmW)R+yx3i}STx zmvZ1~a}MK)+D0?I7S;8OW8K%J8p=8f45OehS{!f4_f7mt`?$Zo7wO zD)x1?oF1Ny^_)ukM5AeE{u;UHdT))6OrEu`x_o)lNL=^qLPe+f>;AOTEnGmFh<9H{ z-PEQ&pSR~(yk_2KZ5;s?I6JZHQ&!(YnU){LBAxO3c83CC`oKER%l--?Wp7@gv)06z z$S1t7bX#(UOL?WVf6!dJ%cT5T_w9H#=Xya8O-mjv*~jFerUxyd0zkfGbtT(CQEXS& z@O^~l$i3S>U&Q+t7!T;)dez467R>cNGP^RL$jm8489)hI1^#Yj^BFsDkEo5l9ZFlV z_sgWWaG7;Iu}g(JS{kq9Xq=%urK!|gcg57{8h%?fJGLOFdmr8tjADennJeZkXO|-eD2=)4f_X&`dx;bJj)lzBr-M*2I zrWfS==~dNH#LupIp@bK-h+`hYN(W(?I6pi9F`rDEH>75Nu`{;bdQH7Gn(37m6@Yw` zZ5&%?F19%Lf7-&?}$1&+k-~P3SS#Kf>p2Wl#t85olV(w%K}KtV6!< zDEj;y&knkA)xyQ(STfpne?02z^kBoudPKhKTF={J;~sVW9^6OtUYwWdDXypM$S ztEIcSf490W-dowxZB4#=X(vVI+ole71&Nx9k#OCY6TLX@sON6-2%fNn3#VMPeQ(5L zM&ox+F+`B4DGO84ED*o+(w*AR^Mkk=iLO~3S^sf?q4W$LaB6RoQ}CD;LHLX^{=RE> z@&(P5t+bscQu%Jzy`Br@J-Y2?+M;~1=uAEFe{E|vGo}m6j+)hEe@E7a4>#uBV&jQ3 zYo{4fGQJAxqUuE7go!!7cTeGr*!A&s5#AQFJ-X<>Ys6=vMO&cT_;PV;D(o6-G;Y_k zeGma%TRUA&pG0p~TQ7N&!f@-alTs<=j`yZ_pi|nKW%TPOWRMJ+m$`=*GWAUC~r#dCgJSGMz>pU z=xIQoPlsFeT18S?sM!Hs%9HJWsZ{R9>6>6|s*f%hhtn5j>~n+OYkVxepYlVa#v#QO z-s~;ygc}ydE)&C^FS|&0FSnztw!G^*f3mfCznqJY(qDe_s7~*m4mQ+d5_!|Sl}%LtxCZ*SYcQ6l^dgLw^>nHCW 
zcJ*{InQP4ffV_UTHLNa49I3wpTqR4}3oHA|_#sKp2ll=u&=>b|miZH98sp9Se{njj zSM>9ljUwb0OYS8VuIfgMF1DoN1f+3xcMef6$JID2VwKI`;+aet#R zE!eeadrKknR)}XauDOeLT`smEysSj(<4s9t(}SGcd;`aYFnj0O=((He*75#6OegciIgbRl zzjDlTBS|lTc1P`O((19=TuzgGvDh6ls=Xwr+8iyHY}-EWfmmTLJEnhiOgp=+-^C+P zq?5h7#LE0tt-BRHS0@Xmf7RB&KWnwzQZJ)@m+MW>dmsWZxwCvh z8JS0l@fBNXXEq|@E3H(*A z4d%(2ncfNfgi-I7rl9YLs@lm-J62QH2|5Qx56yGtF_ zot?MGE9wn*03?s)e^dIb!o4>4?|?K@HKq395^XHu-i6iNnkAWeQEx)6AM*n2&};fxHxs>5XjAm=nKwdDd$5R` z6aFR1k$Ss$yVr4N7=}|_N2`e+qL;fh(r=U7aXl{PQI(q&e`T!;->jP(;vC=IlA^p? z`qiDEAmwmA*^cyhy`N@s0itboL_hE~N||}iV%kp1!L zZFbWWujqntf9kCKAw8+o8{d}K<)&TlXG(M_Qo8ZcMr!2Ujh+8yRjPL{O#IUG+`hvcDKbqc~N3+f0I9lvf3VgTQEq8c=18A3W=GD8Q@nhlioj<*9Wlt-F z_jOIbl_&GgH~a>TTDTp8C(La=FE3uj`>*46_pZ#7f5K*-sDsOT4h_{XRlJB?|9S!pj`PaPPA>Y=d32X9 zDb3l%NKUnsPb5d+i7B+qKs;*s1HKMibFTq*_ou6{PpI6B)-?F3obdNTT$8sOpCKQwx7w*}bsCHd( zd9aX+>Fs{q$nCkGj1j0oI3~-wRqUY$%w(fB(_lZk`||2F^%bi2F3XK3Zi{Mpe4?^v ze`FW?$r)@t05L$$zbpcJwsaibOT~);6o@XXVlsb~M!LS5)#Z4*hGD$5_qTUo_T2it z@vftTtIam9wK^u(s!5n@v*VoW?~nuh-loTULTHYt)Ke)XV%5W z=~(r%$8OHL=OFN`G~3eBR7VZ{Vn0*q_ip-XM}O>q|4CDA*mC5BNVjs#sG%NG&`x&!akX&r>Qx9v0L%`%g&*#bwBs!#dL*!z!%Rk zjiSw0tzci=W2f%X!gxJeT>Yc9Ty_tQy?@-OS@d39CP}UIshOs)c9NG}P|Z%D|J1M2 zJej^;yU3qauYAdSQ^fU8PU+pZa$OpVYfbbx_ z;SnG}27&ncEb4QvefGZR)fDs52_RVpwe97ciG$}Oxyw+YYkzlC zjZUgxb}0i1B5qEi)VP1uc3^2w9@csKIx2up#<4SUvM$i!eRwTEIpu4;eF)FH+hl0# zW7%qDae6`~@Qy)B0!zsIS$clDMJAM62t&{KUV%IAcvrW*TAj>Fq24W`d(R`zyDxdb zpDAs>b6n#o>N_jg-|jnlH~_7Clz)hs5t$Kg1YX(&P5Aw&YyRA9qmQcp&Dds#UM7wO4HO_j2!{52 zLhThwvy*XEIuZ^p_qg0I55{a^$-|J}@jEfTakJM`Z9?|x#7R^4iY{f#zkim4Q4X(Q zOV2GxyPv)$_46wX&+lZK80T!f4-c}`Uq&eh5+#+MYme?(cn|%WGUayXmGb70Zvf5P zF-0do?-SAVE=wMt#$B&N=jLV3M``6jeWT4}kZpH49!y?-xV8X<}XoLvM337Xx<-&P!DG77c7-w#pmZ9BJwj!c09 zBi{^ze5W?e^RqBH@x17R5RKuX>jlLx3)31aHZH!g%@B;|K6PWPr}(oOo5sMCFK3>) z8Xx0GJ(j57my5#@+BHyf&}Q?(d+P>Ums{~E6fSGaW~X%1(@GB>8GkjazS*sH{Cq$- zV!iE{XUK%muBETc71WK#!Szo~xwFDY3%2%6+2qm^66z|xXzu-birq+*W^Phrdi&_G z>n|kIn5kVI#Zc7T+7ELF`wK31_o2G8i;>+`oF2U+nlsx=L?5r|y|LQ;Bp!NUx~po| zugQC(V_tB}iQs;<4}WH{u#V)pu$)=7WJUj(CXwphis zeS3zU*fq&a`*30JF4}zZZC-@X#+R6Pi6+UhN#7XV9&g;6pnuKJ#!L+)B*EdBm5u_z)A?B z8lRZq^(d1Vk+U_1erHUq*h~_BNN9!IQL287{G9FIx5|GE?HlI#)>2GcvG8JX?~xZd zaBHZlEFxD%1Ak(TYZ|BGvHw)>Wb8|y@(RhIs24#&3>Iln811I-XP$fm8H_Ab7w2$J z7m3;L4dL>5>YlmH_h}N`3ywRyOm)9GEw1!^qpKV=g)@}|8^P5m2;H>xxcuOzz|g6K zdYDq^6g>U-5E}32T$+{-3wG-8)H03SZI6J*>HXgI#($f}-97OmUnN@1DQ3#IMY5Kb zC?2VrHj?$*iS!g*)>LSifF$i++<>Kvh6frl=~kEJyL)^RS@hL5&IJUj!=YRYTNxF< z)=v8RFb(3vw%U{ZMtf`Vu7+Ch4<@9J{`quxy&gx9JvS-lGtF6-bM6><3s|O2pe@5#`)Zr6A8A>y_dpZ!`0V#IjI5fgQ(Vq7?2Rcu-9(;m z?UTMKAhW`rY3>2>v(sUSg+}LEW_X2d&p}PCb${S#UC5^H6F15bTitMB0P%W$>9%Q? 
zwJbU!Y3ck*t83@07QHGb2l0sM74rKzL@qmxlD-V7;*IGPz3!59M8D*?YvWC0NMyy{ z?KvTsLhR(i;vXz5EDybCZNE<=Ba2X!3#?*IfR^PQX^ zdt_Bi(=e4a9n!MMsWG}I+GX=Q>2mScb$^4{;O2R8jnKb5KmF6I?-<*RJH9J7c5H?C z4pINK_5+TDA-=#OVzn9irX`1FjCotECv zk->SgY1u?%a0N8%NcTxg>^>1MY9YJQ8(L>m9-`j#0CZ3SKcz2o>z%VnP5E^kRDZOj zhKADU;Tef>|2g){qIlQxYgczo_4kX*=IJOTN_-gC$vd5#FYN7u*1xym;b z`G&f8y(1cr8L0I=YzaahmgitO)mP)embmwTIn2*Za-E;hcL`>WbQZg_< z1GyXeXvZPjFlI{X^uyDGI^*2iHE&q+l9rG#EP0j*sygXWp(}AL5n=9iY0Z~Z;;$K)msB+U%Xj=8Tsb+q=~o89c9GR`P# zKxzG@H7*x2ZTscvr*&syHAv79d1yJXHtAU<;d`w$8*dALvhG9Xp{q>ZbjV(XPKK%z zX{TMiPbHk6Y1r1re$Ls2YrZaL5H?Z3;VlMZudkCY4}aDIz212=SvFlv z&OLe;Oeni76!(w&_nn`MArn1vOCIW$-GH^5^!;i*-;JwWZ~fc2zO6UB+bXZ%@PzdK zF>z#x^yjp{)s=bi4mEN4Kms>K9BG)deoJ;6@ys;b!*V7r0@E31cr?AU*Vou)=T9Hr zMJ_!+;BPa(#$kA0w150bWwyELDS8)@N-*~J<57>|`7zCK?Xyiw(ZX5rom*Nxd$je19y(WBy*xJPsRD z^rZ8;XE(NUE)VA6w|sZa&!6GJoB^^801e(uBh6BUZJjbnYDs;pJ~*4!!`^j@_RTj^TS<*VT$ z62?7?D)6gvB|_DtHUGrq$?k@2KfjFTCNkYpx1EdG&o1J?7ir$#AX$n7_yww}0Iz4? zpb+_~s((GC*PyA_ZHVObjM#I#+EfN!!EAQo{aqi3F*i-(qCmLewVd7q z*BAZEq+d^C4xAEJpmgXP!4rEK!u?0LI!!h%pua1RWqK>B+9YKCuuV^?f&<|OG1#9- z9qD4OzAHF4!RJsDQ_hmb)AS@x^kc~E@7Iw`gRl-hpiz5q^+5Bc9_Lb)RkAU&c;4n~ z9)GFH+k@zC?61L>h=6n)%=|R%UR}1{ZGE345LM`W=4M`xF&dLxoIoAJIIC4cOGQw?PJLsMqho`%b%rZ<>1M)mW}Rdct# z$AvhzVs`W%c-8U4E8~_)?1cpSoyHZp9-E5+JIX?8}X^#6c7s$`6FGQvJj=DHb;%3``sPT4}U`@pEtyGl{OBO_2`A*?rdO=L}lA2=e1$hkRGfH ztv$^|p^Ne1E)5P%RGuup4Ro@QdNyr?1&bl@6~Bkl4xKz@#HjoFPVHkQL6Uv{2;M5K z=|$u)3%^SY!cVM2qfBm|9Y?;UIu)DRn)%rEhASXjweLY-umOgNPT*SR7Y+WQxp4Os!MbB)f z@3`UlIQ90*>`0ql(($&h z?V?TFz-m80OQhz>UMO3yo8#X0UVZvlg|8Gbd*l11Lf1{_*7Oz6l|rZe7$@_f)7=(* zt!P%=`Bd_!nqH0P`;>HW&p(frc~A}7@VW=tQ>P|uZhyAd6`&Akw{Use zxBxwzj%F?8uVZ%clJDLigi%rFo0%@L;$vzG-B$L%UB>1^$g!jl?Sn8*x{fxR4ciq7 zuUyKz$8dZD5&epHtn16==#M8dwojL0l1<4!p6+MoxchG07t|yzL4TU`3Qp^Lagv}4 zpeTOl&u8mRL;1rGQ-A5y`t7{)hq=#R^mY3(5Ow3%c8RCi%3TaHPIM|#hW4hT)K<3` z8dBS07AR#l0G*vzG;E@9eQ&{hPAB!19@QI>kdfOTuFfdV0dEK3-q zJtUUmE2H`?u&XR{6hCUle%f*ftDvvTPC4w~VXv*OFM9j!c z`c`nKW;$!`I%{*?hHbYMz{nhfsntapW)?iU_b#}ZuzwAZq=T;p_m&(b=!RSef(=|6 z^=wd25+`4|y!xn`z6jmxRc=s`Rjn|>8@rl@{ZL4%DwU!2J-i>Z8Wd`yJ?1M7?40-} zt}#EDV)^p6=l9mjG=PY&fXdJ0&R4^_pHls`eMsi;YIHy{%t;7?Z5l z`N2KS_J8{*pSq|(ij)}`laZBaAAi1AQh$H0dU^I=Cvis4pImE_ff9@LzC*ciA4e>X zos^&qvtrLrqN6U|ZljC~Yz<&(q`~In&F9bSWsdgIwRa1BrT`_QySg;Y@w9`>h4ZNG zU7Vz#sps{f$?WBA4~D~08`lz+Mjtle#wOA%kbl+U`WTGvHQ;1;v{Em$H1%<#56)be z`6$r`6PO;}Q({Ze$wJA$j$NO*)ftX#cfmJ#ZGbVf#9NZ=$)hSR z*_)O=B{J3hWAj;cQdmQDME35^dgo1b@8DPSHJK9}5nS=%9ih(Z_b5XaT{FK^vqOgYJzATbG~>Ksj& z3A`npaOo(jZFkSy-isE%Ixu_|r8g5t-2nTSJwyb1yBwc)3tmk}pm_Xm$e%aF$A7VJ zdb`zNfTHyqYjF&Pam$Y04vH8Vn|(r!g8|&Ew>ttyUhk&Qx>1TOniy~K^@nGq$Lu#R z(bLQ2!f5pOC11zPm-7%E@JKuCM-;1U*!lIVjHM_+wpqV3AlTNa-2XQN&_qR%Y zRCKyL7xzlHUnn*Qw=NELr;Chjixy1xKA)fQmbyJT|9R{@K7Va0<>eaVWZ#7SzFhaU zSyRI(@?VZ(#_1K))d`#n)+>OVmGU_*bzgbEQkd+mlbe9}Pi$NQnu(jt&+^V#X$e9N{y zb!cLqFmpA{mFq(-giVIp+9V;2k{vENmr-jhgX~@(Z@qIjKSU!G-GcLx);$QS z)8m68Iw(8Y`}`F;)6jb4bqoXhF+QaIm0gbuAzH);JAf-;#CCr(7k>yzxA|l3nQw2V z?u)t|5Aon4ROT=2E?BNy?koeIRU;=|rbjE~>+}4Wjqv7DWoy;40ntMEgR4Af7S;}qEoDk4djgP`wocfx+vL5P4f7Yb~X8tc9!-GUBu94HH>rfwm*Jq`{7-X+iEuyqs_sp zZu@?G5RJ5GvLO36+L?{A)rugGI3J?-Q z6YOriFY%8{1{2DkkmgUU!U2Lu*&Q$2D_l=OEcX6hi0~|e`2WJPZ2}- z6Hnclmm9;CYF=l5b`L*M)Bh+4|1m0^>t@+se}9D^120P8$%B?+K)^T3hbu%Ge_E`6 ziqP=+!GHfYmlr2)q8k4Oj+^NH{{5#TjlVl82T9LFrK#d44*)$*@Zf$ILts9ytBS3i zZY-Eq!9MR_@nXE_;qV?| z9>OyY1~juRQ9_Fp>TX(geX8=knAgTDzSVB?y|hGNVLfljOMtGwUw15>GtXHtmfd?t zaMS(T)|u|Rr4qcq?!W*Gcbwi~$mI-UAC@>p;5}U)H+y)q0P!sfhN{%~xYCc)m~Re! 
z@PE3Yarg>I0igMBPyn(;)uT4*-gV8A|su`W^HQTBG{x&;Hw%?8HqAK@kK8G#MuC;Aw-gTIY;-sWvjA&#;0$W~#3 zA+`6m{8T(Nta2c)ul#+4vx8M}PFH~E0e{Ml{JN*A_b9&UZHoO5fW^d#+Y@^>##erC z?qo+4uzzRDgV^S1r^0~aAlthEb=-P}G*Y~6(*@F{8}=AOrnGu?0Nu3fC%7Yf7=^nM zZg?0|;qTI2&sKN$0f5UN`qVc41>HjWNocLU?8CD6i<1@8pzsareM0r3eq8ftmw#}r zf{VuS=%e7R<*vVVEPVb6rSCCo+`$1!jihPFmA9BZ;l4`PaF8V8Cr9|}EKONz0A@2Q z%=Ekh5oOu4K3ce$(fZKHfNvju8M@RfJG>q^KApvq&G%KcXBa<1OFn*E^KLzm(lz_s zo5<8Qulh<3QEv0}_7Uy3OYzz4)qhgjnXZ3rqVv=gFq?fm2tO42{JYyj6>vrJoOdJV ztfvfcZF6RIJGz9>(5ET?af!R0aRtwL~T~D z-Pgj9KI+!n0ml9?QID)>oqx)^A`n+N?JpQ4;})gQ?vv&FUge^~+@FtyNaaoAr*E>l za(ur5el3X}>gI*>`)O{@Gcf%YeH+hyW@r)iwtYeq>3{y9s?zG$a3zhS zk&k@HoX6V$0~!IWYo)?jR9ma}xeEKN7XtUkB*LFS;1e^(A31%DXI%fD+jgW@UOPG&jLrz!VIPFCl$w+WHTJKyCsyx_VQfec5IO2%SK*T#+4)h&~Z?E|z3}E4RD}U9SD@X-F8|n75QBDS6AJ84= zz;k<;aQL$x_UV-Gcg=231sHa@4R_K$`(no6mhL|QpxvhX`F?pZ=F@M^`*|aup5I~| zGy6eA&qwc3Zj|SRO>!@CJr>iGStjjcv^p2ZIl(4`@xUo#Y&TOsF>QAf;V;F@``baF zK0HN1x_^K33909PwnlW6)a`by5A6H$iPP_%Jtz=JI2cBiCi|mNu4KOwbx{a`BWqqu z@A`m%;9V;{>GM+-LA5nS-*@dbMmfl-k-NXVV5V`f0NA5{aK={M?Bj8(A7$?hr*BN1 z?JqyGfj@vUfP~d6>({z5>`HuPS^mYOGM}=;D1QwbjB1~5j>f>Xy`yz~_ z)PCGrY6fd)af}boR^`s-wXw$gf-}Z%zEHFUP0u)uscLjalODe3C`=JIioI^71zm;K zGt-K6vwwsC3+U=XD95~Wf!v@-TD5MN5j5FMsL{SWuX4|Rit9I~^NlO(668UkUt9p< zaeqIxoKd>8XaZhdnX0*PL@2d*N$~PQ8mt=KzHNA2yxa6=_`F78iLri=cto0TQD61# zV~$n!Xf_$TEKarCl?F%5Y$P^i0pAUcTQzYQOQ%e3uj55>>Ik zoZS^JhF=rfa^gu(&+m%f6N6V26I^p8daqyY?>b$30kW=IE(IqG3KcK%wVmMQaM+9i z=CkI*{;-nlf&-F7qlg;doiw3j(v2EV>1l&9avpntZPb~G>V5QmqS2=B4{MPbg?}R* zT5U~9<-!hnxF5bzApFSqj~OYqu(m7^2f;WsC-24Hr^S9JXwF`yr zK~j*n(jCB&Nn03B=@tndTvtc)pjdExvqgFtws(j7Y{|X3V0dsU_pb-UF4fR<+U~U6 zN@e(B$18-gstRYHopG1hYlYU?dVeXXaLt(8oe=6nN!R49YAgw^GE+qzK*Y*1^I>%^ zH_}s)lAk8nW0-z@Jk`SfM$xJS$s;_xF4xU{{4{pwyo%(ZijNYC67jweE$<5b{Frzo z77GVBt!((Y$gJ>2ZjtpIZ&49c)(uD)ctNGDbh|jWZ+_^XM&2RTytlNkP=Alq+noB_ zr>@Jn+wJLanoc0Bsvy>W+6~j(@71QH^(U$mqs$d{x?=*lipaF~XzR4Vel6R}z*4dW zB4U7Y_9fA-bVAS6cxw6fY7ZBgeA#_>&gPkll1TyTCt>o#{qV_n7KTA={?(op$OkSm zQ4ca9wgg#dinhrM+#dy|Y^Flm80tL!V=7ef1%`qPbK4}#IU z&{SX<1x5VPDti^`hhRnT^J-_(~6+c^Ds;p3pjDj(?l;`c(lW-?i}3 zYcL+SV=nawX?*3UW(Xg7Lda`ves}Ryg#ad{(?=OPE+wijf11~|MooS%0tW&p>f$QW zSG0!^TzBWWY|LWqn5!vS%7}$Vbr1o(gva^A-K=Yc5_$XG9M|KcnpfDLte&hV$P?Xn zDk?t-FI8r(n{jRkZhz>dtzAdYbHhT-O@7Ix-@0jzx?^WG%+oDhYf4a;KUEY48#ox{ z@B4RkOWXOB-~9uYNKlt}Ir(j%wH@+&2o1!Vr=L`Ae#44l@r>BDf{x+h5{(tkrPr5Jv1kB5$#Oe5+_R>S&?YfY_i%UEP)RwSDt`_^@%`TO_vEI17Mzus zGHtQ4zHLo8PJc(=b)3FZIZs$j4_hmp#nrhef0^6nPRoU#G9SUX6%mZ&48?zI;TU$R zl|Poj=lq{9T{CshBqlHHr`9;=P#jY$_NMqmaYC({jPdan?)9^959`~ z>wCVQpig?Et%NlQWZulk%+vJ(szLL%0^uezl(D6Ug!E6-^-OhpB8UEBK12h08P~J% ziS7EiC4Zv#u}NH?&Eu?`ihKPf@>Y}w>Xq{}d46GFEJE|5{*)+iAKFr?f%M(O0;+Al z{kPfQ)cfaCAml9;eXjM@xXc-<`9<$X;u;mgdiW~M$0GT=2n#nZ-*bude2#&uv-)Mw z=?|ZOa{IX~JJ7~lY&IX9JUa;XYZrw&U!aM33?XceV*%>N@5|&{zpEtJOi(fWZz_m4fN5`*nrgtB`d`Sc@;_|V^CL(%yG{i7H zF84M#?M|S*$mwP%CSrGG6JzR_yvfPzejRev+9i$w(ik2C=0{pwF@;;Uz-NvSTx2J@9KC5z^$N z18#+jdt$u1LKpZx$B!FsvfkAHC` ziHvst$T0s~UvQC0H_dgb7T62YDFDkS=#})YI!0*UUn1kohGSuzx9`g}GE+fE<<}bB zmIS>sI*!b{b>5{Bf1y#+b$4UW7+Z?eTNVx5i{?e!cY1v|1yz#!_e6ajnIuUF58wz9 zmc}LOyVt65JM$ZkdL1O)va1hZO@9aewG*G$#@Gz!^Y^p=EINBLIvl5HHMd+Vu_T7Fr|fNYc{DKksH+41rD1>#NIOjc(s#CF)4XhR(PB7eSLr$Y_Q zuoc0l`^hoeE9Mnqm=IYfU85rr#MgTk6Xt})R3V~YGmGe>dc7*4jq4wK%63+F7RIH~ zmNEc4TR;@F&r)GzHvIPNhQ2Dma?#}w0{MH;7+vdB=;bLke&Hj0huDX8ID)F{d45+!Q4=VxlXrn5qy zxGdrhOtyNXp?u72zCAd>r?uODq)?CEZhC*+B4Iplw`t1eA7_`gP$X+FM@POLzR%5b zjm)Bk9ML{z0vn^p*LvNCw@*XWQ7*YD_l_H0IlYpkW&-rTt$zVJE6IJ;{f_Ytw9ngc z;{Ota&NDJtRXK30alD;t`V%>BYnR=N;3$Pm4fT|qrgS;B^XeQ=N8dmD{`8)FL6;5~ 
z4c&JfB=z_!$QdvNg%p9t0JW>4^7jpDEPc?p4l3uSp1gXgIX!nM6`W=|!u8anL@UUm z?co#G@7L{Yb${>d-eyFP2%+rZin<7S%To_{15}4(Jn_rXq>kVS+}okSS+a#zq?Udr zn^xuzU+bSe+^^&Ji@4RFO+Gg}a!8#vvC~N#Je?`IIx9eG6K;nY1{U!d8Fw84M^Kp| z!8}EQpUmMPx%PbUdiDZ)R~V%Thb>6w--3bS(qZ%2I)CBa^R!bNi?oJmarn0Vv&4kE z98ROj=XBpZm;H3_TIGI2(2lZ@#yi^Onjn`HF90f&NwF$k>oGHP5W73r=u;D15@v69=6)AI zJWW>Ok6js6<`J!l@#MYDcM2)>`?e@s@}O|mr+-{q<#c=_^BLB?uToyX&<|s`kc8WO zzYUNZ+)=;X_N2Ir3Qu-9^?K=wM>RxcL%EA!h8K6MJa-RTYAI9$*qptuI13X|>QPQo zkA1BcOn~MjULC`MNEZzj7yBX6UTn7YOd2A_BKGC7bFRgg5YB{X5E|iv5q0Idh$|s+ zmwy6qy@v)|{Wx|X1J0jWSum)2+2ua4PQg_K;#iY)e3fQz;u~*jv+~Xivkvylr0Aa% zWP?u5bfdysf$rJF*o$n;13YS}WqW(XMOq4Hk2{1pcranjg9ULEVXY+dpmbC~=6s|# zxHw=^dxDJc`qsC{<4(*jy(dLpAJSS*dw;v_XuYNUOuY+fQ5NfUD$4J;DYpUx*wHK4 zQ|HQV`pZptOLt+Sr-xPSQn@(9;nC0BW6@p>*1`vJ71MvbeaV+CXl{VlW#M>zO;ikm!`djuSHl19r-Ldbz=!josckcxwa#DBLf43EwI7 zM|mLYMkv4^`?oCwVDP^m>Qmihl{TfIrG&NfzV5=?k-dw~GNr+rz$E5t(WKKpD07|n z>qF{4N~D)YdG(PEMeBXAwhpm9yMLPgarO){puAMJJf;`ZL!)=;3sgrwYZut?x^p+P zlEU7$F4l&tLZHup^XkzcJ-Q?m^o&^TG4k6~aJra1gCh}*#O|g6S2J!f@OZtS*<2^2 z!G&k@)%aSLSdU_Uz2T>kOzBQPD!Dl^b3wC9sD0ecvCQVsk5}n_s~5UD4}W4{WzQnM zqOJ2Z8m>6!VZDv-Ln2*r_8d7^7hLh6`P8qj#574Z8nqU_lY9k;>TbAWmThV%s`UhS zLvhy$LF!(^6fn&v*q5IggQ^Xfpqy6e%7yEE_Gs^PXjI%E{Dv(c;20a_3rWdYX489d z+RNiH^Hj|{pU6SZ&(U2f>3^&}Y^;m6JbXY3#=83|(hZcAlm7|tqZtD zKd;%fT=Mh^e4sv)SfxK}E%Jq#j&KWE#X-Nmz!J7JTGei!E==b}&3}Ee+~N$64LK$E zE&rJI8rD9Bsv zxf7g>nK)NV84l@pkCpZ_`+j;bR5xss9fb1mIOjL83Etby-G6;V9B;-<)%I39GR`pL z(0y#)V#@?aU`P!nc_hh`0tU~6AzpE| zVEL)SD-Dg?CnljsQoe#zBQjY(Zp}gmiDJQg*}unhQ2Tp*I4t)!@U4BDkFuo|06+K} z>ad8^U z9~WGV_xNJdBehJVFZgKV*|+flwgzLUjVmCEqH9-v6&^`9BB09w93pFkB86E$^ik3P6m|(=Y6RFt_$pD7O~At zTG!>9Nul@ZHc)Fa9;@OP%+*yFoWr>uHQ;AjMNKsy_~yiiuOu{9F?uKxx{C)_qSy0g zGq9B6mNiI*PHH+BMX}NHojApu z%O4K59p>B1<8kM;)lN$a6fHsS{fJ6u$SF9$MaQ?2Gc6*2e^#2?en4iB18RVVrn2Ai zwk`2uTu0}Ixt+deNexB9h5=WsDUrl*gY#~i0?NDaLMbb624 zB4%-Mpf{g-KDV0>;7N-3=)jEaRC{A0qaLR@Kw=!!Xk(B?kg-=_ij_ zmk}B{Y6_d=Od@yMbdhKSlDFNd!)&B~QOeks2fKd;#=8ao5M2V-I5O^lDfX~U!R<%7wJYTzg>(+_dUNZ zG4w~@ErZTJvtAzD!e)eG(cH3^!he}-q0V(Uj2TpSi5%-#CPsVynllt&B84?zZ}mA` z(DthDY5$CLmJ5RT02plOt+z`FBds7z*F0V*NIRK?9Xif6Q4EMjqSazTQ;Z|OTl3Z| zx5!FeCWW_ZO4L+&*1iSR3LCxmH~MitV9y$Q`wWi(FB8?_`x+oIo7TbGnTh#TlzSWW@1tq4XHtRDB8DKq zPV^KD6dX5gZ~0fvI8PHP?PC7$EC<8lL0;L2V$tUnv)P>Qw+%Jn<$qq7_W-3_IlEUf zP+O__ybAIBJsReTpb5D8*Lqny8>e>wg8o=){`$P=&M!4q+ma9J71`H-R-ykOT)F<% zCTn-QfA3z5vp)7N2C(IE@pCzt(1ImG_CjHT=HII&J@71)V@NKgLBJ;2Ez)OzrGYPb zYwYo(km4VV&da$awq1ZW=n8xPnDzU$hNQtQvTp z1Kcl&ZM^OorY9gS5#;1rXjFT`Ia2CvnCnO&C zn5qI2gO~u-9sXs#oV2NF8nZd1 zM;LAgI~i#{?SD~Ta{|Q7LJtz0^!D`GkdJe`PV=Wc>d(h=>nRuGS(+nK_NmUp%O8TH zH@rA->~^<11jCQd&tufX#xb~~YlT(^8mSA9MM1#TsfoPX_dNaRMIFOf&AaS|10>D1 ziP`z`dBD)pP_|ScHryM~K$t`IIMfyqYJCZ)-_O%7B7b}3-FS~ui4xvmYt6ah|@kn>OwU{cS&;wvW2xI?ns*a6sgIS-%tr4^sd)YrC81Fa%qDFQt$9v zSAUl?9p4-l1Ramw>_b^*v$s=AQ*Cw?qr*7lnLlBZPjc6b6!w$BE!=pR#m=m_V-RA* zP_eJcJ%Hoi+&|r@@#J2ZOeRqi7>M=d^}wIwU2xdmHQH7~RY5Z=O%7fGdGGW2HadPI zWSLF~Dyrc};E^BqUKRF-*aDU-x`26j?SBOwa@+ivs~T^`3#8(yPwc5Y$Y8lyq+JoR zMh0&8rK=$UD4`W)HHY~CIud<{&{fBW=z5jmWd|v=N8C>bG(*SrveogM%QQ)c3LZK0 z_PlN2f9wZPHBH&0kBaIVm~9#|eM+SF+rW)6x8H^Kp@ZI$N(vTuo0rQBD}=YD5Pt=A zfA32T;=gJQA>S&C_O(B<-iqm2`6P!mZ{;$O(WfCUp?PDNml8`S%XkQPCZ+ezV~)Lp zLu!;;d^4nS#0tBKy5vbV1Y+$0rr#fU`uBe&FFXKT{ufXE2lc(cmGKYUELA_e@V~Yp z6|9wi{whCnCzz(NmEr>}B4#s4q<@bJj{If^5R!DQEE{xsjw-U}cz40Nz!^ng0I`OhYI@aIBDm#Uk`W74h8OxPSafmH<1f z0n;}w4pYle>b|(Odkp0YcDA)1$|H0PYcrJBz`rbD4}kVI*0v&Zj!z2CO_8f0642q( zgJ7zCU9qztk>jjH*okgDDkxDc7qizsh$aCVFq=01E@cAJB>;<6p~~|G=2-uju>4(r zK`xn>5zj2kL8liv#s3lIPJgoKP*ir$Pk*?0+@$}Q_XxX9OFR6gp#yj3IC4jB)}%Kr 
zUhsQb2dIa2pz*>)3V<7PDdHh(PCSX-eVYV8j$IJnCyEObylyc2y-S7{3ss6&#bLsvOfe{JW) zeN1Ih7RlFRwlGv~%yJ zDO6vx!S-_!RcH!v71_f1G{V8#j}3O5SPfrEdy+<8-BkSma*u)600VJ%eH>plwI&so z?~i9;M1n;8-Y=Hlk~3TcAU#MQn9g*(T<^Pu*=rCl-3#vxaXDtm2{=2_*RO24-?s?o-KE*M;StPwecHMb2iuZ02SqbqtZiNWM+?RWRxPx2pO40Sy|z}?UCIlD?19= z+4EW%;a*CveQlW+7x!}4x9<<g}&NJ0y@cax3&@$Vr& z^4mIEwi8^~=6T(B<61y#RCV#v^4IVC#zi{EFU5vRg$*7pc8PtKnu*+b#Wltg^i}vH zAbXZ^NpgEwJWT4PLk~hcvoPbfjQ;k=6Lf8u9ZA0>zq?FoGbvyGRk2E);}!)vlC0eD zd2C?2n951|Tu?$spTU#Z_^w&h$B1uXN+NkYJ8ny{Mx3)M--{a^Z@zx~1**h*#-RRO z@Tn2o(|h7Z%hs&Aw2yBlABTUVG9cbe8v^d5c3nJ=gE9nRaE-4%d4%cWGw0dd1k6|I zF9k0i6g3`TXlcN!-ZMNS)VUi+2iRYw5$@4d18C`~u<*ypqV%Hy|Fc2NA}YoGb}My%$L{S)Aw zmEfZ;nMb(|C&Rxi+T#fqxV)Xf;5$wYBA~S3smlD#!>>vs2R&U`4+>3aZJZY)Snp!q zmHbhU7fYTe&Mcc$_l+y7SN8)Ld5F%E9^c z+v&)lP=Bp7clVKr|H=i^dBzy8)1HI_aG@7a+JY4sQJ47&f$p4;T|LQ^a8+KHo|R!) zm2O&{!M|-<2hi8zC$5tc4YjwalNGOw{PcSJ$dddO*K8cikM0V3Jutr~A^xe$%6 z4&cW3x+%&|^k0<#>dc-7 zxm4{qTRd;^JyA?IM+PtUf2q9J!odEa(sNDR{QHJfpqut9!~!zA>rV6fWv@YvL^Rpl z@*_AhD>|{CU-~Ee5xhy%M(09`mTZoDk9n?m;TA{e8r7~R5oi6A%gU>lM^3|3z@sz+wYEKB&Hg`n300_^7f6kF&yTjZH^&maoExoD zP!iI{{upunn)+~tgYBl0Ra;(YH_teo(N2UIg z(e5u>G2D^dXgw(MbZS_-3Id9JzjS=4e?FA48VA-1m-|6!cO!JtFD8r6sW>J}Vf{ze zt>55tEw%2v$ykn+9et+Y=8Vv=X1)tJT4(W)lD#Nw^9dn16w941V6W0V+6y+Y9M?xa zQy&GFBa63#RBv4Ch;%m)_&e$%hwf2#UP-&fnEzqnyWa8e>1L9-RS(?gt4Q@tG3zJA z&xfU!Mg`4!e&unc2h%JCDs-Jte)BY0rjob4->Cf~oDTc?`=Cci9`)NBFax!_tt@dX z&@zYwR4BZ#NXc8$?q0U3;hx+88kt*-5Wo4->H9HBv$PHg&qY&@UZf(~s#9~?wPXdF zU-Rf#_?tz*snYo4rEDM}sXf@>*;E)*3_v*ZokF`(e{td)saqYU`0DoUm+0FvYELGy zhgYpS!OEa8)z<*FD?y@=;nnV7{ zf_g=(%(n!6^DUV;M zcHI^UV>A?6JVm;cHBap8U~Zd>a8B2mkn{Jc=Ih*T@Oizs`pLvwEF<@o$@d|Gu1B*3F^Dut4`X6)RUfx< zFK)vd$jj~wMVuP7JsJ{Q?|W;VL&tAZ7|1m%_o2tYunJ%ZrO>!LVlh_~DtVxp9!y)b zugD_6-Qwx^mReG|5J?_kn-Ha6X@)j$A00ww~b0LGY4I!!$c(zkG z?z1bCbi(6YtIu4w-ri2`L-)w{GKllk9lu+;ry>8^Y3&crBCjoz?Xsj#`;9U6Yl7n3 zy_e0jY}bHg;m~c{oOsK8j6Vzhzfn@s2nm?^^OYhL8UNH1F4bS?Lq6`6&rQFxrY)4D zT7}jddlqK)+>_6q*Eel>N-7q7uX9Bf>=FiqY|K7>oDzRsnWO}wlGf`qdewGI?1Hh7 zk$L`ZX2VKR*V*C<SfSvlpUyeL34Rq#Bi3&PyZ-Kur8*_pvu_KO+e}7(S>GwL)pOciSmd4EC=(4OJm&y2 zxzw|zZk00K5A!76N^;LedE77=%FRqwh5Tvr`Cbz;dfhzD74Ts!^Hatvqt8@krhne1 zOG0?}2&0@M2YY|*XpU&j{Nb8ra-&%N=kC>0b->qC_k*q0+X9|XFm!T9%R{R-Wc|?P zfeYb-HQoco9saQzzVecCBOX>6?e>d}-yU`ta(=xQd#yY6=u2Sqn~0EJr!qwQ#la_z zAGqcflC9*X@V8H^ZRiVnhc^k{_&d#27NW!iT<`Nm<{9TH!=y?Lybd7edL`7!IaZ~epeXu0I( zTp9HHK|!jqrp0nXyCxQcr~aW|_iGr0HUj4Xq;7#Jz3jW(^hXLv?;GFIVqj!G(%Hc2 zrFPx9j(tOec?e?;yMO7wct)xih2I#V@UUg{!}rO=AaR~G26F9dfx|sJ!*GH6M_dU% zgNj$G#x7o%wh!pp?C7lrpQ6`#IPjvv#+qzHDE?je)X_|hQ~i?Ee+$1UTg2b2yp`_; z{wQ}>h6RdRRJvHYr{sMdM&-+`!q5}Oriq&O++2nT?-aVuY=T@ty*ta_T-^9hzBZcE z9JIQeIa0=RRYC6DZxINLWptW;fR?_oq;?A&sE@y?f34~7a&Ut>Q-nrSjG?XLwMEu8 z&v&hNWqqvYl<7@kU)-r$I}10hF;m5_fhDu;D8rJdKa8}x+{ZzWNcR_@q~epo-{Sbk zn{M2Exb>jH{!%~>#yzKyo(Qkt*@n`~g_Ji~jz-k7AE3T&LG$oL;^~+_-bmL?*}p@8n{h|TXKmmLT04MJ=LkGno=3$rcVvq z6N2MXt}9tiDrusG_Wl{*LYBn=q`>*@1@9D(R6hI`bsWV~kGbe;k~!J@v+1ZuGBeT zE0W8OZrY+sG21>c-XJE~X>7A$>ekcKjCDDj!ZlDN8Tvj}H;&%=6l6$=D@mZI#O@+? 
zZMUp06>$oX6@fG9Y^usEcrLY6YJ+n_8qq?_ugy<9cluVSCI>HDy4R|seG1&m^p`$m zLuJz9%-K}qnsE{jpeMI;)rBItJWXfV?+#8p{dGhZcwQ1fng-P*_TUQ3POaXw96qMY z_!DUNaM6Lx3iHXH#6gnL^ccY5tloSd1-|5S-Ws`f0!n7C`zDyZ*!_FT5wfe?^E&b3 zVylcy?JBi$0FsO7(|QW#`m~aaLtrg#(74=iBdZ(1>Ho{0t<}iL+oa_b_{0*8-XD}U zUtx=QZbwjc+Mpz#I-AwmrPez~!GdEyu*(%r84OBfx!yaqoFK272!?g|lgN4P%%7Ce`XI&r|Z- z6PH}+<^jt4m|m$ct5V4et8a!zRo|2Z&F^P-^YKC_)G=kDN2l62k`uFG3w0~JkH@X zj3_3>hEIWR&WljCRr&*H<9)`}BN6iFT_qmLjKI<*+HPhfjqDhT+wHr%fLtZN&M~B0 zvmM+xzrN$m;gA3c|4xy%jW-*2P60BqmHQ!^yS!(qD6yw;rnNwmEU{jJ!t4`nU*wfE zg7jMRuc@-Q1jQS+)_TE#qo?|&qXqUupC$d|LnQ3utTRM~GK#HF#qM}&G0{q@NSldY z0~qbUSJf@84GEOTd)&2bIi6Wgpgl=79Nz+{p1AwXYA zX7YqR8AiyA4ak@I{;C1Gk{SxloU!9jo5`lK@NpLQqmOX#PUb54$9rT2<#X?PvNI#! zo#w-ZTWvD3WrWjj6CL4>C35pz5bvCorn%z|CLsI*1m8!5p1=sBU3Uv6KydF~na8ra z%~xE~;BcPwKaZ%MYvlhrT?7t*Cg=;TW8|sUs;L1z8g%ef@33iyJq7B2%akLc^n0RY zH#h6JxlR8?VDMBjp)l-c_ zz++bQ-xC6LI_++rovEL!S3_DhPCYzI^Z$buE<`A|K(Iq{%`(@2{Ca?AUFt~PEK3UW zFKQSs(*P^`+*jxe^2+Ge1G6vE{oL_@F7VE^H@D>qJo zC;j+g<;k|4!T}5@1!$F9%x!%kvIiXw4i<2%6s-426}pzUmXo}Ws@B~ITC=)<^rq)s z?0krx$c+B@Y)2dPDS*thz*#3OGZCi^;C!AYMXrY_=0z$(A5k};G%x?VxZ4hapWe|$ zv5dZmq#W69Pw*(D26h=QH$exKp0peM8@!hu4V&6T!cn^{AnbmLKsU9%cukmlH6*S` zVy6IPeT;9na(v>d^V@wogW|UCg7JV2)8}j;Bg+t6?eMIhu(wnG_h&K5V(W!kB{G&Z%^u9F5h4+~XTp9=)?T2f-D26TX*ToaoIZp@P4y%u)Yq<-B z4aXw3=y#8rff#~i!Rh;<8o5IYL50azg>^gB#0aEeefFaIt9ph)pPg6Wpq<{_o7OCG z1>1>tW0Rk_H_o~1j|il%gf`tWQ6s2;?xA6pVy@qBqb%;ea%3n?-0>$EQ_~+I?&3sBhZfY?(M)g2h zKR5}V>;1ukgwXa4E&+Nin8@Phw4jfKI%8}}&^bDcB4XD>&vAgb&AcdAe}Yqf!UB8P zr}dWXI4j9bWrq>d^pnJ7L}s$2%<)-KDAjHEALv^OG}6qPk$O89bYXd>SD|<(!5w1Y zBUV!a5QazOCux!YcR(B#Tt_nCe^da_K_qAYQI3L02jORUsHFObkIdir*C=0&jvkW3 zvTPnNydanzWjyUHH{c=J+So~wz&-}g?p|>>HMmb?Ql&sNzgc}*)(z6|EXdq5 z=(y@;);iWqSKg(#3xcZZwp1V@^N4B9NfX!T_V(|A8tFyza&D!h)|use{+p9AgMfHOwAwc1%u?k zZ@=K1&9y0H=i`q0^8w%2z2)sN?#qgkQuq46P$V0R*wxbl|j%K?@RnPhS}7(0)=@qeigvvxK(XFpIG+u_m~z zmJ(YiSb-sRHVP5(zaJROL~YMIzs>_9J$)2Oq7kKQZ+%Eo&nlu5ggT-e=)&*)u1MfD z7ugug)nBbh)&ObUb7a+xN`|I|&Ac*Jbn&A#I+k6dmdt#KbL#z1bon#K2IUDUW2lAo z7@qfN980W^h2R*A+YWvhr(W;s;GtII_iAk%S@E)CFt8I%XmOyiaJ;|JQ4;XTorU${ zPWY?%DRkeKt1)ZWTSJ{1oEjhGpgw!+KiC!mTRwO{^EvKl2oZEoYwWng13)Goz`Y>0 zTlvDPXvF+^cN;NV0)P2S&X~BJ$LeNNYD_)+`1yd#$Wt>%={T|Ub-u0op5D@vPS2Eq z1Kv{QJq?K&e-WU!xG1mp1h6_Nl}QMd85G8w#dUp4WxA62b4LmS|M+u2V2J6guw+eO zzrtxXVD`$GB#q2Vr0k7)xaPe(vv=m7{A8o3G6W+i4@eLAuc$8PeS2{>P+Yq;Vf-~% zVHb%A9WDX-h(s~jT1ac|N?69C^EXlMn)`7q$m{JJ`eMG*pFtBi;J8}Ycs9nl$>y~p z^2#vKqUNa_&y|v-nh!~?8+~S0<^GF2Sc%3JUkly5QN*@w4?B&xTTECUQy8C@YM4Pt z7w7hc$zkg&Q|BIu_wSoicssL08N&+a-o2j715xTZFhd6}bSWhahA!fpFGA``W-LS{ z5*K*;G7@{wVId2)K;Y$C!{JNJ&(Vp)e#YeZVbKjsC9;aPKR1!!sDAfD(gze-7W4>I zyInCvjoymJOSROjI<(`9cH7*~jIOB1?Sxg>)h+`Oc2U&KiM=XOMwPrnq6r7Sz6ML( zGvKRpMC9PR6ygSlAQl!h>e?RT>nkPno*A5EH|Zlh%xDbkl$NZUrAK_Go7M~Kuz5Kz zY{oC18~@?WamsYcnnJW`j8DT{esv2uLyXKyRf7Efa~2!2mM?a*fsK?*l4N;Y`-idJ zIbL6=<-*-{);Ij>vkyNcnWd#U*V(Mn`VA*gZg>sBuSnN@Weg(`{F6;cd+De#F5)?CVuUB z(N`DrGSjqwzT}`99D`!4gJyQtLoSkG4B+Wcu61JmQpfB(UEkBKz5hl074AEqiUhat zJN4ck4LS?-+soJ>ar|8Egt-JK^fT>SEjblEC zgMiHldMWt}`RwV$O^;!&z7k3o6)|V>TYDwfPXCo2Bxyuue0m=B{Ex+&=MkK55KHKC z9y~E^XcRg}U`2$<=J!g_Kdvr(pT{YmmEcc+g+#!A8~G56B@CSD!~8tVarTT6YL=5f z^FtD967T$kH&T}+82G_U_38>s_xEBWfm-a-s{FI=m>$ZHO5aY|PDJKRE5ejy(p0bz z?+k2k+kM_n^2G-@(l1=c_En|-rm`=Y7J*p3>sL(hd_>$3AEPI7iUJ3hB}!2v&#!j* z3GFI`5Io@zCnjBffZ*Ht2Nb?uf(eUG%dGjMJ;EBz;ow%CO#x1Z{T03qwMqsw$H?YL z->#A`+1UzxknG)(ym#0mn~t0(h0t5Q;+}CG%Ad9S#R$DjY0F)nE4@77w9(c(k13f# zqh#Xt-N7!P!*Ng8hKBw>d8t+Zh%HO$<3jeJj*3E1Vw-#H8}Bbg=HHZrAZBta`*R!} zkb}z*NAzPbHN~C;@czP4iUsf{JXb}@QR!CD^(qvq_U^brnwylQXv*=gS|Oi!@J;XB 
z!-R|jnA3v-Imc;-9dvYSHZMq?uxppeaun+Q%Y;XiJKg<25@{oG{s0;>?r|W}A--66 z+nP3;Vb&fv5L6+7G@n?OnGH*6sS}4?pA?$b(#s!%OU#XMc;DR>0LZX`gr|Vc8diL0-=3qe^<{0m9kFR`8#VhrjAK;FRNxJMUN$TZ0n0i=r8L#JEoLEsCDieTDXh?`V5^^vF z-xY2svJEm+@DcC{d-jkt3~};3z>)IyexETTjg6bl3=&l&y76|r3dYS;^R))wXZ z*d%_l7@#Sem3S)3f7ahy!j%Vn_I*?*r!#DWLkCqr;5-CkZ#w;>v6aoIv>M|MVP03L zc<&FO&KI?=9~g%H2tTZDshTV4Y8f00uJPWKyUZ#fRx)4s-a(r?TrY(~lrBrK#0k;H zISUe)S;@7?RO_1Ld3*MAoOL3OJeXQ;LSKFrs8EM&3DgPMk9Jx}>rD=r2GF*puKYqR z^5UPX8-X5@JT~Ibz4{K9g=CDl&P||fn~CQKSs^Y6VdS;vo13nT6H`dYdM}&9N#wYv zbRP)Q=QtjgEs|wE+rac-$qBDTyw)a`gGW%=YI;IR4Dw(63FDA`CnuV^F39z1$pLT{ zF0{+#O{fb2U)*}f4|LY}Ew3%_UnX!JY6;%ch=v(uF!lVL@e0^a?)^`i@MsRuR{Qdy zZ0|&KcLs!stXjWF3C~Nsb>J&f-|yht?I$2)MH9# zk6)3*v9Jz;M&*=m?{Ifp@bf9nf8*+DSdtP0LP)bmwgE?_GI|jr;NM1o9VC4kvl~>4 zW~jO9zG1V~w(g{j4`+kEbYOm)OWX~bpc70euw~~9TS*2kT0OhwyFI~15V|AT*+JC> zt0)pC3jeHDP2SQoZm;hswm8#V1J17Bi#(zuGJG=`+ou8*|1Wx!sDD%w3t-%(H)N^X zsnBc!!WrKuv7zzX+c%Pz26jLc?n-}yaD50l=K~l5P5$u$h=4dfibrkw*8Q#djcTvW z9pc;Xd-eOuVRaA^rk@qPt8(UHTATZVIe_N9xGgdCF_ zyxHiYwfUj9J{)gbdUXGBedcz2B=tSt7!!751$M)VyiC7(tDCES97$~P%H?l`YN|6j zr~2XE{fi&XPqyNga>UQ3sj)RELg$;&e*ujCrOQ8POe!C@mo`_Y=SYa(*ePcx8=^uIQS z!p0xwkDvM{TVa8s*3ebR#QIBemPt!?oF_fa#nI?|2}4Wb^F~lK=1b{VsiR z59;V$iZB#4RMRrM%!8wXjFm+B(@$HT~9(ObEadz(mD_mm+ zCx5&&>DuAT$Lt+eB4k|-dhQ56GOJ+y%4HT4yR9SOKI>nOxpl}p3}mJKGz2kAmq>EF z57%6C`^4KNz&vf6vY+M}l!Lk~M z(44=gmo&7&Re^%~=Mgt=9!8P1G<=b=AU5VcY@Vu44$ned(8Nh#sED3A!Me9FuCizVL6~pQum}E>o8s={L z(-=K>K^tCp1j8hN`9#CiEwkrm$rpc^y?WWpxRqmMw^{9x&WhUnhug7@!Ax0>O7p=^ zmzY&zlwT0Gjqb|3VN_jBq}?V&=?bf3)Rw|aHJtyE(Pz{n?)7j-UagEePbK!gl?N&t zS*<62yN7FMOR-g*pYx_}%Q$}g2DC0RcBnh!8{%)pSl7aNs~U|i!B@yhoQ4lepxKKA4n)AGgn$hj9ZIcRW1`T`Z?us#f7!3OV6hnHh%l4y zF(xAa%5Oh#7kFhQf%nSOW4PHbKF)mTTv}dl;aQ&1jhJ~`)*OPo_$#Wp76RwyaK#E{=iXn9attg&)IOE~9sTRY=+E-m-NWE=v6D zdwdzH)OMwyCty9gdxv|$dKv-KKP!5^vP|wh)f`py)ifS*9XVO?4k*MlA9GSDvme5kdaDU#`%P z_YN&gH`ceT$u2!UKS<+a4dXUD*YS)ZJYx;h?#`(bo1Wg#jV|zZ?8;Ovh$JNBFHsX# z{5OCz0mM}t3z-k!3#T-N0j_`E=NN5eAEVdrANo6J>y&#Wzb5?>$@5V8`_mou)n0VZ z(2MTnA=x3=S%md>Abx9fhwkCsUF@N6F_Zlqh+uclZk|v=+UMx?j|X7rE)#U?=Fa7d zCr@pK|D@B^2tf|em~&5n)}LRv#~T^dSv~Thjv=8Cr9a=5!$IHh23kPePV7?vcC_dC zPWcX>$(MK%{YHXn?f$06{^0+;z@qz}`cDRiG@qcP^zF_~fK_Ej=3q`&e7(pIYW`*3 z#YJz0KwN6(3*Fl6!v^&WVrpk`{0efJS-wIJ~0TL+k19@TNC^rhvK^O(yg)16v;^h)r)7BM(Vpv-?HWD zK3|Hji8hn!O8kG4>q$s$jK-F(N411Xx;KiH>kH1ln|d4K9bF%|u$nS7Aa zhWl=ZnU^`&M)csBUd8;8U|3?_I{Q94+thlLkhBtk&N}KrmTa2ktFZyF5PWk9_0lJN zDmgWF5^*{XA~?>3ZDro1>DlJ_G*hVN-*bcChl~rLv=M|^iZJWh^^h`*dRQLiXou5D z5u|=^q-NN^;TT?-fskDm8i$8@q+2Gxg(U=aTB-w9u0CFQcU&(me^L!upgLXxX$2q! 
z*WrDL_r8+?2GPenXI3Qq?HZ@wjvEq?Q^<6m3iQ*ME06n5vg@ID< zb}E?4-zDsR?yulBC0i{BA&&6~|okx9Q5E8(TI$oei zAAZWZtEl=QZg%hOa+|!HDe+G-rXf z|ADVfuAwPEJvGY`d0*FysYC~Z+hyg=2*X-Y`2n5G^yO!lP4=1m;jxz1Yg3;TfnA+3 zo5ii6KyIhd<{J;*&d0)?;%9U|tNa-TzYlrbnW#Xfz^>Q6dI>kr+EHhT*wmS4=9idE z_Fbk%Xz;?rl1L+Hsc|i1-B>Zx>cMUZaclMqCtY z3pmI6Mohw)24Dpxq$iU&(*JbH6L-u`ApMV`w%@$Zt<79)XGB#6 z&5TvS$@;S%=<^kNd%p6ml8O0n-m}BoLw2FHs-m?_Uj$@B2c+=NUBwePAaH|&H9Tw}07T^|?coBO z=Pl&isP~c^H)@f>x<%h9FN7;ivWE*gEk@y2fILLrrlOV`O*;2?_$KMgsJC_`GDB0G z6V36%IuX!ZVIBMbSfJMnXxv&zXzeJpVg4Wl8u@z2SJT)UegZ_q_r5Qa*3+fo_}k8= zi(4encw zxxOoAC-!9^2y}9|HYtO)%t~W}Mpm3mAElBkJwOvqJxmPReof9LwKH(93CYayhw{=1 z3wp+RILWHWXp%di>{CAhf)y~KP+vhXZz9G&(2NHNyaGiy*0$Q{aODMvMm5V};Qo0{ z${Duczp)(?Q*vJ*LrSq{tA9Y42fL_Q-!~b~(C|oRxaNyz6pgaXL4Ze{u)9d5pON;; zpI6x0xKDiCrE;D_QS1RxbwgcuV?XE~`mKAPj1*d`QP^zv|5NfttLRS~+aF@~NkOtA z$Mo;8^%IF<&!K%7DVYe{+@_Z?%!*5!Jb)^;2I7G`gz-)hFtNL!u-F9x0mE-aaVY)D zG1zWFNEF+3@@+tYvPApi(}un!+aScZ@pjM4Q>ZjI`LV0?T(Sdl1e0Fa{rqy6X8?c& zzEtsfY>Nsn#w;v@L28niJ_t_bkTTVm-+KfJ1!L15YrlnwJJ<6ERFkq(7cv7?pI(-z zs+Yb(H2upt{U;jhPu3%UD0<1~giSgn5jv4$I6%B$1?KnypeeZX31rY#$mFX_(%N`L zo1$q?SUE+0O=P4fxbcS3*g?CmmE`TI(`GRcukorYO%_+?Tme_Xha%hee>x6jvde{+ z`nf)GhX}rSA6G8vK8}63T<#0#jo&AwSM++JEl%=tGQ6fio7Dp%h%x-grpv){p!Kt1 z7qf^i@(|ib2STwQ7i5WR9^lmm1!t~$!QkX@OihADP7IfBA&(WY-{xNJqvcJNO$^=4 z{v6W*eQ4QS%O@vAiVG4n?wh!Xa&&NH`k2iS%%~fRK_Y@9+R7jbt^hFd%NKwZx!&Nc zT0_vtQEg?36-oritnfcs4`OSe-G8gJ3va6EPbbWe z(qdl@OS8!3Z7x1J6af)fUmpYt#7k@-WzQ~rD8%WXmij^n|5znCjFgC0YAQn~oa7_u zDa{@7zQSmQ5dwETa1a7Pr^QkrdKGkl(PXrDgVq4GK(>*!h~^XE$@ay|y1xF|+#T_~ZGWCOB%~YPbTO4q;6{TW2h(Z&xetY8^EN zF^1=kh0KpiOVcL_1L<}Xq}`dYt1O|P881f|qr&O&YhWxudN*j^*9g&z!*;_{cjgaz z>LZ(uL9otu$9gKm8Mj9Umm2RFux{!$(BvGDenYm#&o@wDO>k@wVN?j;io6G= zI(6Jd7Xm{Vp2nu8crFz<`AFi2)NOLL&ONWC+nI=^uPBR(En` zE-!q{H?HeOqmw&k!cWmpYbI~$VuqS*G6Q1q6EqL6lTMC%5JV}Lu0SF=5RKP~NQ11$ zO(#J7q7>fBH~9Q|t(g^uOxZS@;LLIsu~OQg1RCK{tKoC3?`uFEMh3c6cK58HTn$>I z(Yu8FO+HX~QiC@Bg(hJ#Yr&lB`ES>+0hzD56A*HyDlK_m4`m~9AcVwfme`tilaPGJ zUIWZ2T#oeX^9Lhx0KI?4CpQ-f!j5m`Y5n+vx(&;0kB2|op++2PV2-{p%|oF+@TV;d zkS77^D0aG#g`lUE`Y_ z;)exZjmX?}iTwT{m+u32z@#>7GP#=f)cP)o5>Pq>HM$U68kfnxuQIuVvFzmjPE8`@ z+E5|9Ayxa1D^&5h0nczxd%=I&UPN^uIPDgOxqheQERmPyezkkb{kJh1KinqS5P8Ok z2%c92kBGk^GSDWLU8jgAa^C$CTs3X5c&A#vrrDK%r$5oOPsP+iE-%WgJ!yr8X=rAp zX!{X^`%lLmj3T>enQR;oGV|54IG!ufwhrq{3Nu;>prXI$QYozHI1&OCb?IC)6D zy@BG_WK*84{h&lR&W$u`dXey1Z0edb@tP;Z*TGjmXtpQu)23#i18Lug!;`HkWeu4> zd$MziF?+K-!RX$G1P#WuJHq%>2f>NV!w48MBE)C=z1{?21bsh6xs8u^lbF%h4(O;8-k zDbhc$=v7B*(nY$Yg@NW-pjIG7DEQ4NtX%*I5D%=qI1DW9zsuR*@>Dh2^!!r|ru@jOTrj+SS3Z)MI%qMGG3F1(57?U#dE4Ax>TVChNn_-7d>+rNCl zJXTzYj^`p!?jj-3nUH@R~9MS-fY&`4z4x!;1Y#8y8cMxEr8 zGQBb)@(In4A4V~t4!L*Eq!8t|ZFC^MfgfL==?bQ~e`VyG#2U>SAO2f6i3w`V9?K3H zA0ru5Cz{#|4uGtkcUg$0>aYmhcsW5^HpyfG`NU|(c{zypMAzeOpUz0Dj7=Wt0HlBJ=UNL7Qo5j86Rv@=m=M-GZZUFrIoL} z8{sUp`!3g)A*sas_COKAZ0?Xxg(9I^m*4Jw>+#RMV+SE&!QA$@Goq{G+0m2yFR8=- zs2w9yY?51MwS*ve`U(-P+>(ocyd|4$mY5+e{7Zy-pH4gLq43CfO7x^uin421Oq?mL zBW=?&9{?KG!p}z12?@repzj}r6-I6<0eqg26S`j@E4tTx6~3mohNJv%u% zMxU#n=7oM?JyvaV62C4>Ps&i_Q*&b4FBwF6pR&Vdgpw~s=*Fmxhnh*Dy%ld*FDLTO ztAfv`8f-z2bI2t|?(By(YTupT#(x9Ivpix3(`R&=pA&ePhqVMTNU(Q$E^b5>|BdwO z`2I8(MhJP4Rx+sbS@vql93nr5;FYg+8T&w}&ces5jz!T~)mQCNu~@6o)=rsj>h{~@ zzbWRJ&WQzUkf{GGyT70(<40s(Bt;f#aX0WIdg50ICZll{7Y~%(WKQBW z*q;7}36b!4QmYq(OL*MUz&B67nOEz7-`i$*gm3+;3js#y(w`mpo681V7k)(g)>jtFO4NC(U#Ln; zm;G!tgc@()}+=e6DD8q9Vw5%+9Uh)yxYe>=nA_4yiFUnx2cWOg87O=@u}Fe zJ_1(Af#WQy5p>o-=q>ZiE!g=snv3MCv_rq6Nm3l#XQt_tcufyaj67`U`3u_k&1t;O=_5zU5~4wjGu#DLh~4gvZ6^f?>8K=L;av<{R=p%q2`I-Thf$&q*&=#uZIOvs?@m 
zZ~q&KySS2Ix+DTp8T~|9)ip89$m--3ZUf@X&HC8@fy{H|_zxN?DQ`GmHQ{L%Xd(Id zSH!p;ZfitMft~Di297I-bKaMK^txtUP5SdH`Ke|qNVsrUEVvhes42}E?(-mk3n1R8 zi!8oT^gb-)IGljr#lnyfIA59Q)L7m!4?L~I5!9ay%vkbM-{TkGh@Kg!BK7MmRK|*_ zy-DkeLZXnORB2zQP!=-kVT3+jdG(!5F~!fJ1p0VH`FC1)gGW!W_C{o@AXMJZLQM$1 zIg!8Yri4Izf;6Wtu0>ckEszmz%Es?El^biFt4&pEP zkrIFs>NIBo7UXm3oqXZ}^8Iuwz~*hUCDb>%d*F^juCOAq`e`2UXXuvxbY`ZTyrsE2 zuITqWm$ygr@2^S!W@HGh>Ev>sZ30s>F>&`l#T%LLJk!bb3NMw>LLAo zK~R}g+Whs7Kh^W06`T)a=K$MKsgoy`P;}o(6Vp4NdU>rwLBLz?MHImrSSh%V4h~6r zyFEk6mKZLF$82UNKF6wxbGv&HQ%@-Fd*%u(WrQO^3ZaUN^Ed0Ptxktpm zHoa=OSG*q%d|JP~Ro2u@X2lc?%p!hONRayk{O6BkdmLpvUdJ=x+OIX6W*zh(x0DcevEOe$}GtP5w<8ecJ2mfKY^`YxIX>f?Z`Ft+Sn`)5myOxFyu7u{+15J6LgJ*JxR%(TmxCl@ zG8A5v=uyQ1Wk(qm02@}prF&weKH))r1FNE%svx-ac0-wf=bm5pNVgxVGl{&>e2wM) zDs3GS@)yEZVm#Mt9jf|+;K#cv1VhDb?rruSLYbx9GzGH(ol&|4YvIL_8&lky^rynK zB}<^l#8WZRnTo{{i8+UkCFz_Y#K$XNy0W_+)D99sf~rA4K%&M$?esUL?+N|`ovv4? zV*3^`vXc*rF#4pK$y(7}ru=pB8_Q0}FoA^ct@3y8b@y2hsj6U=`Z}0L;E>69|Ndn* zf4~}BT145Sl(^E(^6Dqu?X$4a6Qvv6ld7qeyxlnxtBjEf=o;H1gO=+3PuZwlo+Kjf zlS#)EJf9U11boZGa8lA1eka{|_GwyL&J>0Uz6`@>r8|Oa>V;(%F_&LE)CgC)2n$YX z+Ins%*UlCDVDLdy0Q=vXtIkuNqfe!Su^~+lu04GoWOWkKg&s$*tBLeyLg>=YX!#hY zGQ`bT$sF}0%M7-PIC;BO{OT1uD%i{K)&&i>32*~vwY8?Wm43vJd9fKJXw*_mDENG* zVe>LdY^HVmrr=|0^6tSA9g$&fc&Klx(l*0VjCN3ebY%P!e+KUjEx}MuWQuxn)8N$6 z=pVTEGq^|m?kKkWxxsn^5~>BB`DY?8Y_OmP0i&2*+9$;b#6VEqvC}Biv3umM07h%a zA5cRxejXeT;ji!LB&=FX>ufuQ+xIK{(B0_5+QR_T#9{ zJL!CgO-p=O^C3F){pJSDpW@dMCfp$y@W2Gv9(=L8RE&LR(Yw;s2o-^jio(&9GnZlv zAxnqsaELC;yGlS|RscDsPCCL~;~YV1H0iT*DX&neHaWqDPii~0_?^~+=>Y_vdj<<| zP$Jd0ViK;y@AL@a&ad#VdW5v}dxSjK@Q+tw&`;F)i4hqk{tg&*o%RM|Apa3PRqhpr zb9B38GZ34D*qn|50>`A*Fi&>5>5vv1DEfn2i{*n6#c=Vb18ezBm!BCsEfS{&)M>=! zh2E%bdVK2&dVhW|OqTiPA?;9S!z5dKJ+W84AT~VwS;4W6TB1#auuCFm@ zWb{=Unvj7&?W&2c0w0$DF2|E;E3Vm*)!h&z&j@-iWJ>M22MPy>3&EwmeqfcqRz~Yr zDuF^%N4S!D_E5p__f`+d2+H$0_X}Z+O~sE6e<=*^Wat|ZRs?%!vCcKnsa;X-T3-~#gNw{NPpx5VYZ$? 
z#RdfNP3n4{AMt3Ef%wM>1d4`y`Z=}R&jS32XUJ&mFIx6YI@fXsBslf7^kB34U;WvDd(?$ z&}_Sr6>(~;74x%Ueql^4Fpn`-k=-G%rhLMAUHR!N_ye$&PmI;s*Tmodc5H527rPwp z&$&=D#54F9Lw{J8o}e8?xT=Z4uK6>z&j0Z||7>878LDnz8)iGp!+!t$xm(y_aH4$s zUV^V8v1sFe(}pvX=dUq{fE=#5mF97dev51pz*$mFLfj{Gcku57f3|-%_!R!>JXa#& z+y3);^B&?qyx#nLc$Yem1~%tIQAvFf{v!WQ^gA~Fj(-jN3W$)-?7(sOPvqBA=SMW>m zo&EZc|9^ho!``7yzv~OeE9NYa|EvTVllj{7x>JSepZ9~G4z7WEcn&i+;J3ZX*(cd+ z^Zwzx4#u-jxcNDbx!W2~M4ji~s^9tIvY11&-#Yu*GnRfn9^Y_W`>!0==X>KhJlOU5 zyMgn!v#s^q*=_;CieZP1@rTz`)>=ke%$KG&IZ+^}%=g;lW`QOj*f9CVQpSz#)zn?GP=RhoVevTtIzZcT1bDK)= zgXWxr@EsGk?|a$5&ii?P_UAm<+WGzc?|F}(_ZPqCr9bcg>%7_TWGL8;Gud&wro&VeilR#O(9^b^fn8Z3}Bf%OK_P5kCNu4$Z)ecEa)W?um34}vnEyZc`M&$;M7TljPCX?}L^&$;yXx__TD zGWyYG7C0ZdKAb;4JHNsC0?r51>-T(IyT-W+;xV@H=iG9BZtu^zu#I?*Kj(h;Q-909 zw`28vZur#ScHw6G#2gSu`L>Ia{ha@+P3rG+|2Y@)71#cAF80ng@_WC}#k_#^`R+S0 z=hFeH9EJ$y*v!0UB{rT30v=cw9lOB({i09{`#bN13)n!*edDf3 zU(O|}zvHpr@4#J@+?S{F=evUV_#DRl{oO!{kw=L3_jlwwO3)ks`Hs@z65LCFzQ_M` zoZ!+KkND?1kP6HJd`kvG?SBAY$}N-4Jlj$R&>_IA=rIlBV9x8K**|k{;Fnu;Duet0 zMw>8yFK7Pa1l8V*)H2Bk-f{fSAMv~SyN&O-Wdbsx`S0lV?{{P|5EA^qa9 z8>a^J4Chx25Elw)1m^3;Ho(vll)r3tJ0Hh)@CN$`wIGfSrbm2#&VNvJJl~m-73OH8 zR1H}>Jm-ah05FC>;?o#2CAt*X&9OGkkVHJ+Wxme$c@P5szBi)XxUbGgz?Ct$9(VU>@ByL_*Y_kY2Ae4iK3hkZ7lZ{}M8v z^8GVU+5Ec^P|g3o@$l_u1W?ev-{(TReftOR^ZkCFzsGa?!(id>>d$wi0>0O0kTlAe zd}JXHx|#bC?PSmQzs>E66bZ)9um0dSbA9Rm^W*5>JlFBtj(^{N#_#_5d^|TIyYq8^ zA07EQY*kDA4(*QqKVMkjr~khF2(nGI+n;eaKNs4?W*M=ji|M{HW)D zzwd>9!}*v8Ahnv$`*|+lDCWAQpYdhQ_6Wzn^99e3@2|NI81u0CWhXGcP|Pxq)1T-3 zrow*rGbjfSplbO6i_YyjgV|vpKymz<&&PQG_77G^Tz`MIe?RsTThqVBGw$b&us3rd z_3sq<0X)h4UwQCA2!>5H^o%6P6Cuf`=qD8$C#x)+soAJ`ZTrYsUFYF}7 z6CTb-es+cqBIq;oIAcY}!{!+GGo>taL=0vP6w_Rj4H$}1w_oxt2895U*}Dhhl(+&f|{Hld6M{uJEvmKRnMH+o(;@C zS>aqV7_-%RNLL%)mvD?^QB(2jnuPEpTk>TUn)9dB-&9r}_h<3=^u_=1KmH&8>#VTy zKYyQ&S@FOB`qb6u&wtU5UJ?CoZg#Kx$N%y@Fg!lz;=f$xE1ZA+G8T1NJ^ssvApV!( zp8c19)NGlBTvoqTTXKSyzAUa^_XV5iZ!!9)v zgw=s9teD#7p?@@A30MZ`ny)~l>1L_x<$t2Hst*ixVS<3CPDE2C%%|XUXNQQN7Fc9{ zf6K#8^RAa?(k(V%<-ZT*(s;jRqd44ddogqN@2hAQC>Y_4YfP?qdP#T-)a-Ivz9bYk zJjzEXScmZ|PAkpb@Df+Kp!u~GN8Qfc+z4Om^~xJU)SxC|bxNPh)%6nGpZojzy?;-d zb#UnmdAnE!)M9T+H!ejD5-HlItD~oV;u0}RA3NFowA{PNj=-pPFXNeAuJ%lNjRoT) z$LeC%Kz|;qoV0GYejQrRT3`3jMY=y!a(|E|Krn{43Gu~iX1+{M5)P!hxF?J1_+rD= z;sJJnfS}%;Kx#xHx64AkX+)5=kAFkK6kVllP$T9O6cJ6Bhc`Yr=w!6okZdU1M%{XWwtzGX3?NJnK_FE==i?e_Os}%_z7{;8#YDD74*(2@aY9{)Ndq?r8m#d znp=91w%PC2XJooxo8B$J@X9EKP3H#{Qip+he>M^xoCu;Jeup~oC&8Cy1SeB<_QOz9@G8y{3K<{)D!O9 zDYCB%XTPf{U+5*&=)TLNpnt~R;i}pfaeb{9K#vkj;o2P8g2OCyDsp!;KEo}B08@9UbQvwyN-F3p>lmP90M z(qvk%!5!h97wi5Vr&lrs7@mq_ne&(Q;xjG!BfavvZM7q<$as1AkpoZNeHX7l#dMDc zD5NUJb+e!Z!440W%a-fST1>9NdPhjapqrsOTM3M-~@H%($*%q z^x;+@Vri9Zj+>tMIDZ3U7|`j;YHxs#KsdJ5!lO;|!URDO{Z_5-it}!>cD!zxF1TFn zAT7+qZSH4N-FQZqrfWPi*(@56}V)c~K5Xhcls*XXjF1?0ro=sdZEIRPc~{nYkRc9KZ3% zGx)`?V`Dz;Y=6}yA+oWtaC6s>yCdE86{&(@3BPEy>f@^(e;l?K{(MN!K$>n-tBJ}+xa zB}$X7_Qe)DVeFQuJE?-KtEXS;0R;8pz$tfG+Z#4PM1L#Y`4|c z`&_rO3Is<;gcoWpx9=C|bsTe1BBg1L9GO34@tW1;;#sJ42DT`7uTX>9wZ=wRkkDcN zJjHCYl@xAy7@d4OP;I||?VG&|_;si4wz02VA)j81T^^PouCtpAsx#nW zJI%k|1cNUE$XgyIu5k>9G7J-t@A2~?+VXbYyi@w9C>dpkLZ;k4q(vOZM7m(;MoASMlt*_46D?rcT(xg@^T3 z7DoNy)LreWwWoI{_0v`Nt>~XN3Sl8i_PBe;TS}74-J(=%kkV{Kn%i{gAx(B^{E;BY zs(-cFO+BcQu&%%@Sh<^ezoEg$2`%hYzpM<}eRfYYXL`Fd-R_=qy1L-UguV9%^+p(H z^AvSe0_e}RdOsS&d|hp)=h%Iho@vblE<$W2>tys+n>}tseMyzVTrsc2HH9-JTn^RI znY=y{KC2s$0?K325PYRR)^>pno@_gJ;D5u9ZgHF%KjPf{<7|&%-*cd4Gh!9AVp9?6 zWZG-AQ}Y2a9GUFhEE~bz-y^ELb<;MV;>SkoPM-2w73!fp5MBAEr`t!`PU}ZFbCYK0 zr1;46`2o^9@^-(Qk_nlKV7Cs|lbO@< zHP1qKp+f0yo=BVR=6+{}zSsdhsDDUxY{1tGoRMb*(MS@&L~c79B5NC_JO{>ZH@s0x 
zT8n!yDed@`%Q5kEtcQ3N=fSY>rx4D!#|2HIf;aj$4pq4eJu~Fs9XkwtcWnA@kN9`9 zyb;VM@BA%+bde1cDmlAYmXmXlOWux9<=&xAC}&pv1u2*y8~&L2| zoW<3O`HyiO<4xNE5Dh-OR%{9Acy<@*8I$S9Tx9%~ETOZFEj|=j-ba1``Lry3r;VT) ziF$q4QCLCzx(r7!q6*yQapjI1!o6^TA8YrTG=$*xii5`c;CRvrW{PMhV|yOC zjdao>9P`4uSQmW}=vr4SP<{Wp)?nRUCrLAfueRDy`b!-MJqdR18H*Emd4%d(t|i6M zBZ&jPF(e9zv})K*I)8%dC$~^5asgBb4^8x~T%k8cdCEQ>O{rg?ol1p+`?-0#L~7<^Vi^2@07^&9P^WC+|{&yy4) zJPS@@^3Zp~&e!kSAo55VwhO9X-OYAs4x52`xAjt>@6cXcyGU6&WCmf>=viQT`nONe z-&}#DMp%paL4cnyx5&-qfeIj3aM=kPBaEi@g zgWBv5^8)_HVSmEfGDAN(U*0d`7$ z|CEp6QQC;TL_8;T)$48~z0c94V4A&XDOMKl8Zk_%cpv+-BroVzg$~v<_W547b?;t+ z@T^n~9=I9Ry;rA+d_2bOYa80#$stzBD;L(cD`t-Pz6NBY^MTYRMIs zW>)P;%gU25I{Y4K)7Ju&J11$gCLg!Y1%7&5D}SJ5c&1~0+=tQFJZ@g6NKpGf_QV#O zh*l7yCj`+sy|8m3Y=yQcizQ{Y2VoVsp?izPZA2jf^k`jga`%gD>1p!#nB?2@V>EiT z-#YAtl*P+~dNe_PDDL&=dfJ(?w?xGbM{_JsZtO(S*Df$+jc9CP+eEI5g?@ z?tiZ41Be{_Omnzt6KrTpXcE{G?C{L6oJxik4^SP^DDA9^TF~U2_%x zu<7&i;$9xpCCu)kel$1X^)S8roV-3KuoH+qxSS;ZOg*`mZ~ANx>5AMfqX3Z11sG`* z`Pxdm$Xv7+Qq(8^y4*iy+zo8*>wd8ZS$D5W#A#2y*P0rxI^V*UAhW&2xEGQon}6&A z_gNM-2#Dy0a`Jpa!MmQx4L7|z`=q?R54c1$8I?uD3A`&;@>DyoCfq6}X5yaJ&p|Ko z>XNu`Kd$CJ`i+mda0Luuf2nV;tX#DSi-Biib=B)rr9U~krKGph9wTji2>PeYK5Sjw zkqywTqa&Z!W`7vD$NIj6=Cr>`JIT*WCl{|B2DJF`?D5dAe5Wf@ z@fj6qaKGRCyXuuh6!P7~bGOsb+)f+)Xf5Sqb<^&v9eA)kscBF4MgToVsOsPr_s=^K zC6U8t|2*?bbgP85dFz6$;3?F7<#SIZm7=BgJhFPvcDq_t-BAle_AXo7di;v-b5=LwSGl*K#^kVu(6}XfJJ`1P*44HFpvO zuVz}}b#IK9KRqB24W4+JokFGnUeCR}inU8mo%VQD+d!+#GmBA^6fF@TdJ#z!G1AP0K9zm{97cM&%26jApMfq9b`M`Fqsx%N1c z4yCj!o(r*SaKp1zX~ri1?5MN3zHNhDkugbuSg%p+dh!*eG35K84u5`s2AH+$b8t3c zeABx-4CJTYsSuxV2Jg2Tcss4VTRrxB!>b|rZ>83mgqXOp zKVgH1q8q_HkKvA*5b}ZQw{A$up{sjub?Y;T<>jdA-y)5ECl=#c*7RGCy8M&by1b)* z3bQMxyJL;heJT@7DSz>mb5nZSJQ`H5iH9bXoPJywQk_f{g~iL*wC}>vSL^gpt%&$! z64!i7I7V+T-6lFhP%U-*@(P&$qd*1caU!YgdIS|uuD1`D-LlGU0Ce`b6puh@!y`Kv zQES``H;J#&p{Y~j>F-|WHWf+3Z|I$x0fjMcsyJi4CoP^4Du3_}l)VixW6xPO+#W68 z%fxE$jXMb>;q`mhPPeu$o$=laBDDCo=R{NVyAV{$9#6fJ65U-)^RS51 zfQBCNH6ib+Km-ICHByr7Bj&s+-|Bt)oF)H2o!Y+RLVwXc5`!nQ8bovQQz0y)#oFI^ zyEwNB;f4~(!^Wb|of)`h+_l@BIHTCyy1Q$e-48y8&F<~aCPJ=Xj3jH^n_1@6lXVi? 
zCpOVV)F$C=rIQ@K===5!5dlluS)?I{dH>M%OG6fVbvjcBFYCLTb&ST%iDraLJfNDw zHl`d1QxnzS~ zwQveojZ`|vXG;3gxjL=3&TY9EpdpeJg|S*4L4Q|#JFc2-a*FmZQhnaqx!})xp-P_KA6of(^J+o*UAssA^`wzGq27B5!Q5xF2&-;9ZUwH6Op$9rBxz$(L z#Wi{(>LlfW@@s3orgKc4(p*uYU1qkJzO)~FxT_|Iaui0NWD}`Wor~!rLH$dvjQ1J@ljNw0{wW2_?95 zid{lSgu9GdKOo8fyoJw9F_%%aqM&XQsTY;r8>@4DeQrlP;{2{TB+KM>(QixOk)kB8-AWgI}cG+y#NPsS?<^;b;D4%;jAr*wsnl~-vHxqoHO zg~00ORuTL4itb0LQ||tSa@t!dP*Fu#3um7lpcN#qgwriCsMy#BlHFoP~V)F^;Myo4j7^p{H?3DSelz#N<6O3P2a6} zM7>^+7n}A&<~;U+Mn9I>z1J7d`{h}T(c%*&+-5UcC&^NyLR)9riyJSf)qgokg|dm3 z+p65G?~cf`ci~9!8g!jeJoo6_Mp<@gI2oJEdR<)?Rb4Es*ZRUG(s|oJ(9Y7QO!A)9 z{zF|?pSQXcQ70aN5kTB+Pu);P+3T%u`w&|eE_qIm7}ACp!y3AFt+*|=knVzBjNS3I zPwmI4KmdH4@K~xd(obVsRev&NEkxp8mFnVfDDRx3jZ25uud4J~OsYs8HR`Rri9TH= zr)F2ApUYK!MH3c>@)CRc?o*32c$B)d|9BAkcR0Ypw(HeHG>&_=cPdE|oYm#<@C&NR z?}rswjil`%Jn7IepdmL$Rq|GW^txbwJcxmlouBet!IOeZPSYYfpMQ44-OTRR%G|A5 zDp;t1t${v>d=<3@dMs2J4(oVZjw|}Li-KbRe$H%9=g^JTxBfst#=bv18oG0*X1Q5s z4B5F|?^0K@OCS1O-!9H6p{JY2n$mCg;t2Au%VK@pb)Q{Z*o0KyQgwAZg%oj7J3!-V zw?3so|{BjM-5#k=>{>2aCHS` ze0UHy#fNz6ZM`WS?m#_x!t?z__v%Qo-NbA8T#VjU)t<>u)%4g%{0#8j^B*2D8IBbw|^Z6q-)K!vJLY1a`qp4jy}G+O87+l z%iz5GwsfsCMLpIPu%*%U5PFWAk9C%SmG5yjYk9K`>%*fKt83tJuyTcwFEEPodos*s|R#YPZb1Gf;wY+kM z#B-N1WuBPT##mon<@7KG9#LJ%x5FS^mVYl(f0KK3J^h^Ge!{JGI@Z^>cAmoBYlTS7 z>9ToeT?f5L#C*sq6 zj_~mLE1f6u>u}GaU@W((=j9Klh^a2PZ%@0=oZarK+&{AX;pkls_q~4$`fS|Uaxg^> zyVrFdX*D6r{oET0r-+CuTy9TK_J7pwpN6HY`FI#Nt8Ee&>wy;?Q+^n9>K#%;zC$Nx zw|SX+UrNuPOO}dy{=NW-DL1}LQ znmqK^;<+^$u4?I&thyXgkfqD7p4()49PZZTru85DT<@wfc|Bhjnc6*6k-seTi?V(V z2(kqtpz>1kh~Gt!-pvi~tqi}PxP&tRbc@$R{?d1w!?akwHFcpLtjR!1849K|_m~bhceisOixlV|pD~3izxSD`BxDUb^s@*E$7T zgO7z#2hsw>8N9qKcz@-*nWmOEonRfm!aT9}l)iqzP<&R|QxD(mt9>aR^-wf3x)+aa zwB#svJGt@V?ikyRq1~8nn~27^W9tKP4&jX_D zO(ZX#?FK2=KH6qES1k6L{#okbhjDh7-n{p*$XXSXMKOKQQ zP>a)XD-RM1Hh5UUaG|_*V)Zd!%jb!5`}<%e&3M>+cEpkorS(mCbr3hy`^`YAY>;-_ z@XFU^2O*S2kAE%oHf!jrIYojTLbCGdUo(()oGy1GNmaG4r>&{&8uON`C>;uT&U9Gw ze$l1%8w!2(>R4^x&Y9Xo1H>OR7pl6?RxGUI=)cxV&QmtOR{&~To;CZFsKxEsh1+}6I>~pVlATuf=Dw1jSp#&Pl9e?6n*-y_%wl-ZDgzoS<8A-#o zXLmg0rwe2u?$~;N-g|8cx1PiTlv9BZWt>&@)=9`+Emx7zYLRHY0=;*Y50vUvWw`Oi z>jRc3fZ;;@$n~a?TvXu+f?9jApu_ZYDnV{Nd>+j6tOZ7~5;H$}WX#FP3b`;ZP)vyX z&W~_6pMPTg!}3$M`^!69KA+b5poZ{gz2KN5wRIq)4o0aMEzNv&#ld4~gAdm=|A`C> zlzUAW>G#!?=K9T9Qx>(SSJxa7SSK4#**-AS@*xde*N0LxhVe1l72;)Qbdq@WqMJIg z1r4^usu;^I)6P#(g^Mhm?}oEjY^O`Gp7+%CYkz@oMIpt+QG4u;px&fVwRYG$oRv39 zbOJXTbyKp>D5FM9{8_KG|cXT5IC`*=MD```k`^wYU)HwA6Q(dCtn(AF#LUAouOwbw;Ek8AUi zWNs}%gN=_2Ry+H}?$^hqNybZolNQQoZ_J+BoIca0SBnd`BoWlgpRbqN$J7JpWURq! zHPBzmgqh-FRG_Y%73hq$>AS@`-%}$+@XAPIEvD<{QxBUkn^K|-kY6c zuOD8E-J-i91|vNpzt?Tza9Eay7IZl*wUqaAUId4M-XdbAD687`DnsIYwU6{u7!1?) zCds$6yht`X=omv>^;%bq6;BAt7l3>Cr)~q@)WZ0*nUdW@;-$T_{KP-5Ab<2()~xna z;Y5jCOGI5JAFks$GO4k(wY_ku&HzRZU7)IJA#GTd(;9?hr1wpO64CMC=G)#se%_sb z%e8kkz7PDn(K{zWImQ8KDsF`|;dN!~*;DhFIFo9ogLt7fUbeW&;exhS#dZqKoZr`J zU8Vl%LNDmUYQ=7xEr{>H&VQIs$A`7VySy4xpkt$vR7ptpetsvO>&AS@XS$+UGTA89 zCD7+hctxwPkH1Ub#UWlEA$M|>JHa9*O*Yc~0x(rJB}MumQ7dD)yBXQ?h2+WyN_xHr zI;{i|+K)>h=+ll4rRNJ(m#$He3lB(*@S`4+6fnubaF#Ye_PkH$K7T3~*VHIBJusU~ z(61Hnk^(IQ#iW=c8r2#G^>DR?9~E9az_K3!yrr5#d=*moD7x3CDFK^O4dtUhYrbF^ zV< z$Itnh=f|n8l2$GYr=r(nbJ)Ij*J5#5*lTU9?ujO>-H($N!2*>Ldy*hOlZ-|Avv$`i z3xx#%*v}nAv_WlDu`~;k)Px%r)p~mfwx{}exfAie3jwjXoqwO%A-a5a>Sh#Z;o((B z({xe(dRvIpCf+3ndoq#qy1tU#QWf$&PzA{eO22R8IbE7t&iaqz?d}_NwbK)7Nj#xa z6PTCnnqPI|?H!AwTBXJo#SaGnL+kcEsdTLDSNGd{H4VE{i1kDVJaKtDuh$2;S-gRp zPVW6WBT*_&=zq#``FuR@i)bq(S?nCn*A8^Z)gcO2%i^^L(xGOq{1YOn&U!k5XyLN! 
z*P5+w-?}*L*N3S%ZXWcKUOiqA+*#%=E4Q149x)H`_=yF*tFWkXm&Ms6ItqfcuRSHG zLLVM?o^%;nmD$1q7J<9tiKB9i3!nTX^=(j(mxG?^;(zlat|k{!`74q?X=8tSN2ZX~ z_BF=xa$BVf_FhDd*wBL2dD=mfob|hYO@@F?Or5Jg_; z@Njg)4TPjI5kla(94b5QX{35B(fJ z`);v_T~A;mH^@wS@1V}~OqVddakj6D34ow`_=M(cIEhA%h#Znm{bPpYZ5RCrlxBF; zUVnR7ZSH<4?OyKlWk%GG+9=F0FXY=oI@0vp;jb&8(05O*M$mzI*<c^pF})pSM1U{f<8JQ8Y&_N z+-OuXRQ_AsmLKx5I0N7x*;;TS%*gKdsDJL_8?!X2r9&2zVEOC2wuAQ@OeyQxJaPJL z!t}N%^;udU9K(Y<+@`feK=`i9wxMh~;F?dsO|0JCH-8{jBH!3AtB$O`r(AJYdnlqS z5VE4Ias8Mo;TlUhUB;OcohW0!I7fFjlx1=FTh+?9q~w42AOEL+Ej0Oioym_1%zs>G zaz@qauR0S{BmYua`Hw104*myfcc_&?mDya-{w?8k$^_{S^AUe}=9k|RsxNgY3V*== zG{ox!RKnoa0oCJNO%HDiydr?xZdxJRbFLZ9_aJ8r1yua z@%wu_S-rQYzC$S-9+sWJo5S3IJAYJA_;O7`ah5qOW-3XBpH=ypRlMxME>+UpSSp4=@-fB`Ftx~;4F)ox*bJ` ze|T>3kkg!(?ZgMF;TRP}g5XDkIw2#}lB}OL&X}&vnOd@+YmH~>TU_93Wq-Hn0G*X& zZa;t`=R?g`eeTpb+Nvnbo-E2>BN5%|QS@OR_|q--AI^1nxbNd}6>r`X&#RfLv6D59 z6gk9^lyu2sPs`A%HbBVum^^j8y4~A#y*LH*_9{Gr<7IhS%ZJxJfC!PsB#@n0@F)+G zQE_4;vm<3;*oM!=HcmPqbbp}3WOyeJ?|VP%Yf1*rjDjTlB@^UwLrE$GI;_*Fn4aB^ z+CM5Ak+%CGZG!ze+`WD7O|0(Z^R{R=uVORg2V8D-KPvWu#KYvt4Fq zlYH*bHm*uPop=v{wqc`F_Xi!ZtOT_mcDgLj?<(F$tfoR#dog@`=B;S;0b0nQ1qGl% zxZk`8-gUi+sUwTg(^p7HqSRtRZ&1V|#h(J3`x-F>0ObgH2v_UVNnj%Eb&Y)NfL}@U zy5P%(d%r0svjlf@3xC|Shwm@#hmfyVtKuXl!P2_HdmT>)`K7;5gTgc+>h#*v`L;H| z9xE8Rf3|yQMs0RoXT@RS0}UixOK&{BURy~}AM(yvQj<(Yi7$=n)PFSATIY<=B<1OF zm+mhs(DZu?m?SIg!3F&sgW-AJoiQGx6FX5sA-zgrBfeO}eSe*vXHQ;3wA*Rdw@(8O zhdY0#gL`qFNGUbQOq|*US?!jK^RzkZ^^hk^`O2Tv{oRw#9DpVCR$H&@G%0r1JA&0? zed^`oyEPs=Zn=78+G)R|2~#QP9QpON9OdA4QsE$uc*8tx5!0O-?w_m$C3 z=YFv)iODmT^7fi?$K2|xC$3m)*LL+JpNX~i`h{;Gd$}zpkmIa397v8fJJWh)5XWEF zHg`!Yf$uBQ3T%`dn8X<|6Zlmbs|6Bc6gN9Dh1NZE$A33;pNa#R@A<+%d39Fl)e#9a zZmI2#ot?`)X(mtW-dFimtQ{ti&8I4#3fgS#ET_uOc}~Xi(fjpi4anJfLz>YiVYB0& z6|n^s#(3<~SK%&H)Fd5mQgBa3ZM2@!-QSrBfRgldNV@WQ_K7xq5F2E-OVl7K z0h1^WcYiOB4Y}V*^oLuIuS|@}+bA!$ZS=6-lw^Xu@N8{g6WA-L80dQ@T$(88S)D?t z6;G=Z#BrPR`+P#MrPxBqOZMBM-=H$I+%bDQpkMdPif;GembUYwF)?Qqkg`1NcB1LS zHBq1fv{=-)Ham9Njf>XH?tMRNb)M+E%p>e{5r6NL>us~ECN=PixU9<{;fT{3R2qA- zOZYB&Uh>GSSWmor%yn@{1d59I$ku59qTN2G4HS3s{()XZmAynA1(Wf+)!BxCW-BM- z=0hCIi^nznQG(L}Df35j(g;9pyn$?Q0qU~S4;8HJ9;*N3CA>)P%Hr15v%8I}OLfN< zn}2*q&ag~5OYJp)CBocQ;;Ct;Occ}&ZAjsblDF=9VKw{f@x>#IykON}V;l}^{xW$N zhlIq4MOCZSh9eiPHZHCZB~})@#e%uKJGHWQc_BVPQQ|(eTsqOjNe{Xwy|S(*c(JUK ze{_%FvOqZZK-1kw6H`y!n{)?|pd-2gTz~G`ie2a6!9s2Vx6-J~)@k*l8c7*|po-mo zP>c3F=3V|>wA&+~~4zEi!k7Kb8oN&A-E6t<K*L&(}nSh4!9ZGV$? zd0IYs7pjKE`_=DOVt=}xuI;rtByT4|eG}@A_w4$$b^OCX4MBKa&T! z?R(=$A9u&y&Q1^cx`;r|dkm)G8h>zi6!>*FGZ#TPepVM@PPmK{r zp8C9AuMF!RC8VKmN2HnwGk78cDs758<8Ni?lw-5H9duyMKrD}nmX;E8?wCR%UkAkN zh6t1QuW?7tdIJ%L@ygEn{QAtZPJ8RNv23D9zbeBq=Gk*cD(rme-n<*{jO!i zM(`sKSRpaIH_=DQYFPnUvF&O?I-qX^15m`y! zABE^E6Sf7n!+)|LH;~Tqk>_@4au0T{reS#qT`Ix>ahe(>`tQ_8lxabB>@)X}V<1-7 zYXpwgGb^cKCzVZOkv?2h%59~Gs~#kjLSzy0V~TKTZ+F2PsFkOMB7c59P-?VfY*Q_! zUATLm%(vz*l{gKObOw=fw~?(~H9QQL;B^ z{;s0p+efi$uzy>#`FU$Vo9Ej6}RjEyR zQ-D@gw(+~)_m&h3CcQXZpXc|cNDliV;&)_;bn?#u+G zVf&@sFQi$8eYQs-ZOk#Nd}7}>QonfJOmLwEpMM9JvH2PW57lQ1K8E6+nvzeY1p3QM zJ*#isvD=p!%5?|LUR(v9+}s`2-!S!n8l0+9E*ZozwcQP($ViMx@%t=vXe8^2-;D`ffZN53tUmO{aT?& zM1MLRgT>?Uki@++J=k~j`G85{l#O;d96Xnh!cdu$Nle_4y_6V;K8Ds5)C92y7S z_ZAiV&(dp<@2E*SsHloV-cO@GYtH1N~@Lhg^P zyx;d*@f6*=@pQC#|MujUbOt4fbr%f*uzt%UzYdDndJ!_guXThGgE!%lnSOPUzJI^c z=g+xa9Q7Mq)Wb&*93$4v$70m`J?LYoBZEA(1G0+XpnC1AuN*W#4Q}c>}+Ut(8Iy~0Veb+k25_q1&c9EPrXpo&Qhi=2r zi?|x6o3_nX0<@7X?segz7VZ@i3dVw#tf#M8XNE)(S1(Ppo!Ll$NTA$S%3)RZ$$GkFPJS^M? 
z;rcauds4}{ytXLkqc4Ygue=P-z;;TpSNG!C)R)m<_UEH7+M7%0=5co%l-KEnS`+2d zIk8sVZg+u^Is*EHluv22#XKRIU6Wl-bJ>TG5Y(xyshsZTHqKhg8KpEFuqy*kdY2)w z_i0MpunvxY4~#!-+2TZftcm4`PSRa^(O=qHjlwV#<6id_id&&NW1pNmb1$!v1LR7z z7of;OyMMayv@^1EQQXrPRMp)zY*64RX|~uPYXi!*>IKpVB5GEj$||BR@7t3jZpG7; zQ4aB3KniVlT0NN)Z9=?$!AfgwrMAtL-pa`4r_V!w0Qt6%*%ur^=!lGgG@m41MoH1HS`Q62%PI5E`7rKtmOwpyp;kS?QIOp}o4T zedbE=Nh!XMktO-n;U+2kg^N(mwcD+i`&~M|IYM!x!DGe)e+#Dj!z2_$y24cIIN%}c zeP^V9x8u=6d!6^i%;4Bi&dZR<_8k&?=2+PcGY(J%d~)W#e`0J%Psq#{kE0%$;qt*( zRYJsiXIyTvW5omf{lbM3UykYO2?wf+TpT}zl-qfCmMeflDWr4juPtKy)#*{}ShP|r zav|^_75fQ5vOLONGG@WmS8f*VFWILn9=1<^+OBi+E?dj`1khR*I;V@`onzl7tqHnP zyY1zIMAQ0N%W}3a7}S$yXZke@SCj!b`6Es$5*rMFe=&mBIOjV*k7|NThi zO1RfacGx0^P8&}5vGCzpM0xUVU^{KcJ!rPWrdqy61h(<_c}=IR(!%qQ-_uqckeGeL{|qfcqP|HB7?IYJ8~M z^+RJ@4RSit4eM-o9Z;?|)T+L+k5>sIk_v=I3~m+*7p@PINI{p7cZ+g$vg!Q8(@$%( zy!#}tC6MWy406O2UmUOT9`e9f>k)H*+G<=GnovSLE!1Ah(-yfY=-EDQPRF%p{J>!D?aK2pB)~+5ACc6YxdpxsEiKx z?3ALE;^T4hZ@He`ryZSg+3~$V62K=uWRRKJWbcbDPk?+hxn9Jsc_sG%6!8LoRMKga zZPpG)hoC21zy@}P)A1OxI&TA3a~a0ML=}z52BBrO+~){pSK3~lL1$cQ z2&B92C5}#`=L_V3s@jO-+A;!$aX1X;qZu1oy#V?3OgvJp{j9MNtoLN- zGwWI&c@rV!;K~q63zRG{T)vWjUH%@|vTfHgOF7OxgKkf%(@~W(drV){@mlir{l2rf^O3CfZjl^#UbPi3r3{ko@BijQ~j02`|h&(*JSTwURvbt$;N zZ;y^Q-YZ4gP^>B=BpC)T2SB-8B&u=xH1<98kNHDg`)A#g`6#bP5UuQg%Br2+^X%^y z!nBaM{q@*3fY92`XD~XoFPyD}4t-omo`X?OUsTys{3p}L&oB}}L_u_tKdY}hLqerf zeM+Bp3E*mDzc-1Z0J#r6)F{=tX$o zQok9F=>r2(^c z8LUqY6p_~<8zg>fdVd{khw%B2Ji7WwQR`n!IX5<%Kul+xaa zG*Dr|a<6iLb%E7&)x+mrhODKz&Cc4c^XY+YA%mOqZ)1HDI{H$cqDkqaN3vHzaAV{N680tVU6c>u?(?}>cCKxD1(M)W ztjM(1&dKsi?OICZ`Qtt|*g;)$Q>3EDBwdc1&z9UgcDo$U2lA2YGvLst{PEV+%0Q}E zy;AaSJPDeA#avK8s4TLirkm-wdQf(xkDq6~zdojPCt&eopEVV!`QfA^FL+6A%qsIm zp!muAGq~NK=i9S`TMS$^cKb}N4{E!C++f~EX}hX;0sm|Y8^{p?&bEKackcDB56N7-zV@pNM$0&^DX7QGL^{qoR*`d&zveZG&H>$;`J6{KjhMe*@ z@A@i#^0&ELYmtQuWf49-i5CI&9LZz{^(MHs+R-H)ew9P$I^B@$bmeo9O&!0Op!jwZ z*svtl5H@ys2m>k0vl(9bl76#QpS}G&t{nHYvtUcO7jcj;PI%0pInr*=PT5n5Phfhs zY6GP$niB-a(g_L6Z7eY#@3{R?C}6G+CZwW&ZppooS>;?eq>^1O6YFLMfH*H!V3d%0 zz|t%T=cesWIX>q9aPSTT8ZgiL#Pu(U+>#jJup_QWY8liaIfDM79BTZRsy$wu% zkB3I}7oJUU$Ih8sa{;-Z$coA#33`KinhLb*_0@K4R%?C$iDGlvJl^MY!|h^skIGw$ zbPj{f90?ckP?V%N$Wp(M=tK?4Vlc z2h=p8T%>T9H z_1aJDtAkq3o|s&)-aVh#o=j=%COIv$sS)G))5~RZBG&};oDn>4h49iFK|(#w%Ji zlm?sqvT+r=%C2XLw`o_A;TwB3Y{00X+1<8FWd}vjm&o>|slBg_!*LL!E3a3O(Rwdc zbV=N(^4{EOYWKW9qTMZ0y_Zp;n)6l_1+QdUPbru6L4yG6d!<_Yx57MBKMj$^_-Tse z0=bQJ+@9TlKW}qCm#tiXa)TAMvL5M(@`#0GnN;iT?n@cFRibb0J2l)Xl@l6}QTCyE zX+Lpt9h4@dWQVGB^0Et&RJ`x2o#`KdmXEdD#tARiB2_UdhPrJUI$gXKfGd~_ zjQ&)=Z|Hor*do|yyw;~KOe7?~@H_14a;S-nkLpC<7Ksa)P2+WcgK!nikn*zL9Ytkx zSyAl7w@pK-+amLyqfp8#x_)(QZ4{tR^$G9S?##`?6CEE1W;PzopreLC8`%B~6Oi$t zGala&k6#=g-Ww9H%2MLmPB>|_*C)xaJ!sLzI918D%464Es8t1X%YX9_SY^p|8hbDO z{sv}`UApdu`UGWvE$6Rsz%IX}_D^p8CgDHK+m-z8e27|J4k!h9#6g!gC26za?na%J z!9`3jzoMcv9k$Tqp8QO)N}c=|(E>@c4!G=M(>#k8Yi%X;Qws?~Aq?gnAPhGOptW)< z7_URu-1mt~JeoUHk`I(1lS*fQD!U!4hBWlhI%G=aD)AtH6hvlOY8IFk;a))ZKbh;A zU>JX#7SSDC)L>|Dmcs**z-?Cq1;;yyBGXltCY^t3A>&<5pL%#2$BFDP7`s{A3R=*z zFx*jZE*iqSYR|)gINd(Bqp}GB$kEz#w+g_Ad98K8D(bq=7FQ0b8jZ(Gy%o?)a6TZD zJnU|t-Hu{^^D3%oBak09S>8wp1b^`N52$aV+|-4NX8A!Xe(v`Y+&j{FRJu5f1fzD1 z!{g#_SD~%EeJ5Lm-kr0dU7TKRkJ+xs0I2+vFbKA{zi*W-7|fdSuu{iDoli^@@}8_; zjGV3-_sM5Bm$G%o+3pAi^|-2SKR3|j<%|>g?ysa zC-JU|EG5S-jLCrWg%qsYE3>=h-j&Hsh`NrK?5=h>77Y3p1KjXT9*P=9K-o2?4b9D!D+kecflz zZ0(90jckCBU3R?VT%VuZgM7S@=(fRowKRw&Kn3Q!?Y~rf;?YC9I_<6}&ht?j@igIB z+`euU6l`UVpv4ho0hcX(aVSys&Zwn*LotMZevmtOu(Sm$2JiJJ23#XlmUQ4qt$Ozq zu|38|brlTMMBiTe^g7;FA;0vOH|(&q_iMBZQ8gD*canRm8(?>lsS+)WXlGLn)igQVEyy_fbkBSY 
zOvRxikJu%Bq}iv)!Iud2}sSYpwUh@aX0o)clAtv!t0iHca(P>&q%ZevU{eGsxO5Ywj?BaYKkvi z$cThV_Bx$fhBh;RctlGCiNTU}!BX0~P=RbcR@FOsVbizmrY-k;*lYT|1j6bWoY~ek zZ{}v$pfvr#_pg?P@VZ`8#EnCBmS0@yvAHA!785IsJ#s0u?Yj5VMR~YD_c(fgL0Qm6 zX+e|IpC(gze+Q`2crxoC-P-4x#V-XlV(m#>93W$UJGY}*ZS#2IJ>NTM#$Z{Pbx7n)oAKN}7FBB+z zWhRp>vXWkxBoEKw>(ILdCB9vM_Wk#1Qp z!t%BP7XqgjRfj(xp^+CRgTG_h?Nv(;qvIXH>;E`E*yDc}j?a86R4b6!@I^1ki zKc+K?s5j}Cozp>bGj(_MbKD(2qC>&(fm%Q!=FR!$_dLwM zvnHmB5rBx>o6;0jb)`uDGH>=L2*7Up2cw7qplaaA3m-OIOwsP_TvzD{WlF8YsWfv2 zY*jC!qjtKy0cFfbTkfBKWdhwbIF1*7$rI<4rjA*)$Wgi+9ulKjEZxo6`@46P8&;+D zK9BvHnU5;7TEaU4Gf~?JCoxN+kKC1U#`s86KYo%EAt^6^OUYoRDWZi!-F&F2<>FRqWR4*4Z?FMA*- zA@d*C8#LT|h;X`rXGSlehZmFlWdO^%Q!=ly^ExcQ@ zkipB}uP1e{uAJd+?2zT3g)2IsA+_QocdxRyE9m_91L7;fk}G${t9>j_oDZS>%LSk< zFhFY|`n;_XGx1A*cAjAq#bbYi>P*|jN6G0-_1to@ls+ZvDtqU{aDHmbfK1!)1_GV; z;$-f5KT#_V1RhY|yFEqcDp9Xjg}&&TTYVtYn< zbsGkrpM-lj+43Vaha89lCetj4xKlI*cJglBY|1cpPBB z*rGE_SZXeRy%tJ?nW99b$I*NEc|{gNOH}XEw%Q_|dNQU+F3(lnItrJKC9{GB-jJ_{ z43hraXOC}wFgpuOi>uaM9)hKZ zRQe_lMWRrx>tPk{cjHj3o>TK)^Df0x2=+MPdZ+n+Yb@E}Q#(VdJ$Q%p!kR_U)s*?KCq+}Q%n%(-a159Rvx z_1T+HM$!+{W>ehCY@vw02+WaPlayB?J)VkMeiXfzU;l zL5O32@3+vdHf-b#dmZvdyH|gHsP>gU-dv@;HpR+hm%U4&g;)EW5pR#@T}L!#?1?gb z0IM{0Aidu~xJC`2c7M!+!?uGPBn)8xX(c+H6&P8gSi7dTOuE5)y zH7%mFTkWpKnRnU}9>gU3E^a&fX=JW`-9$W)=4Alh#<&%iq8SQ&9lKf(8MgvL0h1!j zpk@f#HZg4ZAy&%A^mg5$wQabE_}3M|7fgQH%Gvqsvk+FFIP|5f7zc)ru!9qC2x%FA z>c_RH?ACsuT7H6$ptV#!_~BbzKAd%Ysut|IT|WUQT0u7Ia1&)?R|Di5BPw$uK~4_k zPk%c%sTvh(A+8RjKXp2jh}@8<&(G#okePin?Ii*TEAxW{jL(XA=*B0kCa+FLCn2AE zGM?IHPUUZ`XDg%IVISNU%jxxCYUy2nrWy^)0v+ZwZiP0?=&Qd`!x#=*F>627@;QZJ zz7nEUzu(F+XbhWMJ@we9f>?3^k|0N6l0`|t-gOTWfphE0D0h!iq4KCJQ#!LrR>WuA zyc*-}T#_u7fY4dr2F(FPcAFpvKOjSbFYgPUs9y+IIadLAqUP~3m|DBFB4A5@+W^Iy z_w5dBcR0R$=}lj*aEjk7JF*eQu%SsXy6N2%y$@V++dND8wQRsMc0oP&RQNo)hY z+j0*lcf-_oFK&T+P{Zn>YwdkX_FQwsK0SQ9u4}DnIKYnVF0)@=t)3Ro3kN)U_SD{~ zEp_oJFMB=S%IT8aHhyhR*N^Id)(ME298f6LdI{Z-`^}XXJwjTOWWsy zXVM}KpuTm5pXNR!wWY>dfk3%fi|#SeCWelKr|#BHJu;^l=m6Os2u`p0-Q^tSupUl( z$l-6HH9nojqa>eK{=gNw2nb}hs50VR$LjN5jOKYPF$JM64Iw)SH`wf_Eia`FmQ`I^ z*7KS4&kwaU1T9_cZ>Cm%^9ii38kl@?t2;8FV_WyKpml!txO!=otZuP#Nv-vB6(S?S zs|jD(!seAWt7oWHEfD1LyD;DJ1zze0eWMmByx5D=RPG46c@X9&QhRZo!Z&9XPdWJ% z{u8n!qo8d6Mi4u5zqFtW}{au#q`B^-JN^dV>Put%6cD*Oy(NkWl9~4NI zAsDhp_-}LX#ic|a>?bogtLQLf((*Z~3kWtG;ix7u5gP^_Wn^2+7WdiXdbwp_Tl(+U zg<3m(lT>Qu3XeK}OOFIpp6^-Jq_yPYp?iP=9k#1GoEleC7p)}j4`~8OyxAb33|qCG zdGcQG@IJP&M~KVOt-1Tc24q?=&3n1HUN?zdec)JhoAudFJR$TyRY%@U)TqO0V#Abx z2#D2gu6xjkq~~KN4xaK>aAKFJ#5)EbVf|dPp^G)(Vw*dE5B~HXkA>oEs&Tz%gtb!K zuh!I4A>239ob~tSjx3|0$tWp2-Jx201F>K!jH_EGi!1KBq#5&MRqt?h*>4ntD?vp{ zjIph$aIhVBAtT?$86tnT7Vp>s9#RzHGk(!z7l!J*jDiZ8q#TUCvK8&)y5#zv?_N2S z6IPw?uhPwb(mB2_jJGx4S>oR6C+%ShiHWhexHW<*ktZGyFmo(_OZ1R9JTSZN{A|{l z^g124%HpW$%aeEVkuk87I@Cf#D5yHuOKXjyhx2sN7{iZmog4bLYv@F?Jv^JSe_3O2 zTS1Zywh{|0)QNbkEpu29jMX2!A}jB~00G*wyKjkqf?sfr4WSozSXth`)%#sX-Xvkx%--V7y3gon?I*WK*v>rD{8SUzj|dES7UPSf=cF>X)&yqJqAnw4ZfLckm7 z55Go#p?EO1pM8ayx9&TH6IW45Rc<&Lj*TIBXbVonhHW>Z4#b?w+YsMf5-`i+eNg=Nn~Fj z4x=~^h=#K;bS&x-Vb+i2%Ffb09^MIK>O-`DW?1_)RUEwvBW(lT=X19R)H6{75wQSa zS4>VU#f*l0`&^u{rMszv=2iHB+SGNM*!@8XlgVXH`v%4KGKvNFM1-RgqQ5~>L zJ-4ikWPo(L&3ogMwLmY5Dz`r$w#i|~AOh8oDf3aqGWu|zbQ>{qCCu)utAu7{m@Zsm=x)}U&}D_d6Au-QsPod$WW^87~5eLthQPriDTFyGv-9c-aGPD`3AFqGC?Eo!V1Pl z`SLfH#~Cz#*YBWE)D@;)B%ZnuRQ7czm&>ksm*M@*YV{rJGTmE#+A23Om#y`P zq;-dfSSGu}@~&Og&xk}4^$N^n4dFkf5$@O6D|L8>SClR=TqiI?-0ukR&h}%!jNkRK zi7ts>ZXaIocCF}`E?mBpsRnY7;f}noe?j^re38!D>q6VV1+Dd{!>1!y!Z^NefPA68 zYJi~uE9A`8q!MJXRC~iJ&aHK&Ff3^*U+j5t__Q{ms^P~Vxeqm*28!BmwyXHEl-6u? 
zt<7P%0u6dOT#%3V%wvLnPDF3)3v#Vx#$C;u)RpbCN85;>b=d*Y zG&Z_`Tn(R9wV4v>GRob?i3$KKfB8c%y~YXd!VgsKyZ0Ei7rl;~LTq-?^^RzA+;amz zWeLi`GZSv?W4EqW#MNCcOoclqYuzZC5xZqU75Wd9-eAnC#a&Pd$K86iHwxN_5Ye!{ zoRgt$x*d_MDWQ99&(>h9pRW0|VWg%#%g{A4yQuB0Ynz=Zb^NqaGyQXFe~;1H&Uank zkh=xhIO(D{RA#tFRiB%r&pkIaGDfcVJXfq|C@EIGxhuD&=ZSaGD~H>SyN?S?0Ihgz zdOF2E^Rp=VB`I}FXSIEu)XxWwo~j~}#fN5UT{zQ+S`Dz#js4<8oSt_t^H2u7+uXNV zbE70LjRkppS1Q4-zJZw@2>673fB3vD4S)vLb=?Td>pl9= z&kOUihenUrZ+CrjeSy491ENq^S~BH%scO^cJ7}T|&#Oj_!L-ik9b%yS14Yt_u+&K` z=4+uRW!Q0_58$y%?8yUdFV7EXBr>T`e!y@o`w40r+b%1H&*n}a(=BgA6}WgC(JUZlpS?c+oFy9glD;Sf3&&X;yoC3r~?H2BYbk*&)47mvG0U}8e-wZ~x&+>k{ zoEMb&1Zti2kW)b^dIg{q6U+C*x+6TD^p@9iDAAvffA?N*U7Jmi^SVnA2!PsO`+|0( z_%@)U!}ju7(hdQH;@p5N8>#S*_ig+*qVBWl4I=ZlT*1{&D-eV!NekV!3mxnOeRzU5 zre2LLl<9|cC!6)~8si%So*{V&4}FLJ&%2y?PLFGVc%~RT6ly8?&>S$)W0BKu#6TgH z087Tke|54@1$5f1hee~oYFUSOxbq8Wc{$DV6w5qCZ|IGg%Aeq&FZ#D6Tqc-o8@kvs zZ+?)a{@~Q_UT&^|Sjd;IE#K_+BP)ln&#VPf z9<>^tU31^Ox=+|rLU^m;dfFCuzZ^h--(Mage^Sli;olw9dJu+i6(Sp_PbsCg&ntFm zS1*vT8sLf42}wk1opAzKQ}QyiYV}bBk;?Kez__zGrh2Lc5~zL`UcAr7yLYSb$>c~7 z!%FxnutRK=q7L2(XXtN^-BF123Xg3vB_GXcR)kl)4mGm6-+j4lbwUY>6aB;?6p*5z zf3gT4y0^LQ;{cIRgcrrLB{=AQn{-(-EN~I*$1iui@vh6AdC7w9*_#HWi23jvFLQlS zk|c}ubSRS~9UiE*Z;;l%-9%$|7pT+eq-R^|7W!tQ7O2Btd$qQ?M-H)NhIM1TB1AM) z&>kPAwBlC>qZQ69C9WC6fnFi+F|7Dlf9!$oiRJr##j=;=Lw;skHr{M|7I2+wmDs!wk3%;p%FE;36a6(_qv#< znH}^_J(wid+j_q)KX78iH|~YV0Tc?`@sm^@d%l;qWO#4l%iua!xjq|%dE5@nfAbN? z(GCR_1a&kXfzT<$lGe=0vBGDoq4<%Q|j*G&K+{KYdCUlS(}uUcca5r>eL zGx6dz)h8kpl%;YL!%di8&eHTbCM%T;w}l}ts%P0EF_mWWHcHB3l@ySPh?G@w>`%9< zD2Ww*Sa(8<#m++^c+}Of({T$CG+f(glv(~5haaI>=gYT4gRfR3& zjLuNgWVT*#Uk#z8uUd6Sb|8gXt4Rgqjmtd1UN7rr30BHbezfSl>hDF3vVbto_@lJe z^SGt2Sx5&<(9eVCi+K)={GTAjy7sziWEA6mhkW zPUU2Fw4X2MwIi3j@FL`W_b7BI&1_}jG(6dNiW-69E!}qDJqX!iG=`Oo^o|&6;Fo97 zS?|aZw^|e|C=dI>KHn$_vFP*@vrC}IR<#|fi5VQ|DO8zw<%5U77C8z%CX{k5qEb@|HZj4UBu?JCdb0yzsVtHey!J=r>pI%5DFMN zeR2+_eO^3lu051f$eJGUqCYy8fCE=ED}$^$dW{voFYT_Jmdgj1ug;(p%H7r}Hr^KL z+CZG$&ev`Fe=cgozcf=|K6P`=wy7fyn8!BRwwFMSlec3`l=7!})Na3AC3GW+a`D-G zi1n@KgihTK#N}m>8ze^$)d#Ysk#Y$T=x;WKlRQflB;8jx7-;K{afWZKY%*6^9ye32 z*JLHE%{OItg51B3m&9E9P-85y#be zwO@#m+?4wq-v0CL#cZAD82kralv;$_^g+ibgMGBO4Bw(&Ty<#43?C>`aO;EbFUhE+8fAH48mC)P4 zdN5mSu{rt8O%vS#8NFN0cc2fehlQG1+tOxYf5TSoP}dO`JA3!sbPi58ytY=aAXnWx zJ;{Q(8Vg=-54*QFUiSBE=2p$}gH0PLq@}Yh`GL$BQf%cT0m$bb@vCI6i{5&6%V)F9vAX$zgGRK;n{- zf0XO`0(ou!?mwJrB`9MT6Y-s#BC2&a!K-x%Fw|Q1abK?wFHDP#Lz+OG;I?oLa5=CYXQ)+pRl>OU8qhgat*d;rXj`qiGs` zWgFhchT;r8Oj24*o8@*wk<|q5w7N_{e}3}M3y>=)=E@_$o^PU68M6ry9tvcC<&(y?W|e?{-Pf7piqf=B56K-e zM2K*|N>5$-J0UcBY$!wM;>DMScqrMb?T~=BaN^S8OZYf#@npt|cJ7YrCCg7%f7XP1 zE$;4@2SdMa6>8dI_Igu0nH+c8yQ0lz{d}d}MkkWWzs~tzzFAeMO5_h;o$u=rFvPnB@DE6)VoG48?s6f>E-`aDGpRJ5s@z z^J#&^T>WWappfJ$NaA|^+Ho>rf086K$WvzT1{(m@T{}p}$hCKi5A#NBL}mj}eV-`Y z!&17Yj3YGnhTu(PYeazqq)-d@R~3!putBgR=;fh7_X9YI8s21*wY*&Nw|j=Yzl#=Ad`)6gqMtN{e~lO`$Of^Ar$a%wzV-w7j!c4*Pl}+*Qn)%FL5@$s&B>Q<&+8@m7)h zrds&_{Kdmqhzs_1WNt6x)S#phs06mtoz-&Dt6LB;84wW4W%e9^e=As4&)s#0J~~Y? 
zzoV<&l3xr*opQ=!wOh!A$!3`BUcm95>i9x3{R-Ej64znMy|euC8qz-oGBh~v(kX#3 z%_0CC)I~+lc9v$Ab3%j!`SrX$EtQi()e?|LJ8M73cw+E84)<+uOI{{J>OhtGITwJ_ERby}bp{AMD~aOftAx$11QdGas)-7SQ7yIW-%f2PaD29?AM{`}b1OlMp- zpM`4WpRtR$ZBi?I`-WasQ677Dq1_Q!yKbuJ7NqYLjVz~|Z{(&_hpWT7;x_27)<^cO zV??0vmiJcmC#2G984~`jQg$BIbdWH9BTvEXV<*mCY;wx?hkDAZeU(7*`Gi2AnAs3z zGcM~Bia9ree@Dgn(=gs4;_XWhQ{+mZZ;2zp=Yq=5(q6_LkQODqy7iWEFw$f>-cT44 zZvt0mwos5(B@}lYoJtJO8Y@+YIPnzZzJ0WGEvgeKT|7c}(?@w^+dE%vLI|F*o3K7R zKJ}25?KD5bB8JwX+qWUYG^7e*60kh6>MQC52&0sfnrxtzD zllm=oFOj0Ms2n$6d}9FbaJ(1$3&f%i8wK?PeVb4r<(M~eDP`Hlit1Y$B3sF= zCO|6g>+vjcWmIf;s6^Z|w?l?lkn6azG8L+{S?XW2kG^y&bCak$nq`N&&Ipb)nr_*`qK>*__UwJ#NlA zS~fjM(6LR$25HxY?$jRNi(Ev>_u^_qcdMvsgMAhL=?DkmgFS77SG*0eA8Kf7Q8#-} z@aVG2Oo(}VD%kt`QFkSzN&(}{)YL#sx*3_kPP~SfTft6P+=TNjKhZPnW`Bl2e+Gkn zeWBkENZ8z!TJ6y-_kL>!mrfRcsg#xRsx}w zw3~MAZ!U?F?1TD2*%ZKxlho$RJp?~)HxT%b+NNaarq@wmn(cSSyHEKpx*CT%y5?-K zc{iK2f(5J6^v$z@Y$;vmw~Bu(?^fuKqEz*dy;tTycKP1TJFOb&bS_g&f5=(6FWd=& z99Ri$nru|<#&`g*4eVzro!-K72d)wSZnaI7r3L*a`2`Q35{@*fu&%;#iG)`qNMBVs z=j%SaI7Gpo0^&@b3ThQ7i{(B(m4#Wo8~T)30D;P4L)QaT8mE0@v-zVBUz@Bzoemra zldir3g6Sq*oNl%UUGffve>XR`wBFqFpu5avGlfq&4L2_RP*2< zxlIt?0@@wouJZ`<_~0-=^Y>C*U8z?eM*FDZKEw$*Cr%|ixJPq}e?Ac_7OKCgUb1(@ zXlgu^QB>}64q1%+Qs5VacZ*sU71x^+cPfYNdL^$a*I)FH$GF&Z3Djypw*&L%E<48Y z2q^~n(HWLo2q#xpoor{M7YJMjXV+1Ar#BXeB|q!;B56CfxhZ+Vtd|#ZI$ybB+WN>U zAx_!$B^54x7+fT_f9@SmDI7Sgoh(PnL!+Ntuerskd(lhPYlPnNN)Di1N-)-Wmn;&1 z5Fzw;C@3A61h6i&`mfaJhAFt=b~JC&-M{*$qXK? zj=FxJ+E9qdSryx5$!Jeo=tARp*~n1jP0Z)1s74}2IE)BUe~Q?+?PeOJyRy@bW;-l{ zfglB98)whvvraSM+TA0P--J7s_#u}TpXRl0$9f}^fc9ikv05-z;q}`lxqiyyi;LbA zF*&(O1dQFQ!F_Z3?9ZpH1+~}cgy6JZQMda-@BM%_+-gKdGj}kAShI)dA~V@_-!<3p z)4$Bk^>mk*e`=~pmCPrzLR4{*7^DeS=n>E_k2kZ@(m(OkSN{T3pMU#zT^sw4uFVc& zQ9rsiAuPlHPU2uqccN#aIP&~wk;|{rXKK4IeLQ~&?D#xrM$J&%`Txi*x3NnP))$oT zKuzkaRX8hEK{hy*b78$LBk?wiU6bbgNncXRMhh|ue@ThIi0s_%OYZ%Vllp$Nby>Z) zm-!{+ul9$Wej>iBI5e2+E$n zfWA)76d+X2%zre070a=9%%QU5iX|KG-^%x!zZ64JU+|9&QtW49J5;{DvWDOPK`UFg zcGlFDGLPy}tf z$4^OYcrn;rvdhT85l=MtBDNo9(SCCy4%{(@sHFZ=!H-UUwU6HBh#w44(ffqkco~X z&6itGgm(=-_qsXKa_3w+ICtbk0T@#TAGQeh@JLeYgu2osf>BKDiIHHNj)T}isT1lU}@8e6yEH=vK>JlH+TZhU@*{sjIWmzrX zMcjH<#s}ms!Nwd3ZTpsiT}BAcn+};Af9yR1ju+O3dstR4b_%mK{A+fL{Q=0zft;@2 z$$Fb^0!#3{V86O$$9rk-&V}q^t*_ksxkBh{8F63&V@JX}jgK0R1obq>MgdAtEbGmo z3HlFi+sJ$*kN3gL9?Ef3frVfrI;&M?yOK6l)(f9|P$ zQk?w3vYqc3owKOVE9reSH`|T3px8lt+~KuOH6mrkwjEhRj4I^kERQ)yP`Bc(BZEd= zJ^Ztekce%I!b>HOJJ0F8CSR@)tU(_-a-4E(&42=1j84ieL(2pl>VbJqUvW&zyif zvZ+;)F_yzixw25la6$gz!u2;1*Z2>Al8`Jy(v=lZa5Pa$V~RCuV|^u4L2pn@4*c=U88W8eKk z&g!8}cXE^kf#}p0L{v;uf9Ii%YTX@>PIb;eRVI(b$Q#_{SGddIxDJXq#D4fmJ=J>D zPRFrecxeF6n~9Gx^|>sfh8IpNL>#*DIe|o!?L}PIr-rBV) zDwF`I6di!V!MM)OgY)Se)>Lr4xEZ3w3sjc)N_NQh_toXy^0%m%F80k?>f1&+)fTlA z&TBBnc%xO9XCKvr*8L%8jI@-IfqmH%Po>yT@aMxYss(3okD}R1pO@Ia+LQM@th{G< zT-Zi#L#^4Oo;j*vfAd73?L^;ob@lFc)S?o}wSHJ46NuMN2)jkyCAr1e#%jn)wq0wB z?8ZkBBk@6ZUb8$ha*Pktp58z5jdS9fEfAay&JZ6FLO*Nkz;tEimD!Q@-L~=6>|>sr zCgiO8aV)@v3U6mX(Ge;tUA0ZtkfP)xMTy-Xk$ z*)h&TZQmjb%u|u6BAxEL_M=$KDXh}TFKS>x9>VTj3Z<~vUT&|1IU`R{BkZD-8@;Lh z#o=VHugQx<;f}7q0+hAA<=r_TnU5?U9-n%_>2ax>>;ie*pGbb1Ph*^luqr|%V#v(x zd3)Yhn`{uFe|eWD*0@m1x}NzMF;mslty?tL<{`h=*x5)FvE$dYaeDE=;?O$m^@ViS z`oO$nNp))Ktt~eC!`@(Ry3|2+Cz{(7j=OtQ6(}b)7Xi2RQerLcmSRqmG6HYi609AH z?u^xWy+JWfXYEWU6vD6#X%F=OeJDaOqbhHm;(Rove|v&Dk)5~Fa)o^t!mVe?qdey` zKK!0mr_1j0i5jB{@(h)DL~i$0v(M`wYjzYFI8y-XRgH{)z3jFLl8n-=7b>cRE%F4x z3|=G-X6__;dGR81xN_2&E1`Xb2nQJiflSbvSna?wkhmI}U1?;bHcIjz9ZE$|+8P&b z6`4n-e@S;HclV6unG4g2_XqL3qk%@>DC*mAQ06RNP!1iX`s%?GyXZE&yH8NP<9fWd z&U;xnfRz9^t(aA?#&$!YU#D`j-Bxw|L5Wc~mJ!kc%yc{_=@z+VuWu5(tv-(1Y@uHa z*;KE|h|H}r?C85@M 
zFrhI>-Mb^BNL9pf+kzM$tPiimrD9V+Os89Bjj~-nZreLh02NXlAX55xB%(S@w~?v(2f ze-sH@C4I%~w!gsZva=*Mme74& z?otOe#`oSE$9H#ECN2#uYwW1vx=G2Le>u8$6cI00ZvHM!fO8MXK4BO6X}QRxFou=^ zeL;b{fn0L_JoKMYg_7Gk?IEOdLZZyGJ{_rMG(CiAiBVoo;$WEm=bb-Z5BKOCUx|=e zq~ypuqd-yPYPc-9^Sz$)+d9fObmrz^M_+m&Io-zaux)b{*ScS!K82a+~Ye^^-3^GBP?ag@9%UqjgqlJ1k$-S@UxoaP*qEs~?7 zI7@c=oNhemod@rz-Dqvc(|xRfjF=(?~jofGct@nI$}J@vx*`FESG; z(~e6oxN*A+{UP3l;PGKhEXV6{cUY*%%0xY|osm0@j*I10RPwResT+&-f1b;ZW+`ba zI1`j@)%3zQ+$`@FmMhw+?JJ}>4!}l5TGuAlZg?Z9W;CJwz#QBax7(pg^Ql3M@x?eF zozn(L1Zpe;9e}1k?`rvNqui82!o%?)5{zbs7eXMeuXV9HtBG xoL2p+q}_(&lj7 z9>WM}sDgOR_nVUubFX$|f3?2r1JfQlw^&n<^^xs|kVRNmp69iyfbAlf6r{Moe~NI0{$1O zR(6k*w40t$MKLBcB~DCWPHCyNnu1FDiaMTHO|OWdXHnX5sDfjEWW`Q8=X#Lmv27G; zvaK#BWl_lahTGm>aTFs~ijBNj7nK+_fP}XjNN$9t^tl+r2A*H~dNFwS#uy(bJ1Ohf zC(;*(vLt6qrzv$re?i%CH*63Erd=a0R51^aMV;%+r_09Ym5o0x+w?&xKNnk+c3h&j zdkO5NdEy(kjd~aPw{31_+hIEIxIG4E7GETl(5aH0^rL%w0FT%c^=S%+i~K6ZC?n3! z){8$#CqE_7Ku|Il`i}T z+Dz7!#J7}NGu>9ynz)TuVLCPLr!|5sjKl zgcE9ySP_}Lf9x<`W%9c5GWFRF z7InIx%hEhgFT)d5N)Ubbz{n3=?$0WrP2 z!?Nc;pHp?n&Zjcq7Em+_pGa9^oqjlMwaf0hvEj%keCRp+?i1Y7a2jBm#Oz&aT`V?>7f+b6`x~m z-kQVW*+93Az4d3(&FW>=1V_!$?A5SG-Er}etp08GuDl6Fz?jy{(lW)PKfd(iUDNwb z7D7$~e*~MZ+=;LeH?Xg&_w-H#m8?+&Wh@lFAue%SZPyGO#I<+dFAfrIm$UyBMz=A<4L0!N>2cWB5Gy$Cf1w%ra^C%Lyq@~)$10!Y^&+8M_(MD# zfjmPCuLV*3C^PURoy5m1+C(i^7geJS0rH0l|CF~-!7STyO4c&G zf9OEZO%P@@6?D--w(3dS)OmF;PRYGzis*59EcDe|JLqo%O53E!P7mhJN@o3@Oq*hO zcGGZ?ee;_U$=X8<)y?X4!1Ql?nCq&H2faZ#a;-hhe2Q-9rb2t>2`Milk}0ryT^1V` zffxis8zoGp?bUH}@TB9#-F`$qSy=Jee};Tp25TDI9fp!9f3jc6btgJmb02p5<&8C# zy}jDT?pDYt7DcoRI(wX79&d5PeGr+;S-gc;uXQ$Bx~-PC+r+HtaIGV|FRU|!%)Exl z)(lgNZ1;u+)zAauy?N7lxE6wy&zx(sKG=*5$0Vtwp(vPa=!~~vftXrmg(>3JRmP*9&TOMpNt4#K*^I2AS-?H+bT`t zJz1_|NPp&!D2~U`TCk97dOqcmd~;09`_byXE|0s#n|kbP(7q_q42E*G)bFISDg5Xi zKXu&%-dGHGnnpvoq1!dy1r3CSbj3$WIC=S$ zOD$XxHg#T|RyC$x^E#zff5Jpcx+5NTxpk11%vKM~NQ-3TZ$isAE9j@OnS7Fcko;b+ zOUA^PmHs#r&4J6Sb#!oFt+3Y7yEkMPm?&yNF%O5TZ9cdz6Z$X?otPECf0MmKml%P7BM1T_7Yx;^#jW2)Njg_Mkg z7w`PVkftNCaL5RoWzT1l!+V3AghTH;MeNHy_I;Ptl2~o+_H0PIYxFUJeoi?i4=#wm zZOa>HpN8_0Psag4-1s^Ye_;}YU2wjkQlI8g;YqKTexK``Ev|*SA)9@LP{%XwK3j8l zQj9G$?JBiFEPYi1C+_W+uSPDY&t{vsr!hL&!Y&(0>-0IEH}?nmeBXQvd33{70frv3 zN_*zTvUd(K0WpJS#igR)l38C@AEx9OqNq2=|A)Od*;ZUxmPJ2^e+B{sh^FegEhxz;}Wj(f)&?{)4TXPwNPC4?e8+|A9++}ACTBiuN*N`3311uZJ(%T`S70U;c_XLP3G=L`n?LM9Kkm9#XxkEhsipE3abzbd8slHsA$PLqr zLFdp7E}4bZ(6*N@f2ZoXb?r4+M{;w#PdUiN+DFqviEi(g`&HQG%kp(vtbF6(-?qfr zkG?p2?6Flh0{u2rCKHdBYzGiWBm%^w9St%+(JeY&Pp}P?neq9W>dm_xS<1K_}`*td$b)3Fk?|Glj(*t#@e-iF~?(J-X$G5A3r z9vW=C9bxMODUI9!<_hbJ_t0mQ49?+cULz`SKXK2i zTk5X$?4?w>f0Xq*JK%EfxZ_*ROqJD&&ef?c3n`OX6A6yukZGL<80~tWUb!)`$J5xK zn0zww>?}KJV+eJ)a?i^Tdfvab>3e;6+3jo}cw2F)#0O_LTJ2?L72WBlJwKyFQ5V6O zZ)T6xcsGueEFEpA!}Gm932Ab9FP=40Tjwh5R!UK4e;ndpoSPm+T_AMdUlrDq#6rCL z=GB%T`)pBetIeEons=~1%obc{oQYecpK?&&aE^1#4o48Mx!-F)xTQINE{|yV=j|MB zBGcdo0qZn!we0#1FH)t&b{hn}8%=>Rp8B>(M-VWQp8(3J-Lpf$hDw?B%*9SFs|2c0H_zZl z#*#Zt=tKGFi?hGlPLqSOW$jg>(@dHOGnkSyZ^?uc+dA6q+y&M8TN9W_c3qX)>-bb? 
zZLw?)RbS8Ii9Neg>2YkZ8ovg|t+}(7u`#ZFe+t_7aoj$xr;#roCt&^UZPxML@Fd^> zr0e?Ln!l{Dhg!I8zK?A+UqZ3SIMGlHveMR_oE?mRcqx>O#Sl=BU&vd7&oM0HryxON z2G|@~)tt5#Z2%~R^>yv-?&8p??prp6bfDe7L=g0?UfFh42GaODSW=i8HX?2Zwjef% ze`y^X;~H#(+Pel5cKJq!mwwb%z)vX{C~Q~iDU#70;7cOJ9i9?<37A&do%YZ@IA(7O zStO-xDN}l%npe#hTQ=Gj7e94g*69U*?$J4&F5dO2*DC=(lbWV$s+2fpb$K4^wKa31 z2NYbr%dC%NXN**TG3LDa#dh*1hQ0Q^e|nxla!o!2$RA7JxG1TI z<;Cbi3-(-)Fuy*jeA!{Ia`U>}Fg;6i5Qx8R-(iSbrZVZ5(=SEU+LkkBd%u<&ns!1r z>-x3mA6LePn?{dEyl6}V(j@WvHAgR%Uaz54zU!bklr1Kk`(v?)>3cSs9^Bwof35Eo zb+nY^;X|B!yTLSHmlpoHUQF#0zHj0#@YBF|!OGhRvQBLt8{ut0O~QOYPS4R_`N@u`IK{kNe|MHwn`*N&(w7QVWUUbH`E+$Fx5^1ZooBGX#kPLO zb8BJjqA zT@>ehmmE(2F1Zz33o`GC(LGg+A+Bm?)Ly=mS=it9!BK??6W)d z7%5wfg^kB&cq5@EI-O^bf99gCW<+l;fZtvLk>P}d_sR-(qpc-lsCdn(NLr@TDC2Vp zk_IwGt^3(k&hT3W1sBv)6tt^PzSG(>Z=x$5GB}A-Mn65Dh4zSeSX-^e<73ezGJ3-~rRRxffVw=P+zd!Q#FhpZrjQ5TUQd z#Vc;E{7rbRUZc=^Uhei}#4v7-SpqN}pLuc!;#s<*&!=^;#K_;ip)+QM;kdnq$zN%+ z2NRxsT$^Tke{CMsxk250HGB9f9HuMj_Q>pSA*4J+QyUuNa(mdET=EX zo)}s`5ud5gSVmw1(*8*Ou+S=^F(L8kf^^4&xgBgReta~qH{c&Mbzq9%j92&n4VxM%@@Hgnc+Y z=;t$YVwQHX4A_t>Wo7k%fr;8|LC&9NwzLe{dTMsyxNlW>fub$PO9=zkzCA8ibTENX%0}6(b0vaFg`Ux9eJRKD#;P`P zs~pow`s@-0x+7DX_AD^2?LgRX_~lkS*RMBhNe%{>c)DFni{*5|K+flI%Lz5Ye~0v2 z`#dP?EL>sX&vWs%U$%+}Md8;&8joCldFe!1y3`2Aj-zWf#?$FFbAdfY({GF;UF0kF z5r!-NLR}vIBb)aM7{LKm^lSGTKH<3VUd%8M2$}cnE-yhwo2+IJFOuyvFr15NcMQ}C zm1cf2pG5#|xyW6GUL;wZ-nF}4hH1cFDu%etDtkDO$V>bA zx+?V1fiXbot|!t#oIJW@no`idQ{Zoru(y>D?}H7yeDQmEH7b-~3vIq~H^wMiGY4J& z3KdDyzoKU|DkE)<7TT_2fI%n$Mq76Mk|V#StE(Ws z?Wr$HsdFz^Q>QqLJvatk=|+EtvUSz!acwY5=bS~`OL8r_6EjuTJ#8;04L6&CX&&9D zR4IGLNfk|;#}HlI)K4%}f9cjP^@6Jp1J5t!jM-aTOeWAu4_*uhopf_8+Y5Z(;u%}wq8Qr#&L++tE+`2cZq99Y!cZNos3~xD7jp;m+2(oE zpWoKqMxD;<=_QD1AOgG&q*&{CuXHz3EGrlP5-e@3Yv)A0;h*lxe{CW|XrR=SYX%h` z6=PYeQq~-Z{);2fyy|`T3)Lh}Gsf<1m9@ zye>Kv?caL$viFD6GFR^q;c_m%;n<2*FM4QyIR}2Pl@132-p);6;LgkpHu5%O%Q2nT z=^T#LURgv_WjUdCf4cTOg{!@CPxr#@GJA|SI|X7Udt{ErYwN%7=jC#a1a?me`cc14 zL3}E1b2u>EA8ENupN@^8@*ETh_|7DhV>lqEV|L)pzM$%AP0?~QZ)OGxbWLQ>kH;f# z?4O#}FD7#|q(Kc@jI-x4zQ(YCxtf#a;%$uXy+0Apn|Te z*~E*IhqdLwG<)O0-`AE=%xJUG>9wMb_p1pob78wV-r$&nl&`^U%{>_@wVOwzq$dBo zQ_S@(dyz7o0B>B?`^PnviGhlJW_A||Uhn?7Yu>HFK36hykZ+0aNAelW^0qp>9B2gw zPeDXs73urFe~32cQ9{Ij>C$`gxe?!DHYI<>A1f%=Wu*WlN_PEj} z^w&qA6nS|+4Wv-t%kPzSb*3QV@ByT{j?>bm?~o4k1UUK4=CfV$67YyGg$dNDpE?>t zR3!!a?FiN~=JwT-YOXqSo2>2|TgM}ZDNgsknN^gOi>c>ImH7y!%rahU>(n9+t?8(H z7{KPCe>y`=UU!%nr9lm299UtVqua?Yo!+AOht9namqoMP%6nQ6Z@kATolC<#&_u&G zuP2KYCz1@bpLN-S4HVaw+xgmjB@@WcB5WHf_;38YkKDKPR#9-~qXymxb1)jVFXIu% zW-)u+uQ$zDBf}In!fE-e4L3Zj+R5dq=G65Se~tR*XeOTVc+L~azMvLB1*i3IZN*Bh z`Z%Za+Z z*~zQQprk*e?}eH{7JuCp7~GpF{W?qT-ig*R$Wd;k)3(mvyw9&|-ejw{ui6h~w}XS; zfAv1HmumFd2rqxf$~Lf_;OdQ>=Aw4qF0lmVHOqCe-re>WZS10o1+8? 
zzh6*aXD9{U-ce)S#!%C8&H2v#~d~pf#b6VUOy;TIHz4F0c1%8|jyM z8uaB7PCvH<7Ju_+2imw*JM_tz$mgY>f5ucDqov>&bN+D91Dj8rqKW`ToczbTm$y^K zGQ!c3+N#MduBE?9BPB zm0}A}^@c~Oa&MFOZt{FS#(WM^#8o^y&izKUj{h)G?X@rWVP4Sg9`9MZdQJOgf16II z&5iBtCG|?~C)M5eEz?E&kyC~|12j99={#7(ZL#f7t8GQ!P3CbJ!66o2+y@D^d0|=F zXWCoIOQ>nn`FhXi&--QWN!fD?hY)iQEg4DkHVxuh_ zt(7H_%_p<*k>2@roYesU{!I!-e@pXr*K_rPyz0KI=LsS^ZFcz3XtdpyWA~JWwVAvtE_Ea>gLYS z$8jmIUivjI1CNFY!-##*kDdj!jJrG*h_LxJHe{|Jtz+-0PWfFs6^Va^vN5kq?~o?t zsa*HBWB(Md0OzFRnT3Pbf6MYV6VNaY(=;zJpMBSr9+}7raFeneYuQf8Ej==e`VDRe zi1g9Ytf7LepVGyOJ(m|*pqu$f+!W)+_d}@+ zrP=ECwi&yux5S}?QeXhOBf}hU`%_(xmMp{>>&rZ9-;e&HMd)>jtHp)#)5n3wD1Czt z6Eb)~@i1P@d&a{RiQOSEDeC0eW&b7*M6%VNr{ z;7V(5MZJ6^8Fao zqiq0i&C>*Mu^+AJBQ@S_s4ehLEyYlQfzXb&aVZWtkA6VRzFM`<_X+No)5pXeNlknK z<~^M~s?nHTf46A6*!vn|eoJ58Mj+DLo9wn}D2KU?aXX&lv&62utqL|8WI7?EYLDA# z)64U+Rna1ZRiYa$w~(6MAD{&Lj*fy~ZMn{rud`@Bc|-9b$>$oh0VeMUR0GS&0+Yh= z7Kl^{=1aL;zGbHss->ctE$GMP!CZR&g;2jEeLj7hfBh$@%d))Ra{22pYfB!(l)g*f zo?wpNM%f93cmbAJTM-@7B|>d{0iaOP=0QWgCT6Y9EC4@1z`tK>zi8!F2fqWZjN4l% zqz`*;Y?in8c(E2RBy5rB1P7V*dtVr?GJ4DoiRrH4wReB@;{}_Y(?e>>_8C<7Xogu` zKzf2E?`L%pWPe8Nfe6;z?kQ_uIrQ}fX^N>oM^$6^2O7?GUpJUQU-l~&S9^J<``J3+ zVsCTY@@WBwE0q@m)uP&By*}-vc9xy!b7`K%+lBAjIDVLVe&P%GIt2kHMRM5}i{ReP zJ-esq==l^H;XW(!t0xAbznvQ)f7PeqQ`TdE1Mm#6Rv zl~+{mM@T~-th3@Dxp~mH{bA1+;>~&>N6yo@rxw@sRS_=st$TLezDaBmZXDTkW*^V9 zw5ULaSbs?S81^qm@5NcCQF=yd#s%Ws`#km{kA5WXD!Xg>M~nh zx4W^>ER-lhBSTvIj5K>>1F z$~b&vhr?b?Uy77nD%W7d5=;dcp_;3zeWSwjh<~Asrm_a8^DoYhpdE6CgLSc5Bm380 zV*s)H>j5&GZ*ECXw-CR+Z1jmZRbrS0tu1YRNOmW5;IJy8MxSBgf)L|=R_aHlN??Kl>qmky0ZwVS!5D9>Z z(c-1j%vNv4Qwa7=rVS<`;R=0Ct7a@bjtjX_FSKU&fvjGZ_5^k~1-N4=-}y7A4S$=J z4PJz0+}fIT>fGEu_*&6%qi5W+BN%Qo_J7G<5;&gXtqfzZ9YrmyCuXm{;p zbcwKoMes_t;GqrP&dg)XW`|6f60f6ObJgIZJ8E&GADJqcWfL^04%xKz1b^RPF9&-! z38vh%INwrhx=@uRtaWbL`669&P0^mvE4HUqGtA>Y@lRc2-0hlCbef~kO{gdNWdCj%^BO_Bx)AnL!>VM;j(Itb zX4^wN&X@4(5;VBedDA_}vVXo473;m*ijT;xR$XV0E~-6SgL$x-@$uG|{kc|}uW@8< ztJTQ7_{DwfD_1cS=cC-3jH~A%u1Lx=jZFBVHX=-_EaUzEBowS9$NPpS>rU=bTHyX_$ zNRc5~L3Iwf$dtdYZ?&bZ)b|r2fZRcw@H$R)LjnEl~095-NV&E8&u*Q8}*%Qyq>N;LS$<)-<*#SZKc33S%`UyL_*m^OI zyB$NRVYE}#W?WboNPj_2mN5IW)(cUj=lNoVd5sk6N>D|f(caM;rR6w*@!Sq$0O$}Z z)?;#Gq?_WH@DZGF^O2EX?yw-)Br8W;N$qA-?F5(@+b3oR4~7Y&=}$eX{pyi{v

) z{UaF9F5WSFFx+Ed^{-LXHzrr|s@*3$iQoXAZFQE2`& zf8yGl)!nP<`{ddLDtI+UJ8sjWSnnVR|5P!T>vjC&^nw`Sxe(v;ORCP|wF|#uoAoOn z9qVxtof_Qx=SC1={ERuF9&srMl>p=P$SuWUM3sBhOFH$%UE+prwmn zbMVE^VSn7n78gv)$Fdd8B^aR`yAnqEw6E^bD_NewOHmztXFNpbxF65Xmr)OqJNY%9 zpSF7`D7*8F5zf2K;UT%#lfpvt1Vd;y9N}fy!65(YNe28+ZzF0tI*;w*;byj&!LVe9 z1ynG7hmuOXQ&lxpAZulxpefqVnOWhMqW4TZH-E|y_IB@uhM7T70iR0;B2JGsq^OQInUx`|)(qY73&FZA1>SOUVV>Dg1cFd{tH3vDBPRPckWZ^{vU{1+dv{h8M+rqv zyMJI56fHalUe4`u?&)%;aKgB}cVc>Xn&}d$!l)6C?mC+o5d`Q~c|GIl?Fck18`AXX zWGHa7j9iSaL9=0%feDs}mz6gv?KM=gCfC{C_5})_`6oz&c3x@c&}&55I1#yfxOHn>Cd>? zuN91wTUR<+&&Tox%bqYo$9i;Ifu9fm;L{Xi&N~XgpFi&%|GtuSIq6;ph!yU}n}6Bi zzyd(7vzfm(T__f+T#nX%@ttnq|2jwI->kfWJnPjg}|0tUC-cgSd z6QS1{B6N@{_gr|8r&46y4*H62w10vskFj|?LJy0+ZFriPKjbidPJb|$4_E2aW#UAs-OIMHEPHyt9z!l$&35z2_6_ObU5I*B zgSJ8A({M$a1Y-ZZV}f$(Wa~$t=l1?qERq>yMpsI%#Mx?NDx=EWneZZ=Z|9Xh?xJXY z99>@&bc%zFW$4!7?B3p981ebmX0|(CGOJT_hG&u6xOR51ug201w|{s0U2mB=q}trl z3_6+K&}c-19?Xrn*{S$G|#nLvo%`y>2BOB&cN>NJ@ydGE$;`+hQLc=YK^Cf z$BJyBtGlz$3%r!GUYM(T`{G%3qV)a#K)*v_)3ESC6gO|^M~qiu`h>Sy)f?wFySdu+ zT{1`5M+qgF>4(k$3x7EkhzxMwdJ05ZfXfi_2b};Lq6OdogH^7|LaW7RMU%~1v1a!b z@3NRogj1-~T0KWZFIn%zLHqs0V&uJSN zA7>iVUQhg^GPL$_<5#WeX78uq(-ttfUMNgEahG?vdpdPHUVn;m?XEhB5|hL?JlzxF z0#dPYOuT8iqZ zmSPcO7DT@NE zHeWf~5e=FN%QC_sLh-Nv31bZVVImab|AW*7{@?H?5sWBzO45K>MC2%h3|GLi!qD;K zHtR~SQg(KITl$kNH!B~qf2?_avYn0R^UxhZ!O$2daDNZ1H?d?sXL}L;UsIKaa_3Ap z)*y3)&r-jxZ;|@iO%~huJY7U)6Jk>N!8K>SLlaFqjbKKjQ~pD}N4hiXHxRcmi}h`_ zLSWksg5q`)pV^M#Tu<;OF`p8Z1!?I-mp3*9N}(AZ9WMP*Bbu zJ707zUQ?8)T>!dXW9sh;h-dD0%&N)i$oimG;kyp5^Y6)!r;=J+z zbh%?!8kF?11U_6LCbFXOsX;|0J>pq$!_ROPCVw#}2G|eYx%O0&c;6ZeUVsC z;u^;A=Hi&5N}Nrv_gReT##NARV8D_{C){hVO7v(O<2x)o*8SP}w1W5bG1JS@V#$NK zFl^!zIEst9@5e3eRo=g+J^RN zHkxKerD@C2_7Rz?`ECWM*T4jyHP@qM8-KF<*+Qib)NF0tS2ud>1X^piHRCSF&-g|q z;}NwU?~F~AtxsE>Rm1gpPUXBrEd+RcszF=R=3Mh9S9_apC})wNFqLjgzzS}kYSPi$ zpA2WmU>+|VvF%)4+vh8$;~#-GKN?gZ)evFs)>g9x`219M{3#V4r9J`JsSd^vyH8vfoba^*_o}IIG;Z8(b;#v7=2S`B zqgA=TDKQhtOuRlAt7p7_nTPvf3V#;I$SyjMMsbUlAf}r#HBLE%PIm8Va9R>L&?j!H z#yh@d9yfOb3W3-7zM^hWc@@iR>_00E+(Vdyo6@bglXuhoiRq7L7kVNcx_KpcEeig$ zF;CA%q@4Zn4grSp$r-JAXQh6>N=|37fU9)gHzCaRAp)Ro*E6_CPbh7NGJmS}p*6Cq zLD_Ma8Yn3ui;U-?H|)JY9wBbnMo6=2MxARVTBm4rl+I-@+uRvm)83+@4{g}aca=8E z@`E9paA!ZQ=OuoO8%$_gtyg<}$6JQE(NMI*e_4b|I7EPMdEm~$Evb$P+T!O&jLPRA zwx{Rzjk)+zXg88{&sh3;8h`if-p_=mNH1sG+L|w?I|{cBq>l^LTyK+DQ$jUiE85+= zbNg1N%i~F__pR$B2hTlT^!9Geo6&SN1?%iu?H9?SXl~-xTFgK;(Lj{1Vn|UmyqjFY znS8Kc$??j=WEKf#V4ATer?35;Hi#dNxAyy6K1v-L$KF2d0%=a ziwo2z>Eb-!E;Lg=P6PBiuT~Et0{!5&W(SdeXV36H=2`uf=1~htyf>1M_w@{5-t>ki zBDG8>kf2}AC(w0XfTQQW795QA-ggG`fE9d<(q5ET_oI19cx8?SDYbiKm(a5ry_eT~ ze_iy?e19)(ymcY*<81`}n$URE-6V;74}AG_x0bC7VB7L7U(DqNm?D##jt{D0pNVkp zu6GIKV8*wt!MTs=sj`?i)u0Pl*V~W^>QmvaZq?E(x`J=sb2I zC8o~$mIqoe-%keb{*C=VA& z;D2;-tuEeF*Ff#xrm+^~&@MlpA#Br7@LCfl=L^d{8)LbU2CiB`n^6Y!IgW? 
z{Ie6>VT3-ry6UPsZd6_X^>sIi4|ennyRM_;C6rQms6WBX*afC@*JRp8(JjyQ)20;O zc3`-3^sz6e@iloGnG9MY+fV7qKJL~joqwv#_*EYghjJ|C5}5Yo#wz^^)QIU6M?exsQZ$Hm2bE4r%NK4$W$$()#JRJ)PgrtN!X?z?@==;ZB*`{B}>y5l${-@RR=a z9^a?=s)Q#r;_|g%tRsI-ztVjkV*Oy*4+E;eX|$ z;~AV0_;?AZ)1ipTD_rg0RxY#ayllQ4sVkppuH&b4T)3Vw2{X2sKGbIb(C~(2Vw*`Z z%N0k-0i}m|-Qn6cq00`=6i1K0v4XX*UXx7kfFEPVay?yE-f3*YCCZ>SseI|PXX*ge zF3jlau*IC!6<(i{eA!_I-_0uiynoHMl6FGQsvpKJ=3)Mx%x2$Qj>XEoiwd7d)~Vw28>i)1XtuRl2NY+cR!27j;=HcDXrIYZj`nX>JNi zYvymRQMn77u!_FLY)dTpY;&r%7cLWOxt(f?bhu`Q4DfNP(5A+BHd24ht$)0N%?@P- z+c|G#_313790y+di`fcJ58U()N+;FxX)c~8^|A)^02E82r9;@_&#N$zPnK9UJF|yC z_~RNyw|R$ojI@RQ)Gd;G`37keC5KV_D5SQphhB_*o0bF13Lk4+lcCvbR?pKC6-KX7j^WKfa94OWi!25kNNs@A!2>uC_ z1ZmERbL1zJdj<*k2k3fI0m=Qhe7qDM<&6`Ln?Cfr8+WT`FK* zH)(f9eK|MYJ4esteh0qr{MOx%Sg27@VIZl zc$mm}e1%k4Kix>oL_c4%v~-@resKtkRTDp<1^t9J+Ircx8wkx}+T=7dQ`pqG-tpOo zP+VH=&%r}I?VqL5?7Zp$nmPq>HXuXwh=Eaj2Q!Z6W$rZ28Ln(&h!;3rYQTzb*e^Xt zO~r};5v_89UN+E-$$u8C`1UlKW8nF;^DT|t@&~@S!qQ~jW}5Gxt{9u0AKVBoP+=4W z(L}(23dW8t46RPP$!s=rj;u>FXl{Bz-}ke&dcIzu=&na2X2nPf2HB=wYGGCYpQA=& z2)iqr@t8q4L@1_S|DDw|2dy9`)WWk!TeuRKfd@5`jHomOB^BlLkSP6C+;iAk6ixTIyT2)V5jg z1etrqWMJJVV}GMRv*z2pRzgvlxtzC;=|=4t>S}})Bziluj3NIx#vL}`517sd?(h$t z&VR{tJ{~_RogWm5wFha*mdk<(BWWi zFHFKwNNybhfp9w<{R=EiA)JAiCB;OA(iXl|HV2%9yMJGXzvhs_IGxvxe6faCK)zmL zc5;+&Wf2N7P{EY!0avj;s+xm}CcZxev;uU?tM?m^mL!^pda@yEX}lK`YLIC}UJ=xc^%y98vo;SZJ0>#jY=2Ip<-{IkuOSNrw6Nv^SN7qbLeRhy z%N7$Q520g#+4at(tWsS|o~v+K3?Ucr+TT!+dB-!nHaI?ncx|za|L}T}vxff;Iek;` zdVwOU5(gCq)|9!Wc+hhOKLh{f+Py-Dw}Ybf|DmB&@Z) z41b$ZyzcFKO7^76YGiQ_6}VU2gH;;nMn}{A#6>e!y<5FIV*8}G0zj*Q3*C9LKFQSV z&8gMn^WHh9+nd9`$>;FizCV^HpZEDTBfQs065zp?C0J|N^x|VerheXk0f#)` zacURZEC3_*jpx$45b0AdS`kv@YrnsrynhyrRTZ{vLA)0+_}p%#HhvkcE8-bz28<TFg=aI*jo*(6+^?eKFk%-sO|O?a%E$Yx^970eU=*4g&+wW45BfB!p^aVtYv z+2zClpYs%8W;K34hOFQL3?Lc9+wqQT)uR^1lGvH1Fr_3mXHH%g24dSfdpE@6L4VG` z(m&tBv8u#Ev>R?GTVT^X8%o%3W*~5nnta|3W10Lcd5YhY9W6F9g#vGJ_3QLO+Vvt$)y3@ND0z;y}gOiE9!0|w)EwE~(oOa-XAn;w4m;xe2OYthl2)x%~ zZ&w2G7Gv7U&g0&q!JZ}w+eMmIbPB8jEPvL6J8YJ% z6|N=E02-0%5nX}EY7>q^6F~4%+lb65eikQWVJf$=&$<{K^qOG9T^~2r1>FwD7VsPG z&<+>YTn2p&5@6u1M{x^RpAys`k&0r)n2lT(QM&W|3Xb1}m`E8dae;aldl&1cPR%Yx zvkzzdv(GPDnVi-4&-&pEg?|)_{g3m8^<(jG^E)OW_6;%O^ReMM{=^TQ74ma%o^lMx z;PZG+IIk}3G0i<32~P^@A;cg!`@7grsx7prEOOSy93sbhF$S|;hx~o~tl{ZUUoaqw z9~Ra?J{5s$L-Y)7mdq^T`>=O#yeQt}R~C#Dc&SJggUWQaOX`%(cz?DtK{o_B3V$lE z$x(7?>~)uK+c5b2eY1i%ik}CPj_;4O3g&6h0g`wq2;hv!Sykie0U}K9OEwvgLvE2F zp5wk)XK)T*v$xmMCmxY?A?t%Z9M3*qgLqV!CDu~ma31h6 z65ED-|9K_}W<7?x0DsSB*za)3OU{T-7wrveBYX{F+GhB8X5w6-3~2FtxnxZ~77OhR zO0cO4dkhsmXk?-sO5r-z4)G?lZ2{*xS7q6&T{CYKo_7l@OeL{!;KIJ#xs+k{?ZttC zz2sIPIhmWPs|2=H6Twvh7tktvAhIkbukqb29R^K*cQfgY@>Qqt95Fru$>YLwZ;0BCKoit zlc_mp(b|Ey<+%8YIVxo*m-<&t8u|d%DEB`6~Wx zSh!P7#O4fgX@9t%>lB5}jcrAR%@aZ$g|qS;1zgLoQ%l{R(hJAqZGTgUhZ+xB=&JXwSV~1mbvQIF|R$2g3{odO?zE`L0^^%xEexQp6tD^^&oq1h|MG>e9pY~-aZ7kx(KalD3S#hnWTyx8;MzEWqB*>Z+OMD z8N@PZp5wrqnL<}7%-~q6u6cM()Nl3<9}ALU;nzYGGgHum%h-L!%1sZAauS0`-qE=* zZ@~Qw*nbT7>3et{zcEI}nK-beN$%|-J{cMIe2%Z>+%>*uExGaU{Vd@13*p4Vn*Zc7 zpEEPXnR&8>NuK#|HkC|;k`caEfZnJO;^X^?$|XGZC&w0Thy5grT;_22|Ja`pLJ%F) zUI;&Jr?w{I4dP78xDrwt@^?cJao@+Tj1FLojDHXcwGQioDFXWgK@dyqBYd68^PFib z9)K5|D_DL&rIM^);!AGB109Cj7_AAM>tnh|#?UVAXVVZo_*ix?Jkx-?A75;McMKd*Tc`b`Q%8P?x~*SQkWTK zWq;hsGXCTQFZ9F{=OU+xIsCcn>RosuUbC*ko}d(y-zq%&H}VSXOTHP;LN0bO%EX~~ zyhEeH#lC!tpJ(;`?7#Caa+eHueLQ1b&<5Vn_6$7Paw}Lpk~@^<1RuGDOM^npT^rBIr=cZv79Wp z7=TrYI6nvXNPIsm3DlP5$-1?O^H?{;;7@+}*_*&yh)At*!HIKv)5dan3M(4MWKn?T z?cz)fe~-7B27;5j6;H@x;8`6`k$=LHeZY};jgmlnvHb8ncEa#HhWvZDAF&rc@hB%b zN&;7P60@*3Fuwbh_mi`Z7`oHM3`VoLYjXm9<}u>|mWIv?@r29O_4r|K#0=}4i2?`O 
z&yB4uteuOz!wI?&_aOGF2W_YpN_Sjs@B!{61{MsNnGVmNREw?>vuB!=tbZ~FvXmiy zeexpm8gX7oEc?3eKX)=YL!`F)%>(edf8x=1oJEWp@&oLlotI%E+^n?Lf{kscYn(Z; zmB$If4)9AvYyv(vt}Em?s@Q{$BjwW0?GYcd2F|a^K@PTtm`zLvjA$o_4;DYioh4%M zQH6gkooQ$_a9L=JIV*8D%73?t=BV~3$;fbi^9(^*WZ`u-VN-_RXPig&DY@&gPlI8f zeq!@)&N$>O!?XFWAAj?d?{kbC+$87sYd?`1^5@?Dd>ng_tih0T4(o9A5_CMnF_&e$ zti&zOfoZ!9_8w>~u4)}3FHYHtM~sV9wyL72uK3V}_Y@2WJX1p~$bW&EFGQ~$Ssg|4 zOIc~<5$<;cLewvAWRRj&tjq?5m!zYn!uv;Hd!SgyARfuzTWIBoNg(r3#72Av6c+Ms zEFf>xm9mAJk%;|Fa#dB6JU|8+i@hRp%T{aQ+`(Ec>ihs}kg=rBpZ#(FlYK#ISF%pn z+o(&(T9qcLU%vMECx0iD$oGG*^*iy%ohHE=5zk-7YdK?(55EbuoO?VXZ!lTZAv!j$ z!Q<{iPNx@b4NMn|9dmtp!`^p|I?ttjd=J*hO5i#IccKc?XcE-s8;)zoqjQUav@8># z9VB7T*$NAaY7kR>i8~Mb!*UQ`(u~ySFhbe&@BtzFV+KqLXMd68IiF`a)Dl0>I_d2U zwFW%p&5l;SYjCnJ{>qa-wZre;X=8YkChHZ1S4}^J+ge+5ZR?9!XdtQJKtPmGxJC*` zSK^kuep18Yb$Z*;(G_+0DPGhP>RRrVftTS6t#j-zsEuI%tCS?z{)*#Wv*WW6HAF35 zt`uSuye3f}BY)`2oSTVq)^K+gakleLE@F&7-5!*HJt$o4BR}KOwZ?sdsg>jRxnX-& zOi0{iEWY_8?tWuBpLk2o!%!cQJOvoUurEHb`KKQH!Cc6B0b@pN+L0jY6LxE*-#t$o|4Z$=qg zY_4v!AUtY_hqyVBcl6^`rey5JWZ@c*yyKJqVJiD4CkEC+p7-bd{@^<5M)m$$k58`X z?oB<5cYi4WmU&jGvWElO5=UmLsPHYL3vV`ljzt;&M)f?@d5`fp=4P$-VEKl%0Jo4j4{^0O^6Ed-dBby5J~{PI zJS2D7PrUoW)krOH!dWSX8oN}d_*q|Z5bvMhN`I&aU?&A&C0ApCX|Sh77MPOLR)}R2 zn4DpaGf~G4gR}2~4%l9e436AqO$B&p9)uHes)ua!C!6<8t`|A9s2@A~-C@QTF$?z{ zRHJI#jT>I{cIyg7MTdA%slgD_TvZW!Cf5Y_trgkZd@VA!py8nnDN>$Zixi$;#y&|M zjDPiclB5WzFydYHlt~?4;4FS{w6ACRCr0pjj$db;eC`M188DhM5x!!`r!K-;4mB~k zFM%llSX#sd9LNRLWN}}*YtOeuiWfuXWpiyFV!B%Kl2~D5L&L4*dpKo&c9yFKRwGp) zdCd(bzbf7%?h>;DM*4Q!WNQXg>I+xK`DKq@^BH3(LIhk+%!Oj> zDA=pIn`v4{BQImL0UHL~N#cLN$}bSNpbjwLyuf4!oHzJ=pTiyL{r&Gfr{xVaVUwfE zhi4fMa$3h^nk!wEB7^rPSSPO~^YeG|3p4&j^61e0{63_n_MpGDG2~zDytSL_rQ5docjV~!|%24ViEt5^+)2n-J@;-1~1m*M}KUHEv@3? zT;#f@zy^5lv0`=ZfyZR+3kvI~LIrqxA%R8VXC=U2fa?pSRiW7IxnlKVBe&6cga}sv^#%kW~_V z5;0bz^3r)LNo|rE@X|uBB7a8xLn)c^a74Hf0dZE{%+UK*Q##rK)^96?8G@#-3U26f zK4crwh*jWfx6P~Ut9S9~Tj1WnkXBCKAL1_(@?zo%HHw+<_xGJYeDCQ$a?c^oB94$4 z`-vg{j6q-h3>P{07gjmMUUI+UYoWZdZDUL7)(XHN{I=TWPw`mcRDYWTCLB($J1lgB z=WEJ%u3}4p;K|^KXLSYyOtEs)25*DODI3D$|^`TkEo1#5@99^;uNzE}OM zUFW_nAM2Ybzzs#86IV8H5Y*$Q!?n*x9N<1DdvM6(zxz>Ek$mUx zr2m2QSR-deUw?C<@_xuzB<%&yc^<2G=P9=ScYPslDoV`56Xk+adDE7H|tTEq;k z-0`}A*OFjO@qIk1a}vjOQV$+nlGmcIyX(>K9B|DKZh!H8uMctL&$IZ|OCbAgh%LX^ z!tXtVI$7NA)o8WBfp$t$N0SMq817r#VR*2JubC!x4Di{B<^=562pXK_%;yx>XlJM8 z0~kH(8-@2&U2LP>x#KvGPQ}gz#YQ?*qws^nwa#Pu>Ot%vS1O#3~&+7kj^;c$_QAXeHd_Da7;TgGqmQ#%eA-5)i>1UWVOlYiei>ffKYf{^UX@BI6#&Kxl5{1Zd} zSclw1mnn`EKCVsf=!h=zPEvz$q*s{mvsviN9QO2(gMWYgG+807vA z?a5gt=iwXIAy~$cOX5Bm;v&}ZQ_E*kqfwHadT60?+Y>r{s_iqV#}R7Wzs@;gx@pTj z%msrE`tc&D+RaYNsT4g3P1x0I>+J9iGYi#C%ktTeJ5#sYJadM z2{;C7u9OwW(~~HmMM!I#PZco@`{j(!LsZyx*&Bo09EPnxD#?Bsv*dgsxZO>1-bnuT zjn$DDN4Ond^~&(fKl|WQ#|-D_@4Mpf&m&I!;y7gQ`~wU63+Ks_3*Iv*6czd{VFam8 zPhz7mY{@<$m`oOc14LpGFdP+YQ-A4rMmQ^=8VUC^RD(hEY1 zZ-_(L7F-U%V6G#$4DO{+XqI?fZY7EyS@T#FTFSv5mDn*%VMS;6vs2} zOT0dj|KsE2eF6U!_ki`dH!v*p;BkIbVEi~&S&WZ6h^ge`D&EtlAMjPbmaKph0r>ML zPM{y~N1Q;P=!YNjsqy~{#z|tx8`iU#E1(*Tkjq#J?jxLM755g$dD6kDAOMEmDf%i0 z4um6U%SymP-r#32`2BdsjDI0xf}&q!Q%ut>AghfIRF$Hp)o?dn`G7xz-b`LUmUzG$ zv2O{($|U=E*tee?=3jgEbDqh5^;`^B;_UyfJ^%eVf<-Is50?G4S7QvEqp$qk3&Pjx z9`MKxmhPOXh+YCAnFd%-=0gf%8+9Uvx-o@7n&Q9(uoT(+`@jkFNTA|IB<_nt+@Fc${QtMO|Ao(rd9wNX_*WcA ze&WQCFF;-R4=nS-qPO~wJK+8m<8d~Im`!3l&3UoTf^7_r42kT@Od=Z$8_`#d7R;W3D z^Z&0H$@%2${BlzM!1GWeCFgAsodkq8@H(Edc;G!KpR$2z>ct5_3V1+mvRAw;wgg}= zExOGW^nVAt_^~|DkeEOs=+B-@kH-5!`3vK!LqUP|83Xjq9*2vWqsW7II$9Mj@MoMC zBV(5YN3cp4ZwffVCtiNeMh8a17ha3kh*|~xiX8XAZ@=s7`ToX1U`fL>9&*Q_zN?T2 zHs~RNOxMOxfkfi_X0KvC0apZmC+`FUyeZFB5r2;y7_k$^=I=^Ik<2)5e6?EAuV-=o 
zhi9mx$B%nVRNNAS*c5lpOo$-18f1QN4qKCY26aS@u~U_`ofY~ssMlP!Ao(xp zL#Q2KLDV-KUN3krBu}|~{rP0S?||?A<%gHDk-&lcxgS25_6B#{hPR0S zyah~?NG**P9XKyg?l(5EfgDC*%X)ztm#9Cc9u+(cUo@m*21{0Yp?BiaUIKT`5^!gL z`}@d`z;f_=nhCgeWVVeVwJ_2U=C)iiJ%6}O13n0k-wF7bq>tsY5<_^VQP=~c_|$>_ z#sU6bi?7@mc)vH`0iXL~$OFE6yTcvw;XERK0C)NHKfm|RSAK(-{kwO*dpa2KMU9|= zD}v`?sv=Z$ce89~P?*o?Vh^#5)aWr{0jVEB;)Q1cd{2*Way*riX^<{{JCn#HLd;p~BOplAC1zfj?RuU_`Uvy~dOAEYKelfCD|D zWTIL@0-^+3b3@NCc1RrpjtL-K26tGPBjyGga;&fZA^Kz)zi&CzX}jLZhyL>Hi+4cw z7U_fh@TERA$xr{_E2sVwTlnEBet$oMffMu{f3fGpujls%Ua`aWgG>}()goq>GuNF1 zri68g34D&h*^9wd2IQQDDtbm5*X7Xs1qU~0HL+beUYpHuL=!hlytv1YK%Wqomx{BF zR!pqPnAzob`C07d74T`ux`Lx}idT`~9&z8HkEe#*Zs6K#h~v88S+15dDu417;Hr%- z<{}(SADlO#HP(*_Kfwo11dK8AbsikCtO5^-xI6SmC(<6gXap<-8B)iKDtdl0YO}K*h#**yUK^?NT~fy> z?0HV|ZPXF7Bl0m)Z*lLpdVfTX|LMC0)Cl|S-!=aW`2wjyJc3&xG#|$+eFBU#W3(bRezF;{PrPH;|y~R0`A1$ zm_xpQ*f&F5A^rK!eem6*hf&7xF;`m2IWX`G4bBCvsvR0#8H0U?`WQ_?-bak~V+}d0 z3E_zdxhOCz=&N#cgpM+)T|GGuE%XlniDT`6?RXimoert5JA9n*3^$1RxF)l`+B&0g z8cu+7lGti-?|)mXxf#dvyQiKd==(z8$xS8`7t8rJ57@h~V`=a_J?SYw4Dd6@o3OF) z`T(m^V=?QD-7=T3sE8+|u25cNUDqU@5L}$Zliwb0KfLE5fB5U14f{)l7S#s}{=vt7 z>J8Laz{}ku?$;lF%jf%twI>{B;7UJzC%7IBco=xoHGkf#2&YYG?Q`JyA&d|EFQ-%) zxdPwPThvvWP)$s%r7w5M+YF%2-VMBFF{t1UfSd^!W`KW};L0MO1>WvvDPq6O&K|Ac zTJ;Y6^3UA|U;%t&a!xF--pQD8!#a9`OKfehj?=v6lq>oSL+yrM4Lxu>1Msx9cM8rY za0$-!5Pu_w`;L_6>~WZ90he&sgAeo<4)TQoku~^(8})-b6rcTv`GwC~VfJLmO-Qcr zwN^vzN;qfeJ$%)zq`q6Kzxa>0-I9ER>wt}hb9@}%gSiy0%Amc{f;&e^Tzf-Pu|`b~ z4%Vc?&-hPu1>77>uzmh!^SQqqEKb(;Na(|%?8s0znoXWpy_aWY;3Z6CL^kQVP2Tv~f z<%&Rf4);tT`E!;1>HpZ+$Y~h^~`pF3UtWa7?L98Yi z?0?G1k%EytmGGfCe9dtV2PAM#hWm|RX)TT$xHK;Ige$BGFPm-&FPkB89D$6)ah!ur zbieD)KjxBtIAKeV#s0*ZU$0)n%#f=hMj-Z_hRA~bzV-32xLqrOc*Kx-A~NTI7#Nb~ znZk~z4qoH*3m0CHc@d0B;B|nCdp98w^?!wve{wByhJH9p;Ao!h+;qWHR=`Q+S#T}E zkyF7*MI6$Dfs6W@U`;`?1dm8)^*8=be$SP>Uu*b1!#z&a$A|a(pK(Cc*KVx1z`1|l zmn-dIVh^a;3$uzQ_(=k%Ce#^NkIEfG29mViS()YDuH2CLg+&EV7y0KG&VxOHyno>* zZ`^fCXR{1U)$LiEJU`_UOe588`rLEfg8SZbTIDA?7H+ zWQM92?tB?@ctfrD(?6nUg*!AImD_6x%^MR?5oFsn&$I&tTV|?zPP(zJp~fK+JAV1T%)k8GJ@8sL-dJIPRpr`ytinC^-$^)YQTRXAKza zol>))LXSR%&LrYMKxVC|P{Ff>aS!$sxpOmJ>`2c!BWDA=C2!;K<^;1S;9P}JC_<0U ziy@3@R_q&nR&Zc&$0OzdSARfC27bb@-++gJi~(H1r${Rvd1_b;INGQFyRg6({^YfN z&8&UbHNSk?jPRVk>KYP@NnMnQC%4Br23Aj!1k`>lp=G=JZW{He!;B$yqGC0=cOrk$d6O46;O`8dv3*TxJcz?|q7PKsQK+jq5 z*l1p(9Nz6D9Po0E$38&3fJ7)i#4X%sBp=87{Kd<@<}Lr2U-iL*`J-?2;bHvQUlPl8 zolI;Ad0?x8%Z159aIV19)G;fk0S|!sKBm9GPXpE$aGa?d*>;561>pr@&TYZzqSoM+ zL;;mMYQ#goJA2r6x$-TRK@ESMgnZ|LC!w;+nCQzB-YR>B$Qr>#dY$l_g! z{qq?SjPwtF1jbgs=3~Ll`F<~`A%1&f%8c7M;0b}Vu)&<>$Zj#4BQSW5^ceW3W>w-v z4YqtFhCL8Z0nfm5T+r$uiUfs)Z4M!I05|9pgXe%dQ#{hV;8h9ka#eo7w7UjKLD;xYUMA^-6ya8 z7l%8Uf=BbgEeQW|oaubFjbdQ7eD>a*UDH{Y?Gkv)a94_16RrE)IOMhMmRSk6<`ZU1 zV$)B+F-H7wwK{4OQ*?hxE?>Ve^A2ei%vj=?N`6hIGz%hOqHyw6f0Nob*kSG_3yfq(yS@qchX)RvmC#vH+f9vNQj zDPHJf-b%Mz8Yz%CaH`!9cK(OKYbCwPh>mbP+gb+ z`xfE8DdKs4=H152E6L+wnSiyX{2{OR66^z3V7b{?hA2TX#kj{X_xhb{<8JthqsW_KxczlM{59t}Q;YG9n4VlZ}#Z95FRS~xa8Vlx*0;cj zC2$N#l8(}c%y&Q|5^)^pH})5Jd7t?n(&P9KaU0Cr?>RLRyMf_<$BBRB^#fM=;g+E$ zgfHDtC%wS~?CF2RaJcedeoK+v4dU2X%6AYcK#Bn+PCQ4v#r6nZCTbs*b8v1cFF9kz z37Fk)tPp?LDBrx1TbwGWOHjcf0||xn#n6XA|3G4A9L0HUYFvWOt{^-jK7RVO zG!L?-@x_aQZwT75e$LFmdlbeBE78!Q zATM>ecL5*}OoHG;Uwzr{*GgV@xcQEApC0Wu#$WTO7r@Q>%~ufL2aN;F6JiECEMyMn zE04G8P24DMmdhFZ&@guCSE!LN1CRPM0FY;H$$QE|pmo&S1|JBC5)YmKgYt&k-FRv! 
zs7HSZ*9Z5~^lG*ocp9WO$Mf8r;Wx$mnYxx+#zuc|s2eeIgu1aIb>lEE=6ejOkGS7_ z3mk(1{{o+nveLjhuZqPHvH+|{T26qvF;CsBzah;RtyGQ|l0{-8e( z=R43>K9~kBa7|zk~i!oJB#c@y+QWGi6_OHF|r9bLM|~ zn4iqeP-eS}nE9ChK|Fj7qO}TtwR9hI>K~pCY7pRI1n9N6zM;MKXqy6P|qx z1;;O31w1+AmdBaa^9HPJ!%D;5z>)7Kq7 zVC7K`E|nCw8zjk|=s|ecORhSLS)hNw%pm|J>j8Db?oq+8%tap8V(()A39CkG!y)dzpyG#fu0z*gjXjqreR2YpjoJb^ z-UEigpSX@P0_A_3MVx=#iW4;zXEl;?^jwDdD;Hcxt4-N3k$L0*?Dequ#A?KHd>@H@ zL!2Ax3+x*x_Gc_e4A5=q2zGxppeL20Z*Lc{FK*QWK3xESICC37>sLrtFn!vf zw@T_mag4d(C5!wF#AJJz14&+l$NcwmA+^pQGm=02YZDk3*|VQLJ?ww|&z&p%yAJw( zCe^HvXdm$tPHtTZt`BAyAqnug7dMzoI!dh^{i6o@dM4<}VcvONiF*yc#V&u1z^Oj2 zC++9?bTsy>GGX0Ah??^xx9QRlBE(?AM>oc5t3ul~B=z2>kNn1r0C^t3<`6rOCk=eq z@3|HdU%vCtKWD2^nErpxH-GBgVQ-RpiSUSV4{48}MK45z-8-ko=%L9&ECr@HMXwZj zXQm=&u_2?qC-c}7b|SPHL$FR|5~I$zZ`L!+DWGPCrissI;}!Mcn-CrZT&1n!CX4U; z5FGV#4qiXC50ouov_jYMwV;<*X~K7_g20UtV`k4%4_=btz^ne~AH-1qG3 zZ+~3%|2R`5UNNL!@R`%|WN|FBaRRJyhhEBE@)KZ266=6ZBORBRNrG=zfHT!%#)VMm zU6RvRvRJuyA!{QAf}*I=4Ux(o99-~8j(|HFXQ)OZM>yd0Zs5y^B{@erIY*fKMW3ue zHwAO=0l4nKwSIqi9H?QD*O6SA^t%7aXa17|`K{UTUB~}&o{ORG_~FR>#G*_TkG#0S z8H+ZUQ;)y_+iv)ji3;qb~d?fI%L_*Gk$}li> z6@QL#&$ zWpExJm>qv{qw1vgw4AJ4Q<4ihHt-bk{>1UP0ksNy zOc>_SH+UxG{gHSyytlvlqhGn>&wJ~_KW65C@PpqTKkgQsx3yEEzvYIS8EWJ%;s3w~ zhMb5O<_3z(&&R=C7~XfutP!Jd-=L@HT~*>P@9fVoeBe8{tPFJkiEnt% z$d`xwkt-n$r%)_JW~~7~1dub}heHfjRH8XXFsC=vVLto>^f128=|B6YUwxgQ`PDMv zX5c)7uS{9cxsuo#SSguhsgP556zp zup58QQ^o5o#JMH8@h6`m{eyyTMnQ~RFGntbz5>Yuz~5lV{Kvfo;{ka9%9`If^`Bbp zlka}@P@-=giJ!jI7w-0%(;xbIgLcSQoc^w5O7yLUxb_F9i>v|3r9K}oux5yVB*)Oo z7+mF0jHC}Pqrvple=$#4~`+@}$waqB-qeYFMMC&WT)rS5cMnHc^ ze-QgA9(aeiUkDD<5nZG2IitUN7i5hJDQEa!&dX14@H0aMUgLll|C!UGwl;yO44?lx zgMaYtKXuRBKwpH#o>yf=mp|geu;#F%5D#*h6j<9~{{5Dtf73RNkR?ZG=_#1EgJ~Lt z&&9fnc^wJH-)g~vy8ur8aGxSKL|%V~=flMn;C$D_92Z%?-&lgr;Yf?T&cn8F=x>pE zdF20uU!F;ruc=!T{5Zm89CBUEp>XV0!yL##1C8R3Ih{|<K=>R37Z7;_YA+2r3I6>UxQ8OQ16cJ(FABPLth?6PB3E(lzzU;qKhpR<@D6$x zdQIqC$GaAF`d1wLgI`X1{TWmz?^OXkg83V*B-9TH(Q)~!kA;cC@hOHEVg>(!7&E#J z^w|g}A!PUgtQOe;H%u|R-;#g1?OhK=l0W*3gN`7HVT2d*gV7Iq41a4C5Kj7_0f=)> z>ZpI=^)?Irt3d}(-Hg4x3w}Pq=z(jZ4-Z37=uzxccrQu&7W)cUFX8o~ccLC?gKjJE zRKI6JNRJU5XE(vVbIE#cmWb12KYe&JSRGu4_&eb=U^cFXCLlNq2O59<9nz~J_z#{n zI0NW~k$VlUs}wgSeMVux>ye)gc>T`|FsT*5>;Bf&`%|ysOJ5IpEQyyr;X!`$q;W2w z*)(w9KI?$oMt*`1 zqx1Zm3^jrWlMy_G0nUFybeLS%gN4{O3y-*uN!-$LKcLT6lfQGk$H?3Dc8?kWbKTIG z@m8m}jh`~cNl;U%@tHIqMRx8$m&>n+?hbSc$^6=&%SHIAc#dd`fd?~ZsR(tj$on|1 z*LEkrs5`U|hkDaI65XDL=o+Gzx9hc!t{3_K57!L2Jy{2wp+A3W8=40mOSJERR}K7= zKXd>{u0ZO8VST@IPx1_Yv6w0pQD>)i2@Kp7&5$)y85}U|ad6NeX-9!0N%W`WCegVT zYc{MUpbw;fSW7|%9e9;fkTJ2&T&7c4Tf9H2R%^T#awQ-Y0?!opFk&Xow8ra-nq4tr zM)p_&O%psguaSS?Agmf*hL~Z-=)07V{zuM=;ScC9k@bmMTnc;K+0Y*x<~;sd-%nkG zI5pg*!(I8wkx32>f+D6DcRjCRPsL!W92=#jp`Vzn z@cen*Gw&ixqcz)XFfx4=-;Im;2zZw#sLND{zo8$$3kiRoiQK5|a&H$V-_X@2-^#3fTftDBU7(5S|qiOIkQ*dDkPs{-CWoP8jsU=(~ zFIyWq_~k?w5FClCtD$ahN&gn}`4upvKj-s*xG@FkK@ED0LoXNmh$p+H zvSjw|65M|)gzJke}Z$NaB>=YWPQ zyBk!6K|LP7TMo*zZ=#z)>V>a<|2Nn85A7p-?1z81F}#)|G;Dr*p@oe;I3N<-tC~O` zH9y?(8N|mCU(hB5R|-8~Xq?#K#k2YL1^Z*v_91%Q2)-o60_RZlFh5CjpV}5GN|;Z^ zJq0~2%->-jaPS)x5nG4XO=Lerv}jNVkReyR?xEiO)b(HW>nFeY)*Jn^m*lgRRUnvY`7j0xU?4$eO6woFj68ayHQVPGat%FtsSaE6LoL7o)UD$-&mJdcmgA?cwKeZqeq zURR1lvgY9Ue!e1(B9`F$kRSRO)yNXW;08Gl(VbJdxhq3=3|ZvB8$l@f)g2@EG>KV1 zeLb$-SHR;P9yD;|1JDIT?MPi=F&u*Ih5U}p=j0GR?JysTJBzGS$|~@f9dL3yQKFX_ ze&MME_^uV(1STLT3UeOn{w(n~a|yvCe11xqm--7-y2&sg3XKpn3htL%;ajU;CV31KAmxji+^I+TfXPMS{h*u^@Q*4!JbR zpJl8~p#hcVb8xx99cEj?d&0a>M)VT~{9-;{h}^^(LxTKI-9OZL|I+>A))s$T>2U2O z`YIFlXmU}jVQ!ql#v<|Cgoem}ir-}ZmpqFvJvU&(59ozzU4|()ww5|lX<>*bXTgtC 
zp9(L=$Ttu}5q~;pr6W?wsA50&sp1nIQLNYhclRmI)_=cG|8E}jzsiGT!oWBT{1Qa- zT@P;d_xk*gJo`U#>i;rdxWRw!{0GPW(VVcZzznwM2%5d%!{L6bGT8&6(*=zaB~uKY zJg&t|8JU;QGDYy10`qd8{uydzU|-0=QR^q<`_WKwKXWnA5gPQ@_buWkbQzI1l~^D2 zYf0WjcmO1ZnYg3B=30kj)4G05_hz*Pdt*?`v`&1J6NvPq=?c_5})qqYaSj zYiklWERwIhJr|w>J`TVcTDpk2;BJZIgo5k}(dxsjc&kD`eTDxpI|$AR)NnMm?h(H* z_b}+u4f9Su$v1))y5ki4R63l07|DP7BwzE8f5&=BD^&19@jkdgWA^Q21e5&qhB%^U z@r@7t%>LltAHCTRkNkfR-I@P5#|C^W~FT_I*AIm4q|GEUy$LuJ`>oHy=aqmNG%}ZkeE|+w1}#{TC45`<0KGcMybXVG=9_=}gR=k|NB4HX z#D~0eIOp)s3<-Ao&9DExhe(g{v##)tBI`0>hd+6V#SMIJ5B)US1_lK!5}dK*AfeW^ z%Z!75339bvMmSC~=FY+KLk~urVBLUQL(>C#wm;eiLk*4l-(`@G(+cM-m*W@q&evSr zpwUkDP9{=et}K7Sg#oW`GZG^eTKc3euLF6wpTH9(IO&1(*`f6eeT1z`bn80F%`-+Fa_K>d`=;&TXf9c+};QeP|B)`M6(WMGFTbO^#CwmV1R&>452tS$ZIX~VM z7Z?0m%$Jm84tbbS9>pz9qyxb=!fZ(vqh2Sx=SIiQ*%SiFI! z0rZIy#170ZnG&!L11|;zM+ibt6L33|125PZ`cE#=uK-`#1rM6&O0lj5&ePxe3+Ole zpXe|ASABm4^b-H^`TwA=usO~K9gZ)3h2hx}JgTy8kcT3(c)&a&sLTC)e3;GXaF={( z?qELZIAGR)%+x|};D_`6*PPwoItm}1oq-QO=q0Skc{EWwe(sOI`3hfq=a>KMlUPjp zOJ5xLUmnNj&iU{fzBP%?aNGn>G6n!|UW|MXEdGCQos!iXnG<;*n zs|Y^z)nELY8v!5qZ!Pc-FLBUF{L+H_;ck3%y@$RN`i+C0*XL`<9P-)WDb4c1Cq)e; zQB{BMVb&Qj#lf~lx3L;#A)s-J^ML+-N9Go4%!ZsSWXj5GGA|Uwr1he)%8k^4@whSzqXXZv>av;(#YS#2b_FdK^Mw+Ovo^Q!m>{ zBAj^0eKqJ$Aci20{K4_TdnLIQ!3KV3k;#8tHNJ9KhXE7+S_g1Ki0&8i!ta!j|x(B;0|Bo@4)z zk3qkJ@Psx7>LJS><|h%`BL-TPsU_;l1>qHtfZeTQ+p+sJ1=jeYy zxa!dX=KjkI0{{MT$V?zca=!Qb&` zz>mMQKL}1hdY?Z$)Ng(NZ;hD$uvdS-y#WS46cX>IRz+2WTTi4v3EnGrHQ~%=H{iYvoni|x-ohR5`C8n`Lr)Hx_o08s$0HeY z8BmT!UW{>cg2NASttEQW*y_OHKYk3q_0tdU{Z}*lZ|@9J*L?SXel(;1yrt3U7#Ge&`ioP7b>dYUm#=q|Iu%QY9*TuhB^iM%lN%WasN@5 ze`za!@_q6j)sZ;?1cpIRBO#{nz;#I6R-vU%A(l{PEH&|Wt}IKBK_i6dP^Lsb!pVq! z1mr{szm@EdYrewA^<*n59)7) z&Ci%;z7!YSi?r+?KwQi~RGdlsxg6PovkxT)W#k+QJTFV~9z9`6WiD~894f>2x6}}R z#b3v#a*tjy$yQ;cldG$IK}ku+iiiXk%RbSMLd1VO1O2Vq#ERz;>+1MeoD0~$l&rEP z3FRZmo&EbbictPx{iG$&uX-?J_Mdep>A>Y7Thch14>K{1UPg|Jf;ra+G znPh+2j9Ds|wR;Fvj>)tv{Etk#Er0Umll#Riv6EWkTPrpTO(`6mI%^fZA`Ak_KU_MwYhgxw_pHU)3RR|Ry&oPK<(oQK4`*W)xhimJ5nnuk|q`qth z_`-ZPUHIjt#>ouPJev-bez69{&n+L^!OxoT>h1KId0vYPJYGe$ITz<#Lepi57V*7 z*T!+VosK@f))K{b@6LT;Ze7*$(?4|2DN*M8`+lL$$f=q_B{}xOv6^@Fq0PkMY<_=H z81|ZW_Tv8Do?G^Pe}?7;1m z7WmUwV{>akpxz~O0GS#tYS%<0TtSn`I+aqb61jLeCp_9AWSs-&dX6=Jl50+288`%X zcI9LWhPsXX1-r1-I;A$&9l4gXUNV1`{v3LCQi*U{4*vO4h7z;0Y>GOpw~6;;NZiPm zC`*My1+S(~h9eg7yx~yVd4XZ#l9qEJn`{I#XGSrTZMGb{m&w)2M4r~?>`T-j&)oU_ zuv(AebHB*9?IGoq1DCSwWLy}uk!6BSy8BM9->g@e!LCGew{_CjtH?1!`n-SPS7Uc> zo$gAdql;sc6p^z%B3jIPuCRbA{Fn`bpY_};F4);^C_JnfCt~&L_rqo}Wo6IX48>wZ ze-+Lfv;%ENTKMKT{Mvz)UitH3p+QI9 z=JbVTJzH*;?g#7|lew!C|H~M)&vkRzcIX!ys?4)B+E-K*FN=yD!?}NiXH`z9M1EG5 zFMaf;F=~O{$cz)YGK&d%F>n~YN! 
zYTVXJKaqf}ksAGS(64v4Ad^46Tk6O95;?WAYXO} zlDgHa(yp0C^L~U(Yfpa-!lz9c%E3IVb3ROHC(UPN3+QjP+-GZQ>Afg#LfJod^O}=+ zZd^v|*?haSC^R_}k@sl_+sxj!GlNF0;u2Lx{fJ7!MskrdZ6)RAdQr~8yHbh#RC)-{ ziqRj={gj=(&sk|_1b9i1ZSne|6ys~Hh8Ilfxd-hndh`Tsl_P(EYjqBtZ02v991vP= zR?7*V-|_smo_A??{jNy)cz`9|VWq>kfZ;QF5)(uJA9uu+TAeO3t*nP;F8d zc@?S-?b)Gy^Y<5v*u0osyzm`9<$aU)IsJaZ`-*wvuP@V=qR94i>>A&iUsZp~xBWfXKBw=r z73~6AD?QzpzKm0Pnel?uyR3BG(AVxis0XsB&I0PG-*f!7d<@9oFMnfSC7ioThFI!8 z@{fOko#|4(6I_36xPrW(k6&gJ?0pcMh(yVkq3 z`BG#@b!-<*A_RBPdAePI*M&Y~Db*MAu9V>W7e6OySM-BjC$UpWlUIFWO{a0dYo1+J z7Vglt6WEHCwx1+F>4P@XRIM3lzSI8Hv%!CJK4=+7Wg}P9(xsd(!Z-$R?E2*ufqAuF zF?E$^ZlzUct39#b>_GZv(nmUS0zwhl6jlGdO~<5fM#uCx=9W`bE7<*gu2CmAhH(Tk zZL9l&^l5c$af|G$oD?o&4vDV1V{fMvpGy(=+jYuf%E62~cp~G)Ep5Q6tSZMf<2HZ6 zcE%L=tAW=tuGms;=(3z9r^Y#%0r}?C+r5WE^g{k#Jutn^S{XBb@ zUVFaIMQ={mX&laBt+3;2Dc^Xaub^z$7dmxgV=%Ha4SgtM)1mY$x~&)w;3&*4t!Wu+ z%JbDbk!_9p%91vNY% zq-lGiFIX8v##>}aUY-u(s3RDEtMr_@Mal$vxz#$j!+`@c#&;QdXz0MLO}f5Yc~2+b z5Fhd*-z1ycAREeDiT10-C}SteQm>x8py)NhS2oF?U)#1~ zA7w8SkEgKfx`OsFBeUwT+FXZI53}c~Y7JI0b`%N{5#D;IF8{-i*e%xw5QWs=8lKusK zeZ^+^p1cJ!6mRKw?nkToF_wSeOL%%YWftYOBJwK9nB;mvA1=BGzQ~yHnv+uGy_B6k zo}H6o^J-P>rPjlSWZWv_>Gq_+}d0-(a3d1Uo&37%=C3huFZe*D^0(UYsL&h zudZvFUR^gdy}D-HpxpJZB=5%MrTA-Q(3RM{UR~h1#I2&Ee-PhVP1ja*^gBZLBMTjM z*L2iP&Y$Aj2)ZtG%&{7}#R3^+3Ub}Z%5_0sBPjCJ*S1`n=bFAG*G)~YuQk2C*7W*% zA?2X%V#frTQ7<|7SjvCFI7nPfGkKwF*-|fLq3Gztr2aKU=ji$u6kS`_Kcndqy8fE( zm*~(<)>+{OljDp-(ibVF@<`iZjArHLQ*cM<7urT8&1}xTPSDd%mHd+aLB_;)OZupx z&#dTavx;?ku~=cpgVOB?X%r45RGAo$R77vS%UA&V6!Z1ac4dD|dAFoZL7Q385|qOR zAs+3cDtltxX8uyVZRk~9+|BwyI2odXQAeZ ze^T!6m8tB7F8+@b?|#Vo7+=up{v3qCcTXl9OICLAlk~2jxt?BQ7W87OT-EuQW#i~Z zAB^QF?1G(?b?aun`f>qW)$VN;fU~9grJ-eB$EgzUUMPPwW<6VsnI+u*)4fT@whm31 z!>PB4fy#&^_b@5Bzb>6kbPyFR+0V8CC*BvH@|_^VQNt7z?3?Z#A@Ew zyJsDU`OkmVd+rh}ku2{CqnDZNk?ktnM70Z=%0&0%#N=x(6SLEdVdFI(F zb|eIv%>=GUC-*h(Sr_0hCQoSTK;LKC;wx0X?CXDwGOhAu69DGuguYS9sQG7>uE;0P zRC#7-6(hL1$b07T<(|zAe$ew;-;uxEfA7!^l#faOW`(cGLP`FPt?!iAVnvkl-1aiR zXhNCiFRF6(q_A>}}l6H|^o^I<)t)|2vea4F08Zy^=t+mOahi1vX&U4lf)x~Kcr&=P2x9IeJ#1CT&mv4N%)b!tfutPCy|@vL;96oYda;M zkN^Q4ANf*`hO`IzjgxVOU4rP5S0`Kb1m z6aJ)J?5HY!B$7_8bJ7io-atqBxhGzRb`-tU@>RiBzW-|-^89+f0bz}wa!cZuzT8RA^3rPv9Cu%&M8mGIw^8wHehbuFt(C-QQ3bYQuZ0I zNc?QRChfrS{x}I#*vFU|T;x}_@(GaxJsZ(jJt9=m`}+!b$NF=f^)jn4vG`i=ScQo7 zqnCM9owoQY;F|r^W80Z>!iwSwgFBIxkTV9?;1; zkqmw(HzFB)SgY1$!*W>%W1N5Z{h^+GCVD!~k-*XRXuyfE_|wJjXRSMSZ+`8V_vg9m zig`aMXT0~gUKeg-1-D(#8@j7xnKQRq#rsI3(!6Taqjs~H&(@0^GhZAOYOXzzby zUR(8-U+rF*4G#vn+V}Fa;rUv*8x-61;q2iDeiQ0hDS}_H+I#butqXs0^MC*Q&x6Ow zbkuVH_~K4yU3c-^{qpDk9t~P<_vP(zGIq}nqDQf4cXV*ky6gV!j{ z-wsF4VV{JD9dewu9QS{)_3$)v+_Ujy-2E?m^gNlmt+D&HJ(-dos^MgM)|pM==Vs-` zW2OHb{_0L0don(2J-XfLn=q9^e2E-IPpHh74QguN9}V9+L;Lw`+U>Z1M4w;&Ui<$~ zlP|UKs`vczbwVbO!^!-tZ$D7Sug|TA2dP=)>+{61U6p;?nGAntZujflesKGeY9r^q zI~smF8h7U+ySE41c^?GJ#?O1oi0+EPg~C> zm&19tJ+R%wkv&%8q~_k6(d2c@7joK7#2;6Fjy?_Hc2w(l;NKV{-9PRa;}HDoW@hbxn>w(mOPK zK1Fsbossq*ixCXB7QpR){kY(g)~ngOg6~M@Rag_~68H!_iGIL%i=f++;e)dn@n`R< ze8{Uca;BrD5~%XDm$$0H+gmcRuByH|bhb)0OU^xXw!eQNlE-Ol;LAfEY)N0>u{-lO zq163ddhg-KJU*6uPG{=Nn)_|4+L_wVD??KWAN}d}({QiK8>#i9AP#{Qw#nYt z=FX$-`Z5rT-RwV5dVBn#m9MAUu!UbkCNK`&w-x*kThC7S%)9uyflY3!J?wr}{~q#q zGIP%!?U#S!&n{vrQ7Z zz3kVn>JFtB(fmoiT_z@fAN%&?NhMlbH-)f)%@ijW2T$gJ0#%&9rfn~{(CY;h)!I(3wab{a zB0Ee*{`u8s<2+aG_b{07ZP^*uD%CCP*yHw_5E%|@c^R2w(Wp{sO&d;g)?Pxlg zxIS?n#Zu;%?vDI&Jv>T!zmx8&6A-8rq&)17o!PWIJ(_l%t_IKOk@|Nv*tTE!ig-_8 zhKdSq!*Gu6;v8G&d=v}AJHCr|+{Zf+hIe8Y?}U$cC}}5m@lN`9)5`nvKH0_lWQ~7! 
delta 854699
[GIT binary patch delta: base85-encoded payload omitted — the encoded data is not recoverable from this extract]
z7Ri-B+8kIQf{T;ZNnR5DpUwb$DENO(w!Trosg%$!77!nXCFT>CjkyU}&36nz1;-3J zyzjFviKQLQ8Nw8h@1?7-s15{syP~svjHm17tVREPeTA;JZjnlJtL*5XUJ@ zX+4tq0yYr)R+NLcf%y7u))^ok0nibN8-M?v#TU;P`*gCuA$ymH2_Hn&hfXnz+${D( z>?KkETVY@C;fmVDOcO27Md0l$x&IrgGr zhs8aJ7^5Iq0Lw%JQ}G)Ne#?K8I#7G!;|t^z@?sbx7@toJ@s|$^FdwZ4_Dg^Hbq;}J zs|_C&71-u-#<56Kqrv%H9wFZhIFL1pY>ZcqZMqZSIPJ-Kfd4~Q2JuKktSij5ME-Ns z5HDYZPG^|QTZ|z@yx7hva+@piv;laX!F^DWacIsuL`h7z*&KKtxKDr5uSMdmznX)C zLE^eT;t&M?x8r$&UHsBOCHIcp)mv+k)E6Hxt@o9}F+Kixiq0**Ytrw@@Fc+;1QM6u z-<{5VqOY_(W$<4j+WI4RUn#&AOGGyjeQSmsJG8{16#`H6tzXHLaK6Hs7a9YIH-hN1 zUXW=F`rVw<;}>#?>u`Uz#Q)H)kl5-8LqK?l{?98P`PO*-%B#HNNN-()R}A~LPj->p z#Ze7OR^lr^*}&)<{I96A^k{^!Y)u?!>BupJ2BRKwU}RdwDXEgcgT*m3B5};pTgjj)rALx1TNpejl-&a)!=?wG%#54Ur(m*s6aKi4g2B{O+l1 zC*7*VM2=|Be)v5^TS+ixT?#pSV0$n_<6MFL4`AXXF7TX-xl{gkXs!1W$lR(3oniU_ zcG?CO{;5;>;zy0UFFh9I>#;EW?YzKqeg3JtjeLJM&ZV4`k@@J4IHALMfOwdL{1NeM z{L*0uPWK=Q8^kpJRBJuW5u?X?{``CE(MN8x?Qd7&4`g%*?{{irT7RxHBu*g95xM{N zy!pickTor_-8Lrs@u+Gwr=FhsU)cHgS&Nm$Pk(&IYM=S&=bZUI)3&}i#K%StBnbJ6 z_H2KQT@rHgo*~%*@m+_D!PdYrvQ{4t_|jC(%u`1j1gl%GXiiI#Inw;u-3 z7&hQAQ&4+JSn&VeItB;)k9wELA3*PN3|Y{BNBzEad|vYltcCtDABR8qOa5N_2>E}6 zru7i{$2=aK{AN70jmZ8ah7mJ7ABHERM!bletH_y#crR66Gy;Bf_xI#C9h|k?(H&=57)_G z&TlTfQU8&;AUI=4%Dv}3NN%+2JobO6nEO8%vLCvy(&78~`r#i=aN0k7)PCX}S{<{$ z7{5RC(g?=+nd5EObi2(Tw`YBlLqY$2k9YV56Jmu0Od3vqD*8z2{Gu~^m?)Y|f^U9Y z@(!8PpqJ}nOClt60Wq2PxQ-m;Ky-IyT9rD6WDkM}vhnRl}=Hpxu zC4zg;uK=!*vAtc=+b%#=EU?1Rop{a*+#NCXYNSEK7JCE4O&r8DiH0rVPeH?$((|3e z(8Np3An+?l2dzZS-Ts&#us7^WNq+H(j+h;37ZP`kyk$Ed zJZSR$5f9lR4&Df_IpTY}!`y!u$3b$={7XGx?yLUW_}_QFpNAy)Q*Y=u?YbnsZhtWk z|I{7=j_sM3Y;)K2*Sz1J+5eGwJ8>L=O zqZT?t^b5ZAm71&%W=OtY+Z(~ zg5=}gV;zBmw-=7`@rZx7N&fQHPuh9#)lZ23cFTFz6RZaYwFk7q-Vcf9tpHp$*r~vK z!5{qI1?N-4xF9-Z@cK!y?;yTE6)=^RIz+IC;7fU-)m}&^)cr)h1NTmJtpo53Vaf-d zEWuaO1TRU>R^$0NgoYr|5xl+ry%7C8f-mI|--%{^yWazRY2|+t4{{t5hPe|Pi$y%`^K2OfuuMM6;T#9(0jU=pXjK|>q;d)n?v4jUqe5YOPmqmu? z)`f)ENIckQE=PZ`rvzULMiRg8;!N`=9%BP=4mr}#2_+u&3WYodBM5;pd3}80Q174p zy-Vur7kn~VGoP^w;B}v4{BP|!^fme_*0|3eAM2ci`+)B>;kW@i{Ws4w9Prm9XoXlV z(M%xXr!60&l%9M2zdFyn_DeXAv1Gr5odEQQNW1`R))Rk={^hL{qivt(FEUnIZFJs! zY9WGi5B%}Z_5Ry^3c%Hov>lF+;4K3a{B>p_9>xHMCu!r!9eKWQu}bF`t_Z2k*ZmG7 z7ZeaY8nn5HCi?sQg18O(ry)I}qcI<46lmwa_(okhNE-ZXryqLP-}lNH$rHWL^CU*n zddPrt`0{^GeEQC!zrVQX?VOERB(S$Vv2EKQTA81H0mUWI$&Ap}gxhb^`*q4!z{{Vs zL9f7j6?$Eot{9#Ep}HY}0bK^qGv?5vKB~wK7~=>3FMA{1eyc0)ct0??IM)+SCHXc= zq8=l}d)Mm+wD=txzNkd%>g9>lawDGO<_In#@f&|L=ZM!oy{Hg(Ba(|YC`q@GQKb<$uS;m=GX-33pM1r5)zvA)Lo-bL#$Hw^*ViXY}=l|0G ziI+nHeDjwc?>C?6Cx-5sPkM1~fw2Im>|gPveYE@6f5F%GJ$9<1ARJlD&-`3v_O6P) z9YN2iym+9f`VEO)<+7EnMMjpu5jT(M+NOU`bFt=(;ltCr>S$jx*#c*T&qml!xTPoI zynqR_#+kN+{~;;A@$o z044aNj^LRz8)si0(Kqbxp8m~^9ZsTZVIa+g_vEbm@^ucZpFX**AMT&~azH@&idCqp806gwz-nY#wsWD=Y-(NfcterpG^>yxP^}S!1+ds7X4}ZNk2A6PvKiCtijdy*{;ZH4=CqDY$ zGIs!62JC_ zuWAS>!Z#q_*vVrCd=JqMhxRpc9Kegr>xUpwH4sE?pFkqW^YQugQO~94}Oo6}AIlk5sG6g7fCYqkDo~H69F{9rXaQ%l(uG z-n4po&12oSr~ys#@tp|%(;D^y;^z#}Vuj2D&P;#ggAiQyGye49eIWezZr{%u)t0?u zO~3l9zx&8r|KxZ6>mMBVTX&$HSBQq*N9W@+FFp4VE#KsY8E$iDF#mtzXj2CsMtYEZ zkyl&G>VQqy1fJhOU0l-h9B1sPl&mZi7@zbfTL8uP-5+ubH(O1F$JY|tHQts!`{3VGWH0k8xbV*(#VBd3RRCVcpSVJi{ON#FC7kb1Nb zd*C5@E+O_o4E>*;i>QC1N6>YzKH-lr4uG5Rk#Z#cfq1$1IUl+qY7@a5J16L-FOPTN zoDgqgWQX2pF|a7WIr7PM#XBbHFCpopre_-HhdPqYy;nbctyjpu*6Gu)6#BpTTjzB? 
zVW$z+gMUWj{ |DV#@_&U2IF6j`RCSZXY_8-;O*moRgihqAUyFdA`GfF4tb1*Lr z=m34=J^rc({^gDE(Wq%LmL%@<@pZz#FZ@HR9r^Jr{!`8Q0eFA*77J_$OghfzY00V& z2y3au9z3w$J-fhy%gNbi3WgJk%ybc>A@xA?6NrX2C~u0|L7$#WVm~Ilk|##;1h0iZ zJU=;?!UGG>52t?&0};G`wcVis{#V`xho8FqKXsJabKmzf$Umpe0-URS#W@FOUgC2m z3A%fBJ>cCRIdEan$fnNr9r<>my^mP9de$=y!{6CI>!x=pGUs2Jx?QU{;%~z=W~YstzPYqGu*G*T;v@0`iQlBp` zxCS`(AH0naLBzKI95p;Y;x7PXq|B>NAx;52fBUzb;npKt#GVB4=T@5(xtoi1&*6!Z zfzbQ;TS>IFpZA4e3+{`6=3e)Oc5K(!cT9zxH~oJfT2$Y0pWyAZ+}khBJlr$x|4Rdk zCC`9$v&Cg|t|F=gj|mJF#{U*LTEvWDx7T`Y5+7Yf`jm634=@6L3)0mi_m7G$8uBSdWoc zZpY$3kXLT&$+Q0~IlaU6BQCJvtWJMMUgSM7 z@bA4$^u%?x+?+r9Bmd63`aSNRd53S@sdfz3fAF(rP!CUDPYv!d(O7$NC7@LW-q{*j ztTF`O;m1leg=2FpiP{G7d}x+C)XweQ&Z`wPP%w8E^<9ZCN-hAX47^tpawlvAiEb@o z6Y!UaWSCZg7sb&d$L0K$JZgV=I}n}BbT_|eK9(4Qo0#Xqj8ld;q7PH4&aP1|++DeI z-WJ*!h=>A9P#(Coj(sM^BW~oV#BKbUgw>>WXT!Pmf@y8!=IPRUmHN)P= zqOJ@YSSDD14jv>#H&jGw%Wvx!y1cD#9JIQBa!Y^sJ`xTY-oLe{Mj|k<&wc1KS5D3X zZ@oc?b`x*FkA4R*k}r%g(aa(17a2cYtS`WQVuk*veAf@~bX|WSphMnE_rVdcX@U=I z$3dJiG3E$dTQZi8)Zyj<4lg{^|2gmVmhPq}kO(60^~FBtB*X~8$M+=35NKc$k=VoS z?w5VjedhPbywfG~&NkwQFC6K2J|E#8U@!fhcS&#wL^JM{i>XOI=G%V#0eniU5B_zQ zBK`bFJK!4riTQv2HeYSMk0#-ZYuL8UbB{mAeu?-OAWylQ|MmP)*cr(`RZ2doQ}8!S z=yD)5wjeroWY1p0$^m;0_B=+u1Z}jZ=bWC!2{>q&t1kLzqm-M-fL;MS;4NB!WGvC@ zMlMdgkHJzobXb<{Y{x_kwbk$?Yazx=j&oGY`+H&=KwW=xpl6Ns2a>MxX2)S0LFTM# zI>H_d{efujf}g;5ATFs6rj>~-*n{SF1V8%rgMW}SBjM*^oNBewLohQjvObA7B&H3YC~#}G&~5RE zw-Ndav}c8Fj$OBg(0qWLOnWB=RheKSmmv(C_cWkcTnV- z#48ra2M|oQ&BrOu$7BzUxGC0|&Yp1ao_-MI+)O+*$l62V6`F@5I2pfnuET?z=+d`( z3ohi>xhq+pUf!8$n1d+$)H(}3>m>6>ff01^OA<>FJ zw7dBkj5XhfRJRHqf<2l2ZI4InTL@PJdoh15Bi`AFbM#XZ@A%O!I5T3}CVFie=BgaA zjEIyGe;VGUfvL8~9p54NKhh4+tBp*U1f(3n-hb)UVl2Gl!_T^YX-2)`!tk8<#4K8k zC^EO9&j09>`Nf}Z&%kKEr=Mx7ne>ad>BqmW)xV7pV}vf!C&%*kwyW0jHm<~Q9mIb& zmhh!QpWDu-(Uw9ELYDG)t?;Sm1h*l=ozg>4lm3px>v{+Hu^?Uzt!>0h2p0|uQ|o)I z3lhA_@+lds>d9xsilO#hC)HeO|I*3jN6|X;5m(x#IgIAO)f17?h9>tj?iZc-L&Oqp zAUHVs9%FMRd&C=Z0qS|BY{^ZnRiJa^eYHlBd==54m8%S96)#*SnqCxUx0HOLA(B${_v;0ei<*n z%yqxabHsP(D+Yx)4w?6!1#UsbzbhHO0bS8gpFFFL8 zfeWEJlZOrOI8vdNE6q{SL`Md|P4s2VPgmS7O{9XNN%vZc(J)6Y0%Hw2j}Fc;u}2}+ z^PAY00UfWe*p<)1!_p&b&~x0q<51?D!`zDfvRmRz@SM+(6Z5sIk2&!z3JTQM(`RBhKw~cWA zefpxdL2>Ny+jbyp3>Fs^V;_4!6S{IM0s64dxFAj1&7SQuxCga5>mzvEc-Um}fT;bc zf9-qC{~qgx#!;(n_wieQ#kya8yC;431Sk7z#Iy+C{oB0sO}G1}4!4{5WoWr&V~&SA z;)$la&;tR_NQz5r$iRPnM!!}Q({Xs^f_@Bw4P+E6!iyw0&r+~ZXXLCq@1})G{40Ly zcMBWfkiPN`A@q%WGIs!E5kcG86mZXH@Qbr>W&Q1@=<1BJ_;^-%-Th2ERyO5pK zzs0LDPEd=B6F4|~E|EiWWhz}I4n#HPqxB&^MM|P>!bd?VRTQirca0Ftg1BxbvsLIi z-2?Q6cb9miOlW@(I~xjadS*1JB^Ua2G1S*Ib+=9CiJq(X3bDiKS?!EqWH#rtWt5HG z^-R0qykCczT6mg4k)U zlhE5$uDE|zZ&JEcl*ScUvyJ6?)rcH5!d=a95Db|J{G%YXBTEc;Ubiw?Px~zt?~G z2#IqMmU}xs+ON1`TyQ%0Whrn-;4^>T_n@^3i;gM>69LK-*mK5!Zj((JFm`w!2}e?n z6Rdw^BhJnfz5sB`M`TbqlnaoMf_M-7&ESuU?*n7EeUI;l009wGCGVl1h~ERh44(&f z7UiUf13r=~#j*l30H4<*Xe+~1da|b)!68^i@|?gn%z^{CzbW~CQNpLg* zd@=A4-1$z8pw~cwBaiz9t`>Qk3O-a4M-G2&15ycAg&!3oaX_wb-#c)BizL6|+j^do zn-WYpu(|%T9G{1t1wPM`_mWM@q18@&yU6`;N=!siayUzJ8wp28ycd8Oe_j9lJ;;CJ z-0^%5XtZqpQr_NA^7(zK`rO}?gO(3~r_j4@+XLDRCAq%+{SA$HChkuLG$z9R;(mXY zq&*OWLVdHOzAeIcgRTM_fmi4R&_7U!k1IbWx&>!~KPT<6C7%b@g**@W{MUQvLcZSv zbMr2L{XX79&lS2y-|ORDj_1XDkCfwi+xNJCm-Mgqdz8DM@^7CPs^5J6TY3Au&vL(g z-+rI>%kKfhK>7>0pY&5d-=637eBgf>gM-o57n`>4;g>0_C`#k5#4pis@&32Z!$T(Y z*&eu_^f!zL=&USW-{0PEO0L)1`)9s>KN3KCnsaw3ezo8CdLFa~;a`&bLH}((|N1@I z0=u7|d(Y3y$SwWxe!ianTl;)#4{$eMgeh>ZYRsVl@-`+pw3)BeuJ6I}kN+ZuVCA{voe!kuRxAyqf-e3LiTYvF>=|8`I z{;Pfe&>!ZUHEjDo)gk>0{Q>s_+z%7Lzt?-1J`>%P=l-K}$9&6g1c&W1 
zcWNTI*qgNT_Amdz{eXBY5@&y1@Y$i5nG5%JhOUGp(g}Hih$ua~>D25YaWkysJaf@hnfo36Qbz!yy$ZnwsQa$9#MFf3kjO4Q|+=#=ZRv8NPZjrN@g>XoXkoTFvqYZy-cd4g(Qk)8k ztFFuVlt96LW5$<6d8olQzSRn)Ztb|KCxIH$Kw3=_VAd1fRV_8f4qi|f@a%+lo64vM zIpPbd^pwC!RFX$0uEVY1IS9cuK*0dpmE&`C~rOp9=c<2_8qoamxEi8fO6C^=xar=IDg5cE>LsL4&x2Wsgh zkJ`}G>ndRrgJ3$-TJTKGfrgskt+b^CW41-D1*_~XEIDnPA9-kGYc+Qs)aRXkqKnuq zeEs4T5BFm9xFCOm7|;S$LiH#L)KJ)NQsW`T&M5Of98vyo?C$B5lJ28vNHOfA+4JFW z$ledTku^xzVzN742P^ix-(}YUSf|5Xavi8_9_(UkAhIWzAzA${ds^-wkLI)KaOYn8 z8k-(=_O;KkhuO{udj*^9ciOcFiAiIphP^!--*?irXS071wG%_+nRSf@!h zxGzQ_47!h##bUgkGl%6uAA@@_?L|XD#HOVkU;q2RuXzhm@+vB!d9slFHPnL0rf%wa zw~EiK&BA}+Xk`ds<$^z|>eK5xIi z&qaS^sQnyV0?FsV%vuPfo(mo7b(N6u5H`c#5qTuvCmex3F}QB~9pXAtx^NWn)1c(k ztmGcz3Tw8PVIwZx+Rx&9P5W^*O1|HIev)3}8&(orXWHFq57HzFbbQj4kG6BKaJ5EV z<9^oA_E8nLl1W`=DcaF1Rmnpiv9r~}#=n1|=pk>%*OpLn(ZP;OsHWgM796f!m9IJ# zb<|^Nhgc`@EO3!7xwX9DZ&*7S(OFE}f3b^=@EW3GIdL{s?OM?B{J5VR2wEPV%RYL3 zj7Zou_OfiHGHJX(-Pp3VLjSrbwSw2_r|rRG}w3t(SM3tfc@&id)bN!koqed0<( z45x3zU1zP5c9r97fjZS(D|SBjK>vUBcN##OlKg7>JzsT1`mfqW4i2AN3EfIe$oPk{ z?gdyrY%XxLUqQ1PjQzAwU6j}Lg9E>`RGiqs^%5CBaIwJe&TJZOMeZA~QLpO~F5E8M zCcG>Hni=0o`VS8uHy|@t#|=JbM(`QrR=xZHmbGJd;d~Et6RyDuYT{b8bQymKd@WdF3)__h(sv`%6pSdX?KH;!Cd0_^Z-UUCb-|%7ZEt`Zb;#{Y=d; zx6HW27fNXQYAE9z#H$U|DV`T&5`9G=^(Q1b7U{$SiyI2KWK1liulEk=zi3l350dXl z2-_QfNB;*DYC@jh!S`@bLjQlY@8Gwl?YQH>?m#~zZG&v@y6uNuq2-XOa4UjDBot2u zgw@2q`mc|lL%AlysIHO{MAxMS+Kr4U8tn=uHnR2CN3=rK!f77qASli}>(xfP;`;*$ z!iKjhsmnlZ+hj%R%h&5}C6o4bybFG3WffYP*22o=ajnY7=9=fggmJYRz61G^T_ zCzyCXH6qWa2`I1I@(ktDvz*kg_bf;Kf=EotbCknRk(87Ag+KtQAJne^Lpu zCtlyNPNF^_fR4GOZR26P_M#4j%D|k1YhVJw-F3LGllcq41zdj%?CxLoGh|EWsC%-O z3q0-xYiw#A+A)cFnv7+LT1ATmX)8q5yHZc7+xDry^+901)-vf=@`Zco7w{_`UtWkj z)=rIF+u&Nv8yG8p-}N+}n|VDqLw@IdJ=TWbKL^?m&;94m`7Gz;%j+;E*&Ve>5crpT zJ&}<3!8#>Yk%ND}^jFVM<_^>suHTkM454|L#*;Q8_57FX1PPLL(N>;ym}1VHl0Jl^ zpXPEmRoY?CMaHh0xvN5VYHLE~>xF{7RJ#_V?0_JM;{juf&nB2}`3=S?`HW50#O(PQ z?=EyBF@f8z_4fD38uZI&RG~jn>Ft5+EqH%2ci4bhTtJhkvByBfp6p|6 zSzeVD#G_p(v{p8}SI=uZZP!h`U7PWF{C2F;YdrHB^D0h*f9zu@lP>Rl4CupH-!Sey zD_)cOiC zzJ(gV^FDtNI3RO7_7vD>V7_km6~DRfA@+Mm>ABBQZ2X>ye<6Gfk54dp*FWw@##|&~ z4#e>rO179=+Vv2+^xW;$o(FJ|a1H(K8tj9}+>K0(lVGhN_lEI>KGnfEMqh;|9*s4n zUXXR=*;d#OV6MEOuadnc{{F3u)UCT9Yu5W(1$BQTYaaR#)}VSr%nf9o6)}H*zwZ?DaQbR{pu2%;LmmI74)I&>J_vI_d}kjgaWzA`Eo}zC zGDcg&@NTql2iV_99-04a_Yc>(3WL4vuh)??#Gig2xj#Hh;wvQ|iVIl{JQ@2%@v$~8 zh&X>cHs)7eL_H65+~dz{xuD%!{=8PgwL`s^ljmv2OItU8>v>3Ds|Qd_#2hbq&pLdL zzvH{z(M~4zqOX2}eLZbn(6>ga&^t_x&+jW(D}c+ve&y>s{EKV~Ffq$v#_x zmN+S26)4yLM!7+L8}~8CyiVSmxV}&3+PO>S%{6e=WS-uTee~=1$^F4(mdo*dJTQNv zxVElw#^4q5+QX(FzxaIZ;x+6g+t&$s{rda!`U!dMKd*njeqR54{k;D9x~_>HEQhwe zmNM#w!~rFfJpbA-fb+rHM6O#TxQ^^!+v`el9U7*+Zj0;i1#)}c(k0iS;oIx*EJQQ) zdEMS1XBCO;;lP|Fuf6tljMw)Y4Z|B&n3*T?7e?dwx} zA4pD-``8HNxgh&b>Quo#2o?b7apbz9eJ&s|+v}ds&p)rjTuYwcYp=ULpI>gT3!cya zx{mBSxshIf%^NV5c_7(wDPahK&pt@o&(NQ2vQM3;&oN3qGvtN#da@s$f3AOr*$hcKAtHOd#JW zv|HSQh9@=o7goEGeg%%{bs^k;oFj5y5_ki6tG9WJ_j_!eRWAQ3M_8{%qyl zk$1{)l;M_uE)hQ;*}B?^@)4dC~uW^Su~B(BCG1hmX*n=v2;0{#w9&MC3jZMzH4M*y^_o+Ad?6y4fDMvnMVsrkl zwePdt+UITW3(wcy7wQr9jrXMf75JJ0Qq%Bqx0lpt8FqWHt;5jSY584Iwf%}eN zD}s8QM1&Xps&DY(h4g}4~!@D zciewlKW+PU+kVj2+gHEBeF5Si5#ChWul}?jMI?Jj^6}t{qkgxf-G6@1oZQbkLOtUC z+kS@M!{2c~Bo#@2`V*{GN&m(3U|izg_H*c$z2|eGUh%$d2eex|UeRuC{t}X1)pOkb z;djkHd@mLX{QWQcZTmcU&bFWa+5O>qJO}CxfWy~#fD9b|ZJ&!J*Q4Jfg-EV@-WTc% zk;C?L$g`t}@av!B1N|ZglmrkG{2qP>^@DmO&x`lyZ_oRGB;(`VuF%0FQe{M>9QTRq zY=XPRcpni+3a&TFb+~`5Rvg}Ad_Cv8>U;kMy%oG+k_Z0aheY^pz41f8{g2GgPrMSD zw=pS$4}Ii>ZJRzIr?4WvAwA$GxM>8G0L&Ixx_i4j}R4COY*M77_S_z%Lh3K*A4&4g$D^h`2e>xda|a@(}Cb_j&ZT)HlJ0Oy;lahP 
z!4gF0mKf;L^1yX~$&hlxN*cf)4`3l_jC~J3gUla)_&-F81l*&tXypFWAeu zb0r%sn820+1(G`7b1X6@d%2 zu=%x3P={NrgYubMIe)CM2&`#v%M|dc(8+Bj;Wj4eTp71n@n`UZxJ}9z5h!OM&0K8d27Wpj91mom zU|T2j$)s|%l`a=-$>fXo;DS}?+yTwsk%=dNoxidn84L@V2R8wfbj3d^blPpo*U`iv zow>0mQ{W&-=XapF30=;K$AuoW^(RX#Dty zf7{Ht#uQa8RI+ld_=lj&B=5fi&i(p~h0zo;!4bZkqRBkVRWNH-&+>y*Th*%^yzWnb zdGjf6zLodh<(4REa3ZY(HY{Q7PYbxG?QzU*kJ)K6dHwc5>f84FvzhSzEE24*a-2%v z_vT5T-?r!Gr`(R-<&f8WmwWH;2NeGO{T>*X@A8B6E*GrN=fRJAm#1IlU*E6EU*+#- z2Ri&yZZ@BCG~T=X>-){G?{8py{VJb-e(n!;5$|$Z{gm79y7eF<@G1Y)kLY~9-}!vr zkv^ZtB=RoDV11X*-{o+Od;k7{qXoskZXvw0g|gY}+S+{g6F7NT@9#QDT@eh2faUMo zlXDKbeQeSFI5?uMKi{W5KHqWp{9bLopXZ={{l?+_JizQn8NE^b8hz#tR>S*$?~P@L z%?9R$0Z3J!&(WWI19jv5y~voq%8}*%lz;l!0c4?f`KOPo%-83=ug`z_8L;f{dOmpX zcC0N}8NbTAU*)T>^258G)$XUA59@b7tu6RLzRN%Dc);L)mpAWzsIs4acG$lA8PO{H zlz+zHA^iIOuliv>)1S|OgYNk*w?6Fy z`Oi=Jr+rfE(>^ffd6!F{_DR3`w*y_ccll>*riem*%D12L=~wyZ{v4=>zRSPv?`wQG zpZ3+TBYl^9pZ0O)U*%uV?|tgenZNrFY}r5e?|=G_^VNRXJiN-m+j*6P3-T^^0N0A= z&H?Hu#Bt~O8f)9y z`+jzN9=_@x{ayT&_ug|0#=~oFA?2^Whw}H>u;=k>Y@qzT2B7?Zy@uFxX!5+v-))y- z4SavzQ(t`ta@vPi-$D7i@1XqEcToNw8<6Aw`n>t|`S%)z^5^eoK&x`ka;!z>t8Y3$ zyFS0aEq{JLDVLu0N6Me|L&|}2`jkKKALWR_y~|&72PuC&A1QzK&9?m6H{0@O-z4SF zzDdgEC$^OA|MvucFFE?Z{>T5Z&o9T#^}qkO>*cn)0!{ee{>T6KfA90t_5a*&w!8oL z_3D^p^?&G_`EL3D2%9X4E}Ju$;s4o|me)Gl{l|%XP4@gtwM(M-`X57@{?GEV{g2xv z`|siAy7>=MNWoBf-~G>EQ|<=6{~6L_1xM@WsRWx$5`rRs|Cz);iZ2k2`s+v-_O-w~ z0423XjX^55Y}_-B7+o&0#XSjT5##Aqy5qZ%8M)g)s?V~MFDx8tgmpiNDEcN?=}PU# zov!bA`A*s#BxljhR(!Rv?uo_tU1>JB_rOE4;v!=Y-IH41Smi2@mQ*Ten5G2Y%@F<VVHuV>$imyMskG&ezN|H7zuI1fZkc!D zB0oVb=@b|H>-chA&Nt5G<`sbV9{TcSb|~0z3~7MDFgN*;$uL?a2D?JO%Qn9JQEE+p z1bFw}y^g8A950r+H;f+ObD3KAV*1iaS@0)+y{r2b!XValYYe?iI}emem*J)^K??(#DRo02p>oYO#B0Qs$M^bw4Yz#GF5@kU$OwA=!sRMFTy*1N@B2ptH2 zOq}dBd#eYR>Bv1!NO4cX_ihgOjZm z)I`qLn{*I=s!Fle!_vR+kyo^$Fj=gVB8ifFdHRPrbgn{EZfZL!`J!C`Tp z?Abb8l=X?$(rn>w(#EIv>S|jj6Rv)L9M?i)Wp`Vd@Tv6x%j%G?Gn-x6_9N7g34)ntji;egW3^6`AX8 zEM|wm77BiTxb0PWl_K@6(QEH|yz9AP4Nlfb>zc{noc7PhS?2&>LqK)i95pC7Dbeu;A7pfm0w zDYjlvb&qTq4Nt3D%r0~pVLAxhQoT*D%gnqt;i-p)2Sv&~jGgN(b*}V8KL(2`ZX(A8 zY<(jJ!qB@=ia81`Q?*Vx$~k5F>HiyJvgoSrv`&3+HA$#J7bNq?J( z;5{5>U?+^M7P7k-u9AY%px~_F%U>NYPMAtsxi#8N7Lkg)4|4Y)Q;+f zi$}j4Fyn|{PWRnSvx@pWS2H8pDvi~7sr49+MXv|J%TSJSCJ$~$q+=Do>CQB}KX6Tm zb#%DmZ1wc86Ux>E_`uU?!b{O@;YL}qXsR%nR%-|wNTx!kSoP+n3Z9SxSP57A5ZbqE zSK)@>ta#@GqBf6n{}gS1^GY)6+wgSsrCWU1Ob!8aJbUT-NEt)N7pK8~wJ@s+O=(10 z(Wfzh^QpGM%|*TQ#<$!!k9)qjiJ>>*&GqxyF>Qv)9me(DLE5j4TE8-0sOv*8YoV%HtY1k9h1rWn$>03&w&hd_g!}}q@5;M^z}s(?wN59(hhSw>+J`0 zv!cS?Mw`^V;iJcYhK`;etsx^(XTjSa?vZmf&(ih0tm*mvFdxgIzqYQmF&joZb}NUq zuCfa=Z2r$;>`+k0x2pr}l}N&3WYpnW4;oD<83e>{robvva^154vYU zF6?B6S0k9#iIO`_uRi(rv!h>@r?Km09V49E6~k_Wd&Z8R>jG&ZOX{cFQ@=Q#hTsX=ci&AQvd(nvgzDaxmq{>Xx5MsqKADeMZtBQA zAe8&kUL0C~P7#j}>GEodO>9gw)rHu{<`~55$X{tEF`5?4#0NYrV~n)dfpWt67F@gT zc;CHrja36&7;`;M^gi8Ok6k6$MoaZ>!C`*8Rh(sic5IjfRDa{jTiimUGiJ(aFx8di zmfqdhMzVI7DmS?>wS06j`BL`n2&KIG?v+JE&()NFKGo;)z)IK0ps)Fi1aM2iZzhUo zT9`gG1gbG~^4G2~(#?FS1(Xh* zxs4~Uwo5x)_E-4`77<|py+Ej5R`uf%#g~I2U$lO@NZFzoFGTC4D^dS;2(*rC$d6;U z6p$5v)jV!6PHFZeAH?I(+Aep?)HHX|wi@LMw@bIy*}A6pQ_3D5b7&#$W|p~fhC(PT zAtlt8PW7f*Z`XV6&L0M|Xi*s>H#_u#!L)y8xyENFzEtIgJRI6Zwk+akL-pN@GhT^# zQ*5X!w+EMXUTk`h#V;;}R3h;;Rd+q30l!~=nw=m^%FoN;y7qHz$RGE6NP@y^>H^Do zu_y-Rsn=T+4dcWb^YHqE!BQ|T_2v=;PMxaP2kp;pH5S50IO4)#uhD9&qp8Qco0#KO z$yQ@Ew1mF4ie`^yTU$N1Q(?a`T&DY?v(PUJ1hpm))lGJXXh;iFH=0uX?O2X!Z>vgw z0h|ZVvY8chv!2uq+g+b_3wJc^Cb7x&yH`huHhMVQCnoTo(~%AU=3}wDnKW&V_?lV@ zMeKx>S;R1?(Ost*TE{q6qQsu zwoa?8%-w@`iIig)T}~FTwEayytDDY+*-s2VY3y5eSFnD_(c_lSdfm-ajotyav)b+J?EHr#N2V9vtHc33M|8q*wnFKD#!emoA>{e%T1thk}xw#lu3w^!h| 
zBW?=rK-G-3P6Bm!og-&e*Vi!GT2|JbYI%C9XWZGjWkK(@?s@%3HZHg6V2~&sb=aLG znL9vGa91t(*|~oiPWl*qsTBa7uxsx2b&&3$Bd=MS7-k#b=ko4drmMqk$s8)@+_n0Z zz>LFF5uN??K|~!{fL$fg{gM6ythZu1rmol9p{<@SLbpI_%fD11fRsglx(&qB;-IhnI8kzQGKHDzFwm$9N z1Q-wm4CjnAL_}|kwCZ<%oH=Ef_LV>ND=o$D(CEm$lQQ#$*VFFAuDh#%rkjmhRGP^v z(2gIUtz#L)0bjB8ZM3G&z3p@~yq(8l9##o-ODKk?=+y130jxw8eSV^n;(RZ8Pk}p4 z-tDAD<3h|eIjXYL3(N%jv+&Skxv+7S`Am55RQEVE_T58|>GK9WP03PQC>@JGYa&qZ_B|Z)| zrZbb3T`9|huuD5Zw>}<6a#QFP@Am~+(()qp_BI4Odh-#IaYfOghr>Iwkjx6qoYMPProW&%ImbdW+R$*;&k{ zNziwuy)J8i?7FEPKH{4*H2l@Rx2;%fY$O~$$r&=mp2Uf69+fIMPqN`~y-^0*dhd$2 zqQB{xlzhnV&A#j)if@|r&YJD?nw|y6?WMA&Wj86VqOhno2TBTV$s>vUUa(~`wNuI7 znq92H_%TgAQ8V}}mvk_>aTw(hGq-yA2~5y}SBIB>Z8owQuL>1?RP$?SVd`OBs>{vD zE~r_k>xIKLHFa6Uo;5k}{AL9G%d2Q8v4Wm;4hu4=9_-x?A?pErdmHJtV1-FJJW_ci zh2zuC?T@zGUIY`^bfK!Zku}SnUPAwabd(Z$EGtFYCh<>Gii9cAGZ6bH%ji4 zwAgq-BCqBe!0AdC<7e94z*Um>o2CcRy;K>0ot}%!epPSyM_qz&z zF#~Kd2F$~;$B)yr9?_e0vzof+v(dSA&5kMD#rb}hKjcOnpO`D}Fw%7FvE`yyIqq$B zn5^%+4i_X#3sYe@y)}m$^H_)JJzbr>T4=_Nr$3ejTsnlU?ksMST2AibaetGr<&Z~Y z<_fM6J6eSeLV}a~WZMg*=@vVAe?4J;Z`NS$%DU0ZjXb%I2ZIzcZIP@x{Dn8h@}_@v z_2Jre2Ei@1hDA>qVPXIh9~AGzpf1KZqR@5Av9w)f#`3h?=RpQ#P<`Ko(OU0#TPBQl z%l^<+XKLO_cE!@%()SD7M{EjFsx9)_xloI&b8=Um+5SFR3;xn5xm(^h_F{~GJkwxf zx}JP3+=u1dr2e48i>G1Fvmc1Y*@wVC&u&({8;=^MlSnroL}$)gH_d)I%4_+vQ4O4YI!ft#mz|= z8&SCP?GhXhXM1)E$27k@Ob))X=h36?OsQ_X^}-B{<7B*E+Fg3QS)TYf8`fjKSJ3%& zphX>eJy~Z#Y4$q3WOE*ES2VOh_q&Nvp3hKPxQ3)6uzvFu&T&I6Wz)`D?1yDAXc ziptLCfCbVgemxrQgoCuU^m!pI=~Iy`tCP;}B|7b&n$^NvjR!a1%(LTsm<^RornX@| z-9D6&A5KD{9#f1XO@;v&>)E59?+?YS9&OIMQ}0CAa+#`&$W_=5ed|KoXm=|t-35YU z+)*=~+jSSlG-&R^juY?kSX3ucb{JE`4eR=CfBS#OQ~bcE|%v*H)5$%36{lJUfxBe zOX(wFzgWq~yr>6v57Eu@73j`yB7-%jkHw`T=HCS*3FyJ9(Jotz0D@lU_C=v*io;&{S+sL^X)48Rn^HFo$=kQJl5q9 z6o`FR3z${6%fii8x1(P4C-r3KGtK21OIItHj4&~uj~7@Zbi8qAb8l+;+^ttB zt!F`fJufKQpAXQ8m!)KeQVH*L^I=|gRCo!rMoSNy@%U&APUp!)Ywl{*zq^^R?dtve z39#|5>4 zoDRHZJG{@QH(Au6td(JsuQlq95Iu6FtQjBajXiHprMk}x$Wl6o^QAa#@6*lWv6`m4 zbLMHi3zb`{x2CrKXewU%-L)snon^VZJdRoZSTK+CV4V1O`aFH`;8vo!$uPA5}Q86~ztX^P;kRVOL(wlb9|(9Cbv$z#^m;8f98!3 zce>Gk*Yn6;>)C$0?=i~U&E~!Cygz7$eK%dsSIr&TNiwe;N1L!>|>7F*UL2$G^*bp@95J)zV7vTu$-Uw z)>fGr$z-4(6ZiZWkBhFGb)9R_>oaj2o;Heq<{E{I)3X|9Jty46{Uro%7P7I)D0X@C z6)ViP&ZfVJm-l7H&aFaP_nHNDm&$lg_36hf%RGBOPpDu~7udeXH+_EUWDZ~IY8e@N zJ(}EAjh8!Neiq$%v>FT6;t_&#L3PL6eR4zs1A#?^U&oZQ)=zce(|zg`%vUUp@ctuz zQ&x9snXXqc(`o7?le(v1(7(}rci=YVdM0l!o%3n746?_SO7ruob6+rMjOnFZQS-5q zb#Tt8Avs`7(rSEmkIDYJhQR`L@i#}I7*ZV^10uI$kJI_FxT}k7*9p^&yP|HxCHT6J z=sKm30*j$CO(7BxUgyiXQ&)R?j-b?k{wi8Yti@vf=v*^fD?7KIwGQbTo~_gYN$bwB zQAz|1Z2GXagIi;r_F?U;SLV7tg$(VUGm#lq6M+@xJLutInC$QU$=Kx`v9sP$MeVAC zomb-IZCD9!3+~jk&kB6(M#D*g<5$V#dwOhMI@2`2pOvK#{l}y;S>@x-_*iUzOUz@P z-PN!jc__=;pld9=c&*91)hZq7ftSqI%&!~H*tfm& z;L)8?)OZup-Sd2VOv<~t>D{x$&%#{Rc5Ho|t+m^Jk)=k>y7kb^j?o}EQsGexx?MYBl4*q>RJRwM-vow40tqTI-*#b=1KQbq(ka` zXTWHW*;emNr|Zr@n9Y_sBYI11goWJK^halM?yA%MUUSYHT{_#BbfQ1T(h3pXfuh`+ zc^S+0RiNiu<#RXww%YYaGHst{f*MbaQOYxcxmS@H+bnz7-rf(>wPyN}&WzZJ7F~m=29}C{JCW6WR4|EW7b)xi{d`aJTbh8EMrMQ ze7Y>?K9eqN#toLvqc$xx|yTf6LL(XHEbJ8sutbcA@g=7=BU#raxZ^@g`& zY9CP3(C?U>YdJZcyGj`i`)_w0~@<(`pJK7U(daS0-XD>)=8UX3MF%)}l)~ zTRK%4UZ!K$-C1kSVU*?6sd}-0N82E&;cjk8Lesl{rboNI2Ucf5DK&o*_8f*<|!EQxF8Zkcptj>&YnIFBP$%S$znZ z^=xQ=E%!*^^z9H5o!qer_?yo1Ma33F%>A@^)|QFBWc!a*)tl*>KR*S{w3#GLS><$B z&PUZEb8c>0441)yJ(+x*%!W7fykY1l&9@ajfZqn)-?(do-J$dI>ctB(C!@SW zz*9|Zt*s3&E>E|K(yPa(P&=5cri&A|myMl&!6uaUX;%;3NB6dkYjdqN*=9X2j7ROJ znLbmCq0@}6X-bvx43Vg^e=X#|A2y4!lb!aLJ)MP?GopYCv%#%h_y?aE2oTaT;uJyH z<3jC+!Rb^%u6%DEFUVFm@kq?Xnp+RdP6Zs)Nf`+DZg^sPcN|LhK?xLfSYZ7&n1J?;yB)WU-P+yD5Ve*_DC#|k+g 
ztWXbpx))XmNdDi-`F){;W?nraw20P!y^GL8h=87x%eCIMt}CLc&BHPT|FxQ{pdUeK zrNq<*4`7a)v|50Kc|+()#h$aQ<5md?xX2@4%3oUClDSHT!(BIok!pK?L`nUn<=4u< z+UxEE$@JG<;OvW6>kNHzFk|xd0?-a5m_QQ&i-TCga^1Boi^(EJ8FIP+f|EagP6)kM zuvNO6-Zu(E@5t>UwuTFcJR!H24{NU5FN3Z{?1|@~Q+?EF7s`4^ADMVZ%$0bv)gF9) zAIH`9U=bYUE9f~KdTOzK`|&|&ND)VZH5EZ*B~XO#2)6i1p1laKjn?l{`nRmU`<8A#{v_NVEA z+p$z9FO?2&gx;9NPQnRB%GgC(V=T{h5Rdb>!eOnjy6A2$tKrZais8C{&gs>O>aMa) z;|&7i;vH_sa}eKdy*ikxR6dTBRAId0sQBI^k7lcFAS0va6^yeNFB=+3@Eydnifmx; zbsre>Svk*?h56tmFCF3B9IA)raM-NnzGv#3uHGR-LPJ#5}z;Ejo=;h9_!5x zlUaFQhY!sU7`7h?MR@73BaVt@qkHI^`o0S;i%L7Rj)kaaKk71mNR23k`y$^;@xyo1 zc)5w4e7q>Z8w=q-o7m1)hz_zA6}{7*TRx6ehEs%iPtIIC(EBHU%a|@sJr?17W4}CN zk)L08&EXUroJtxWV`HhEJEJu_xhY768J0eRWA4mQf{i~k*2!#sS#KKp67RPU833l< zP!GDg)e$MvE`rFM41&N>~c!#wzZ8V_5t>l;Nmr09wgJrtRA2$pyQAadj1l+_`$hP2}Ls;c(j^dCR1unb*!BU%WaL*|Vy*8*PNq5uprr7+v_|v>!hXl;>`L#IAMfm;Ua$s`@>bU4`}%fRfao zF|($?k-BL@oW?uKX)i7)lewgoc3kh0k{UToSR?xn!nNLKmq;PZR2#B~*J0HwJr#OC zivgT4=DctQhD*vNrpLIW@nL25)Sa`c58E8Oh_Tf{^eqAG4ACcgwJwIURndb(3j@z5 zv18bO6D`CxsCkIn{i=jsyC}GU+(qhmZdR%`n==q3()Ze)-={a#%@@OC(HwOhv#nV_ zQgCa8>ZHa(V=`W6aa?W-Z_tZe_RNIwe%xU@n=+dAnU%o{OgL#GIrZU`14dC`TgJ9C zSe>i`44AT0bcdBLXL?k8Tq?$?Ve0-l5&DmRyaV}j4#pzdaiYM+)~Sbrl&*$hZ2RJ< zdws0u%Pw1Sls&OVS)%PGz1*=zVSMj8gIQWSUBvx&>ad}w-cDT8qIkq(oO?hA+PgDz z^117_V+ISq=Z)|D@FZW1IBb-8KM&Zo-CUCpD(zaX#o?jRAkkj-j=DApGK5d{u{&XZ zlTJa;l1O8OxomPdotCG$ngP#nIwUo4)Qr7dl=s_exS0g^WTOtaJf5j~xY7n=-3DYb3+9g^ z@6xMs6CVA^$vrhn+0+Y0`*70dJ$ye@;)eyYn#H=1&#XLd2z^|zNsWP~8k3G}SHxu0Z1XfQ|O~0<` zO}@DYRKj1dXPhmpB)g;(=>20vENA8b9^{lF+)w;VuBBArZhG%^1AH3;c0CM|^kAPHggOt&O(*5pkn>tjFqt~@r`-k=N+&v?! zc<5Kd$BH@u*gcz;W}nI2P6B5mDTK8Og0x-e#csS2x990~qC;ai#%wRlttOAtFv1>{?gET}jpvl9)aHY5+8R51PLNrs3j6jjv#}yCdz6%~U%7o6M}fb-)Wpdkr)=^GR_HGhG>g|AcMKXNNr z^t7Dz4sAu_LSIDIaw#*Ou742H-50z49`M*V9_+EJUR4F&Y?4R0#SPMj`xcoUX1gn- zIt$mYm2M#%XT4v7oqpsZIr^th`zA}x=rpUCsCPZ0P#WROORQ`~q)~*AP#kxL0t)Ev zToIuP@O3P28Ox>2=YM5)y+PBGU1gpQ*{h@uBzh%TD|6{a>N&$SJ_T$<3y!QJW!XI+ z%g>$90iGJ~*Ar4M&(+GyoZ~VfQwUQ~BI19{`&5ryj(|tB%t;btB*4jShzi68004nu zBjUx=`$*rTWN8;UO#l)o)o$+?bV9> zOjJJI#tGoMC`h2EnIp~!@WEn7=Z3Fz+0frMF+md0cN~_QdCB(UVtEX~%v$=av*{~L zi5j)eL0%zOVt*J*Y|6;bX+E-q$&E??TX;4S3>~s4tjxPM&K7i)La41(>XN#N? z2&6Xm9r87)b~o*b0ooc6OJj#T97};6X4@aS++9Z*y>pk2+Xu+w5r6?{3nwGS$FUj5 zk3@@9A?7B3I#6fBZ`T*wOAifU2HiBg5O3~7jQcztBY!)jP(v&{7|hGNxu7WKgH>U9 zvN?N8IXjCTD&vK${hHaWUILlp>g8nYqau(wA7w+koqt88!diM?E*93m+W1IJ@;p91 z{rj|o1kb=U1<#OAVOcE4>&3Qi`gJe%iocc&GjX{gXq#i-5b=dv+~O0!eeRAyegw)h z(sog4c7MqXq31?Jyh98PN`nN5U!bhn)5p7WS)lsB%pM{2Y>)4w9|}OGb=&a4&*!xBjOjT>>f zIX%j`BD&-@_G=@<;g+5&RN)N;y(*(K#hU`6p&F39egZ_@$++Nr7W26eCfOquT77!*Sn7ww~EJ*Ke(u^U~O~_a=P*ewD`5}X1d=%ZQO7g5m{S>!b3rcN}Fqsj+%n^OCe67#r(Rrzp&3X?oo|KM#yIC4hg| zGP^y#A0PKn5fSA`E8-2z{@R`th%lY(m5Rmd)-%1&TGfWil5@3Pz7N9DyBE_c z$tSMsSGFHKEn(7(@+nyD>(0f_#sZ$H6U)2!*f`6EiBHnl4B`H`eKgliXh-&@vDWFk zn!qc#G=`lJXFeMzcXu_eD`r9TSthF%Xam z?uc8Br;QirB9S6m(JzM4BH$DgE3UFV zN$jH4*2nXU5|N}%Md#C>$A9bdy5y_Q=|nfuk9L6VPhHo5rYGJ5x_WmtzXtUBb!avL z*nO<-mU)P8-A@W3Ss9)wzbN-*YrL3Z_kwWky@;dyeuWNtS6{rlpotNqG*Oa|3}*c< zyk?)5|7iUrxqFzRC*dseH|BO0?ZS+zWV{c39z9BwUR?I39kiK3FMr7X>ATOV$(D!D zRVb2oSM0ci>NhPw|NFFfISolR4uRT=5{lab=zas$7Yy?PJ47k`vCmTa)LHuAuw!w3gG;e7p+4 z%AVl=ipL9}$e*zXlK3$m)=MghaxNa1%@zBaQ+Rtkn~P_yT7OjWaPd>N?qHHdh0=S! 
zzCJ09$0D86)w1|ZyNh|Dqk_RW?cT@NgKz2#$oA8jR&O zW%1D7g!}&8Jb1~;gY?G8&T(2XFOpbuOtmSVcLrgzm>$Mx-=|Uiv@GKT`jFH4llA^1 z8J{<6#qFgFWq&3}7(Fw3b0QDBeg)|Eu(}h+wIHMKH>KKd&Q}s>x}BjuH-pW_}QGrS0Ekf;c zWpWB~5H7X7Hkp4xnTRPWzEUiRqaOR~duy%y2m9Wj zmfy!Eq+iQyh%uNt;lV$azVF`R_ETbCvFyC# z1-9U{Jb$xrSFaqZLZEwtzFs?lo^r<^y3htqgL`t~k}lncUa>eEha%ggkJaFOP_Fg4 zk;}sR_!M(r-iPAEWy+Hpw#U49MA!Wl)HgdiQ|?zY-y2OFG@fp~uZ1^ohx;*?83na~ zB*NfAc`*5)vn$T{bamUjntj*h!q~7|n*!YplYd56IlL&-<)A$U`?I)W_dd|&+M}0^ zezmaS2t)a7i>QCgr&OunWe=BWtmu#km)BD~!DU=YS4tMFJR2E*r+X56k2OotPb)** zyy&4ln;94TD_QK0rwg%UCYAD`5P2t)LX!@Xvhd%167HFG7QcMw#6RzrvZnUsxOa?Q zynlFdkW^GRbVnosJ)Hc9eB<>yC-0cgi}^SJDIK>Tlr4Us>49n_N-XIM(RgORjm+$L zJS`r^I&YpJVKv*xNb@Lv^kwzlwg_`Pp8nO_L%-02AgR+Y7@_^pgZX0bn+NGgPL+?5 zIrW2Ua9A zoui_}_lvkbP@k>-qKXUC)4lh!boID)HkNU^)=Yzfyw6tSp;xnayuulwiAR0v<W=WW?1?&*nf};*3k5XI)Vg{3tz*nwOETQBE+NRva zI?!zS2`uaE!h`~=zvCiNdX<^Q^#~~f01hPg4vrh0qbws zro|rR)X~;)oDfV?U(LCHBiXcUmn&%XyZF7IRK~9o_a6m2)RupfA6Scj6o2hdOb5Cv z9`(%~djY8vYi*HR;gf-F*@>D7Bk!Ph6Chd=g0Ou_Lspv=LII`MHx$x?jxDM|SS{R7 z%Z{Zw))~w@zAxMdln(DGTo2W`T*D*za$)=Ta#tD^-ir-Lnb;3#^;m>Ec^9-gpaYxl z=dbn_$OX3)L^Iw);_ddPUVr@+b*>Q)<~VILab3qEpK?Zj#M$lv^|l9uFJ0+|uO4fb zpdgd(66uAjg)#+yE?4g5-JqI>9W_BIoxNOoy&OP@(wltNB}){@U`?e=e{@sX0l-+4 zr@(&AWwnAj2&a+Vr9Zz1=BbMYBFB_#y0N@;37EYq1d=2*=uob6L4PqBp0cfNb$42h zOQ?)*DDDnfJr<6w(I@BqLpmhKCko#x^8SjBk)-e0OMZS>=}{+kgfTKMxjrQfl5vf8 zEEc;7KsN;iyLP)?kYH=pG-i=KBU8oj5UwmC>J@9o3vBqdXTbTt?{%A_@ab+fZ7A)HPASozYO zkr8zG!1^I^)EW-(8n<`2fZsL%hA}q(!<+4K91y~Ru*3!(yi!dIP9kRBK<&JC z1&HqGcU9vZUw<=C$-@{w%q@K&-R6R@>U={KvAJk>;1@WO8pOT4ClBrRbE1zvfwHg) zd>lY07#AN5^@VIb$N{xKWZ7UA#DLo6qeeKug5*UsLn?{7X z#p){hXV1M1hb=UA+$d-KGpBc@K!M!Na6hS5jOu+{g@4x+)D8jEtj^v3%1f)`jhFkC zX#)O#?9>x#`E|ay6+LLX1W_Dwd(zFgex1+$2SWBqwSZ~{Cq{)N`1i2kPac&?>$}R*-*ygt$!}0eFi`r$ zhK{}}R?XR1${Q%VUP=%izrt(4Evw>arnbGN&OF>#W0rb;Nd$k2IMr|bQ@Hk%*Cs~l z7xhcEp*HVxtheb{p%6TCwNFJksa`ep33>S- z`f+rs-pf4=D5#Eu$Sny^+O2bC(%wi6+%kPzkMH{?V2Re#nqyeBnU}8HNK>{?Mt`-} zW3+{x9iLkA-U8j(-0F~kVyv=;%<#%>&rE)%nC+*#qIy#CuFGa@Io_K-#un)TgT92^ z^?&LoaQj^Qz8RDX*TA)3h=}Zfi{QsAuWb!yCoZJQ7C)Q#G-X>u&Q+WvmjFJd-1g>Q z`wwRl?OvvCK1#_yCUrl_!C7VQ&UuVjabiGf@wsTiGJP!FK|YZ|(!ByAGZc|JIvL2q zLxA{6AE3*?KhwnhlnwXejSiR5#&}Tl+JC82)Imp6FEQb}d~eC<<>lLc>P(;wMm8sm zO>?SJ=ArGu!R`R;pj5VocBy zrPqoJqn0XngI*-}>!H8Qg5Cg7K(D_lNfjeh$onw~F4U!2?`0rq!pp{qmu#Z}?Yes} z=s|xChtrr}_{ULb$Am5Edo721H7EqbMltaR!HN3BJXQgOUY>Or7EiM(hz#@_G=D0{ ztWw!)>rCPL-tS%WLivHGb#0}-eBY$%$?K4yVJbYW(E1BReW7< zUf#RiOqxWjnCI%{duwLp@$qK0*|?_#pH~PrZL_;i$PhVL%JS}UGOszW+#m$`;kt<2 zDywjp7Wv{8t`5YoyLaj%5x95#sH$F*?9E}1sim&;__eZF`P1s+NDILt`{);SH^hHN zmlcrL;={MwC=T|y$gH<@_ZovKCia^}h`bXr0=z}~H}MqJ$ps7Ora&ZC9(hDk+o9W{+aw8goY4>2}=-GR4DC)C|+M=A@2t`$%_EPcOH$E{Q9wvWDK1Rb6 zG_p=U4^PQ<*ffpn&fL>Z@Zzg;0@d=`y4FiQEy>L}s>s4q;E=4G?vphu@eIVr)e`!r zYj}{X=H6Z2?gZ2)tZbk}1%cd@9CO-R^}%?NPO}Ks#FCXZ%X@_&fEes-Hanu2^bpz8 zTPo!1W5=P6qoT*{CWDXfmWzKy$D*1W+t~MI{Af`uod7Q>UX_FLLpj_KtJgszP|~)_ zZ6q@zQ0EX}g8|0pXmzXUAX>wjb*rPau3b^k+3*RDj4mVsu}N6bHaZKzgQLT~ZKf7bMl0PBCfT-iyo{iOAg z07P2;XiB#N?zLr7>vC0qrbagFK!O))=f#cue4Uomj(E3PxJs3ywK=`Q4xAxc%`aR# zLsrtCo#xs91F`f>|9p3+N3eV^=ipo>%Mc=!$PKp!#A)_lFI-8vXy@>FEbT{J-Wp)ePYTJJWfpZK2yC^32a@JvX zhu#Z6!V}vtDqb%gwmsL%vGqjFKl%B&SWW%)^z`;~l%Pi8fPEIUR}#1=0eal_2`L?? 
z*Sl<=+-Xf*WFbyGAv{yz2Mjr2R=N>86njf=;VQkS-7!6tMzP|REbG$uTNm&9%c(!4 zDRUfeZG!xfzPNv$pN*}5K0pnt+d!-?m)pZE#XgG;7GE;wZKs^TObgNZK3^7j>qVh8 zog`_8Wxn;hdf(TtZgr8nvUmm2k)T%FaLmJ`Ur`stE*xcbwhLOIJ?FZqDHL%o<6{~y zOn;AeU~}JR&uV+2?u%V{B1HloT{7|Hb5qC9kET;+`FwxBb?cpk0xCPk5Dhg+bQ&xcp|?>pWRX*w(rBZ zJDExC#}Gux5?zY!=V*UTrg2RWkB?ar7+(aN6r~Cf9QKdOatUVQe_{*&1zPxg{)H_3 z1(Aa$^elh8%ay^(e4EXl%WEL}xYrk12fmdrQgB8F;%z>I=)hroF0Poxw=GKmNvC6n zi2wTR%3Tk!`dX~#?~n5y)E4Al$iN@0;_#hfY;`>-?d9=uy5;;S>cv567K0$P%4E-T zXOJWJn2~`jI2UFl;1~Iaw+9fPp#i-PKw~Pl8UcS~1fb~sss?WlG7u^eFdnmzJ9qzn zA8IvUOO!*HN7_Hc%aH9L{yG zV1Gt=d+ZlOSa58&P+Umd41)$S1xIvIK;nP8z}Ga8$Ri5R2Oi#~G@0FdjoTPWFkC=h z?aRb-4+$RZQg*%>^iTP`1;?criz9TJD4AZdrO6{Ese>4scg?(XHcOA#CSVXuKvbv6 zBM~SA#i4e*(V+GsT+16+!e2Za0f|#K97q@D5yA53B{LXj77806sF%^Q2IK=D_?v&9 zf5Cp+EB#16Z8SeW7Q^)S>$!c9H9`AKb6d~n0?_sWfh3R5OZ=Tf;-}5$eh3N3{os2) z?f7lm`S|x(zE<%W1dD&JgYSUZJb^@%$PL!xSB z)1W62%b(}z=hw55&H!>?JOjqf&vXACKREWCli#_Tejp+_Kl^tKse?#pz6zo1JeuE2 zAm0AA)1`MH{!W20UV|zxr?sZfm2jH~^CS(b9R*%?MC4MgFdRzpV+4Q2aDO&>JTuq5 zallno88jQ*;YJ2fjDk+33~|Z}kH`;}8H4vsUxKDF#*>fV0brncLUn}`pRE%{w00gv-^(%Vk5LeYyRD*-#PVTqtLHYdi~u- z^ISxFeYR)GcYORcXXbyt&u78!-+e#N!=EwqYp#9U2Rd~4e7+8^j(uT)-;|%!r;?bW zjGO?(us(_L+^L43rr^(qyCSlJ6ea~eDhaFo(| z5FQPt_e72$0v9rz<1Q0XXCJX9+DUC}0wQ%F`-RYwCi0EN^B{jhM}KhMc3#RA$t%Im zHs3$w;UKojyCKgB>cwzf%?Pk`VEka!bC(NM=t_(yAojV3IYAU)0)c<6P3Dhn`DvFc z?DplS-P9w;!G`_42Asd+>$^t$wohjKw{4r}-M0@OENmXGSHT8;`-iJ0X{fGH;TYzy zT$MEoF)j{TL;{xUz%%0m8qt-2s0yCh*@U_OWH7+3pjl3i#T6 z_r=2A{AELat%u*Wavq<*+V)@U{?8hl%=QLtEB|4$e&#O5?XPwAr+;w#_xNVZt*cgf z6rLqG_HE?*YT;Ca;rrr2VZqfP+$4;pt4F92H{cmQIqaxQJGCf7qF2&M;+4ddAgtRu zyQ&5ceK>yw@kfatA{*>PB6w9%aWV;z9As+|LSJSX$gZbFQ~}K;kBhtffLImy`Z!2w z3tZVauxmM6!WIu<6}!}4O`@YbYV}PBY4k@1`dd%(hnG|lzEGK!RL%lvY`EO9LO+j7 z2(Wy|C1ODO`1@JEV{H00juZ~`_U7k1_7G|Nd#-E+uDykTje*?g=F+tDhITo=XI9P z^(j9+HDTD^_;M?Ao+QXsD%dSYtM2x}y7|0Ls6qu=Ysfmu0|7+=_N4(lAwuL52`F0l z^bLRS^?S6Tjk(a{*$<-Fa9CqCz%^k<#YU}~t1k^X$nRpE3vXhH3)?q0oG?aRRgoq& z1hw^sW6K2nWCHtjm0i^6xMG_$Y{`$Jsd}ueN0R5WD*T`(KoJv<<0h2Fq3qW4`Z(O= zX?C*}WMJ^mtK%OfG(li@O_rGLZt#Z>GTVO%kW+&&AAZbVc3?7p+fN1Q%pZU9_c8qa zX{+(QA6qhg=g|B;N@i6>m?9Mp8He;zZ&%YtC`-$#R`AS4UGioP-}ai7z!8oeFT^Y@ z)o(DE;$g=gDIXA5*F9`t^N1zb!HV+Plj)&dVo4B>s%wG{tyI8_j^CX9oUHF7=&*md z4Khut!1EZcUtdxc(N7UH@RZ=Oa2*9r?5M_$seOHw1RClHnsm@vsA=gz!~tzc6+bWwk;b<`%YnxC<3Q zOI!y#w4JN8c$UAyU$!9QyoGQYGP9Qk^B32Fh zRL%sd%o8VbKl{68+3*ON=SqK>fl9u?@GAWjMS}p$htfpwA`3do4Sh`p%955F3H_rv z5`0H@8(HT~>$oM@64O1boNJaHRL@yTcTFIG!Npj*rdOLMygr%~_Z7fp8(4cjhKs)-;Eq7vo$Xj%q@h8vksY zuTT9QN5A~S-*)gjHnC^@Yaj9ZvtRT0pX)#7>Yw)T*La@C>W`m3rSoSOgG(hISXsgC z4h}T#kOEe*2ez>%ULf4hv*b(}kOGlos+j+d!oqi%39CKqVMc#UdM}LuOh6Htsqj_G zZJHF}1`15=jmo*{e1$jEmSj!LWEXoAMF^oJ=pxvdqu)eX()Ont*NM_ZX`;hlyf&>* zTL?R0KcjJC;5tm6#luIzN^XynI{Ft;w zZ!cf>Y>joT0DFJ@cfA3u(cvUf0IUIVlKm&(p;U%`_}i!YYwbU>VZ66y;B&g5L_?6$vEqOY)4e9=6E3y(940S?BC@ z55SR+0HowBcEEGM-+|zun6tbQGA3lp*|Yk9Z#6#?eCcnT=HKqOG2egLhCl4!@XP-% zMeH?AwDr$-{}|8TedxW)-6N_zSwHi|GDYX_1ZB*5p$&;R0a%x#*F_L6cv9{Q{@3p238p3!gfbM5C(|5`Jpe4YACqKzt8^7ne${H1=gNe zY6OBYtI3heZDy_sQ>s(`;KL}orOBa0k%N>pbO=t?kb1(9-)DPybapS6`r1o`GWQ?5 z%KzvQrrQ;Qr7Bl z(@#?KHu$FE*oIWx^0-{L4)01tawbSC9CWtDHujSo#fpt_6AEmDm4^MwUPJw0j%9zi zX;?}-&u?4H59axZ_8!VT_6{oi3?Vq@d+fnEp3igM?Ksg*RdLEy^dmA6(w!Strz8BR zy)d{!;~`G^oyXxkk5_YC=x4ulLVv)I%D;Y;SedJNzoP!_KmH>=i1A56_~x(m>u+Bx z|Lqt3?vLMV`Og^tdwu%;`;RgI(-(g@21&5*n)dU%FCfc~9QGa_D@B~C{c9}AY%LHV z1uX@MB}~4OxC5n&IN^YE;9$m|0>Xbn-=yiu z+xk|}f`dimrI0v-A5H`aR)(M*W#-DDwd{hN+LvzOOCHpU-5s$LUQ{J z3O#T0e&0c`*dKacnNq}GQWyL>6gv>D4Eri`DSUs}o*@l2EN9r)t$}`iixt9uW57=B zkV`e<@<5zH;swZV*jowS*28kOIo>kP_5|C&qqbG%%Tych*%QO 
zm0=*G&@BCkAGRvb*h7E$?xdz{+0>G}m4qYwD@+CcoF64tu`%zpAlROT5~A&H8o-bB z<;fi9*eIJj1}|~)%<`+bX25fx-~`GHzAKczu&#P$2a`4n3X#hN+}&Z0??GMq0im#A zZU^b~t1@$U&d3_bA3(c; zh%g2#HUN$3IjVod0eB17G?mrxa)JVsuiFT;`_N&X^9$Ey4@Bp@uLmOu@;lg9VPB8k z_OJNRH|F`X=lT8Y*unj*QQzaUef`HehH*5{S03|mk^^Y8bCG8fDFCMMer{2qyOh9w z+!})n{#vivp$o|0?iy6sUczo-%%yAt%p)9owgl3t7Z-m?pxU|IeKgoyj2A&2iLF>Z zT?gze2q8loy(e{)=^FEXp_+57(C$7hM$Sm*cn-)*5Q#<3W{&6lvCo@>BEvdmb0x93hVX=}XGL?b*Ce!{+?>1wUhECH}%<%DyT#X(0WGvT;z!X+Z?TwNWc$QMm*=?S)KI`Qx{5Iy_Hs+sK z)Ow|UMdrB#RVxJbd5xE#(r5Dir*lPdg~E;u>i~azwEC@&DQrEo56mGKH5Y5gM&%U3 z^r1UF+b2M^MWO!2Hvx8r-;MkG#@&DKHLxE4w%hZ1{4*ba+Wq%l^Y1ntL-^YG`+JT4 z55Cf`zpr2Pjf?(XH-7Cu|BT&(nc!y)`L5kK2WY>H)u)s}Y#aeRw?O;be$;r}8Syfa zXcd3Ik3?S+FC2wAmF8;MVDAcr=BoT)&BPjl^>ye~kmSIB6oGvU;XiI8i5GirG&Ok& zBS@`6I}#y2#B&fg_W7~Xct7P$PE75Rg5T=;@|l3ycRBssp!yUWCl@%fu^d&e%jO5O zrQG9O{Q(i;8^@&qav&(l7qRz(R?A8ZbijYdAxU<`cf)UNe{?J4Mb9#qA7$?bLG@gM z`^@=X*obZqhRQjw8!QvnEarKS>OK%*50QKn^$$LqwsgSkIOc^P?LXZ zuHKH;FgR9Nf_fqkg)%g-zE(VI?k%V(V-L>pq42=+~*dNW4&*3j_N)6;+q8sn`dn$OozLL&E@TEL&beC=Ac3 zk+1MKB&x&Nt~_SD5?0-8SGb>8(eHma$6xEmk3IR1e&z@c-eawxr2i0^Y0Q-*KNTgw*&NtJgdnu$uziVmjC1BhN_@MK#~nK8 z6CE7xLnuh}Z6M*NrR){{%vNvQ9_1Yalz%?JAY$MAh2&uEaZmzbMknF(LD}{e0tNr> zKVeCqb~)m3_`P4uT|X5h^Rdk={QvzgoPjdmO9&U2y|S+XfqSi;D?xv-84B5U3t@HC zDPcz6XMD`ApDROnu-FI)e6A2LLnVnBvV0?@sIOo**ChlKbXM33`kZ&)^z4y!j&o(o z3oKzQ$||us@iOcO8GQhxkt&c&ryQ?g?cr>^KX&|IKFh`<3t(kv3!gxNazC5(NLh zhv(XxgTdw=n^jWs{#o25SRZR!H@sy2b>v~ ze7RhHAz2XS7ugQHrOSo09F~v_`%^6v)Mo~d*2athB}b`>0Mvi7wC6LB`Cr}AUT}db zuCi24%esxyo>g0LAAElc(YYn_Qp)|zJSSSETcb9}p9W;)F2(`j8BmX? zh{1tSsaIwHgDN3F$TgwopgV!p#RccJBwBor%!SLK_Gjf$Ko&ur0y=wD9LWwn;by7?j?Sx$k(j|FSeja58jRWH>W5I+h;m|7vT6* zHDGC_vg$>YAx!*W<2q;)Dn@(uu4uVLboM~xf_8NAeT;vTpX<%YsqgoMpYILx_vUu) zZWWH{aN^4Ob7U~*>!IkfFMA=IuQ6B`_K-?#7JHafbvy{7b?!mW7o$JXcla)@%Z`Ff zyaolV!}va=L}Vm{IGi^MDFfJs3|8{ktI&5G{7(oNVr)uPJKs0jJD%Gz_DS$-Ab92T zaPok+!&raeu)oIdC@P^3Ja6&91bwfO@;J91e%h2ZAL!?iBQ+k+qR!`XO_Tz0!wI1! zrQd(i<}ruYtfbdsiG;17s$E$>a6dri;A!xinHR&skx8JAqVgR3Qik?s!&Qy()gM$? 
z(b)g1?=ZgdzGuN_&dfuU6H-ydXC*#s&MtD}bA*4;g;>a3tp#g?wvg~ye#f}b3?7~a zo>OS7^9zO`t><~crRdi(j`z4%tMrVY=j7{Y;$4K+iHO&08Q4C!Q9t9$!1(%?xd4NP zxfSBK#K+vu7~`UbT5vDz&Z!TQ24tvy`iwnbJk0G|$_$<@YUX}*V>!yurmads%>CCr zAisa|UtY7v0yd9_?{)raqYK7wh{A6VL20SP{o3F8kACb20*1$u=KH`t1*Fqmh2xF^ zG#}fn2`SI(_`^9CuTD6(9*sx>vyFcYgtG4sLL8otcvf*5jWDVV^!v{-ugCrQ zh>HIlm-q>+YMKy5^(a=bD=Z@257yIHcwQ0DUPpY7wOE7Jh6o#mW1RFI4`v(U_u#?` zDQpam>vhfRDk1+pa4PJ|XKF9cXTmrx|(Nb_7{$Iz={9AwY zJnyh5sdzp2F)|KP`yGd=F$YB;agDY`KeEnajxnHp=JCq`i#H!{q7eNFjWD(J&~Njc z66Su5IUb0jHOwfQ79JmFjoawoc7?1nKet7Xpoh-z!0`3;+$T~!w^#T;e;&f?{Nj~jo8F|yufkg=QuSd`+ts4)5v?@;}6*W zzxyBSN{W0zjL!_bPtcF*Ja2hyMquki`PaPUveI99sXDPQydS?d2KR0FurCHYuglz~ zHVEoCj2GB(D1WetdQf2>@S7#NU=XGkSd%pg<7wW-@u~}^qxbpx!jIT+JbI`r$&)r;tHSfK5K+TDs(3yTf`H_p;`!Jb&vsxN92b6$hd7Rn zD2{7rU(Wv?zv8(3b9`=JWOi^|NBi=~wBtC4jYRF|IMu5HYI|cAueVqyL0&dzis!W{ z<}jw`wHIw@urJulXO92`&U=60Z?<3K*M4Dz{4d8DZ&fzheqPH}F~_c)wO!Jl=VXo7 zn9DUJIJxZGS0sy)?eg&LA3^drKW_nvz^a{Lus%MZ214j z^Ibf5{PWzxcdY+9U&7k(@1J9~dH>aO{v2oH8Q3uY=Lyu=Hbeq3=DUB2ab(p)>F}KL zUq3(QusF9D&Ii+z^ciq}CGHjLA;#)|wNBuEgt@P0o=PqoL zhP58+Xg=HPboQm@bri=Y8}TOWP3L~y%5Aw9sc>wK{QzrYcOFxvb1|) zu9OhznSYP@*Y9BS+2ns`@L|oqg^bTffrTH%J_Q}mg23`geMKe3vv+|X&|q9%{@oZud-(ltKSsp5*uaiTE-5v0`*LVg`TL&xu4&M2 z`!{VbVouLKM~dHq;3DP#AeHhw2j+YExep)?@GP^x`0x5)wkLnM4~&Iuw(amKSZTJ` zvtRM=?tAw4VC(1hEi)d^HsRm7UA+E`Iq;ABnQf87&vp3wL4u!zVsczByr?|=t3QGB z!MJhY1OIgne7f0o10jO;KYs5G^9y4Nb`xV2yZ~pcDS`%nSqp#q3ik+GNnxzQ*Ti%F zc?{P>+48(*{XTyedVXx!`tv!lOk*3Mvhs}h`Zc!V|9qYs=wsL(9k$_XXPxPv_wB+C z|LXe^ZHcTZ+W6nJ;ooDB!@dGGzQK8QBtv~k0MG~g6>SQ|$jo>^`p|P>|G?DsFXyR; zQ2Lki<~_uJdcAp_@aFY6#61&8?^RD1{$lA!&8rWv*3W$%l~k$ zi*x%&4XG^WXTHpB^WS}*d8}bw8)h5t{p$Ol`Ir9c_X7Piz!&{BPT(&hAAX)~3i^Qh z`}b3<72WKQ{Be!{xj78G^PkU^d45cAWiPWI?g^<3O-eQr()jB`Vh)Rk?|W(XAOHRQ z!Ly)Ev+sZUg7JzuE5bj;e1Q+8{o_~|aY(WlJ_BNoo><xdh&eR-H?yDZg&=a>z^->V-u-uu&wdQ5YzcJd}6!~zpp*KhL8At z?SEhYd>;IqpU-W5AJ`Wx9JrB<{#|gskdGOP49xHv%OCZpWSk*v`9H5QesTWKYxMOTw-CScxcfer z%Zc-S?EU#Z@Y~<>&~Lza|2psS^Ze5Hyu;7)b3f-1KhGb2&wKqm|F83Azf2e&f7p#- z_TL=-HMsM46+Dy6Zgqykm?Q@X)z9c`|CYg1j_=R z=J$ISE6l9W@cVuF&*OrtO)OHOf4whJdWsTB!0p~F5b8&ln^?JFvw`c-J9F@|V`USL z2JQs$W3=SJ3e8+^InAwxTZ4Zshn--#mQ_r4?{ZgGrtm$~8R2_)4?bQBb$<94%NssN zm(B0NAcRZ*5Bx61vHR!mQuFWn^Z7CeU&^0&Dt@QJiiGxuFsuUNuK7JW0q;{*LLJ!m z`0#U|=!X>9F$D4F_vg8u^MLy9`TJgp`@($|GtL)zM=+b>xx)FrCJ2B2%=d%)>gMOc ziVslf{QOeMCIg2ti2GqVklEz(_p15%_TTp=?jQYw>*09ABGQB359jN~EO)}XF~7&} zf7=1=AtLWEKVLJq7v|O9_Xh$Rw71IU!+c#_Z+?E^n@5`0>iNEixgE3b?}zhq;Ca5! 
z`TDDUm2_@L5LuM-_rQOO)cjA5gKz(0-xvP6ul(n}Bs|ymJs6Klv@`yNVL^X2Kkw0= z-|GtlTMQfs^>f@0`VY^K`^N8fbGx9wsrmVFUpecPV6^)GVf_4?`+Cgni2Ff4`d`n- zaomrJ`(PJyd*V6u`Fm)0V*Vc5$N$wn-~KD+e#H0j`FEV5J&1q#zJj^mfKP*>3GVBE zXqd`RwBu!NCp;I%2kv+N9eh6@=6}EQBx&ELZ~ z!u#fX|8slc`xw`l*I4og%xf%3cn)Xo53~pBF8{;t;`o2O?sptW007MW_hUc4=g$nC z-+4BVPdpFi*Y|(<{0&-KstuYTPZ2N&ko_gvO4(N8#z z@rHSWy4aF^&vm2dr}NxC`2H}Thu_Ei`QB&$=Xf**JU7z#a5cW;57)u`=@o&HQ$EspY#3VesEvN($4c8^TBdik`?l~4w# z5hd<^{YMJz-?bEY2RuXs_zX{mh+sd|T~dlDSDsU!b5IJ{sI(b4vNTISKkVvOf>{2@ zib|O%F~5Xd<<&%eSz~9rNv+$<8LozGaGSU+_8Wglf1;S>LB-?PGQDLk)`yE}txicd zL34GrgQjG`5Sj!k61wFEe}OV8rCv&Af9$uqa z;o5$1caZ~6^6_R6JGNT#4+DYpWF0aUf4JYtT}u*;bXkkE5lx7W>otE4MPs+V3h8l=( zC|yI>zIFBX`tZ5e;!R#sXYon$!}|Un)2V+>$^O8tq=T+7qZ+>yA0${LG5IpUGs!tG zoGo-%yn5F~;*~#LLOW5H`N(h9!l|&1n|##Yo3-a{Xssaa+nZJWQgqZ=(I#v6OJC7F zrX7hL^R_*Iv+E9@A-p}AG)dWmhH51+WsJ?r1!ZH?AEc1m(4h159Fjb~`%}0)Y8QX- z9@INFH<%_u6`^UJcAFM3#(>dK9k5#G%97FV4fDDNgVEso%QbTaccN0<+oMjiJh`Nu zr7zWi4){#V7Y&b}ZJ+6pF)ivo{+p zl()lD*e?%NrN0aMvm4+OO2?OU*Cl@xFJ2F>(v99r=@c<#-thRIke%lQ+k|{O2-4sC zi^;A5R)|Kjte-Syg?x7~u}YUb2*CV1sjnDydnlXbw63%6Zg^S?B8wpq{w_gY=n`vrG6H@M03CjJ~Ff|*et7p>geVlTeIoWBs8d#y-a=4ksX*NR~; zU)hB>*t$!HM+#AcE<&B=rgVArvYX5SE<%XNiizrZ)-Em|qV8z@wGSp$8%HpSQSI5@ zM`E&9!AarsYFUh8-QPmgNppV|s>M4#@n80n-*KRowwe?KI(B&sa?uRm#Qh#=l5u=f z8#WO>#acP|z6>>@@a{re^=Y=R6iJTf=hVhZk%+xl);3QZYs$aUuwvPUXN zKTAI5L7j^=v$J7=YC?QJ{Z5b^cYknDwWMGC&Q0?yYxaxyL22GlbWneQApyukn*Q~v z>yxSN+2%-uz9ZbrdwmPF_e$g{C9C8v56VdbQB5Z;GqO6x4t$GgsQSWP^>sb2C;JW| z#}$#@9q^cebwotoip%a>rIU?k=X)rqJ=LIoUN;X)e2fsIys;}vJX|x|myaMB(=TiV z1*PyFWR*Yx_Y+&~ir;_SlAPPWxy5i87Q0hE^l3%tDHNN)Rso;vIo+F>waPJ76YY?i z5H=gneN}L=pmV?~HgBEM0DPxtA$zPXqk5eT$jLs~V=xZq=1n2p{dk{DW6zUDR3EKU z;ddaxLr2kU6gdSkARovjhtJ(zx2pAgKs7%FQTLJ#0jz44iH3i8MudVvBz>a2-|?$D zsHUYlwxW-S3Ao@|wvlU9WACy7#?G@Qu?)g?d!_4u)xUGz8CpDP_1a1?_^JwVph0IR zrHlKE&7c!)GY^yzK9BFy)7{u5m@aO+((V;*ryjKQ0>P9@E{WA%hkWY@*0rYsM}x5T zc5PHw=)i+x{J#ua}9i$-`5bue{7^&d%igP=fC6r*8*pFAk#dhWQg4DZ4 zWaG7R^6e>pz*eb9(boG+V~(`ZU+MDt$cm@GRd>A4*#K##_3rR!x5pH;BxCht7v(93 zy7*$q%$u)4;+uBe5F*au^Jx8KTygZx#lywt{&Efd!BZEy^)ILs(vo=cWGCLu^}puimZLq z-94A%^Im@+IDOwXBs5FJt2J2gl3okG-&k0=dm{s9wSM0@NOXc#Dl9KS4#@ndD@lmz z3fosb)z_}3Ysm-#Tpq1!XYSPNkFTa$P9-vV9Zv4g6YbC_Q)T_32*@atNjF)Takgg`A!h_1o?6 z)@6U_KlG9Y=Cx&-7rn(kq{WA9EYas-<+f>VQMjpJp$O8Q zPxQKn;e+IERAg4s7_C>Rzr2=*Eh1POr6PZ^Xn@7Od%i~kq@v&`=Uz~Q!Xm@(HfqX; zVZHaP8!KH4iC=@UYYR^DY|`{W_uMBrRjIl1hcNTThc$Euu!6qVlf+hI-I6;eQV=*h zuZja?mtU5NKOTQN{$%{w_~Ym&sJ=G%qw&X>Cya>`-k;|8#PZY)ryR<3VR1oK@|}O) zjrTLyVd>>wiLV#oS-d!7v3`MODY)p6Dt;U_Mc6)TaIY^^tJ6eqmhw7u4K58Q7LQA{ z?r){x=LEw@1ogBIjU*f(l5@OYR!F_?o`T}MD+fet`u-xgm*%_>>0(I*FR+|0B&bG% zR@B;^Kb7Zh?Og*Vb`fLK=L({_cwv8juS~|>hqJ7C)E z7xI4Rs++eA0d2hndD|%8$nDAJT5zCSX`a;lr2@w!QoK$X9p>U!xSB?Arbu?YQS77n z(5!ccD1aKaj|VaOtY9atc1O#Y{&LFqxuf}X9lTcJ$D(o@$SH6KZL7N_-z$H2o$uPZ z`P7x9^(4hYTHU)8<+PIz)zmjzpN$owLSluC6VsU<;5E+ECj7{J zyih?Il1lfsrRYw4?9R7mRLDM+^or`_H}ZM*){?&^kBir<1|gO2(7~l#R#$FHF;S*! 
zpNA_TXP{nSL*n589Y^}kzGR0qi0hE1<#XW<{BlPz&+2%Cm;kaM`!?DKt?>!%l+A(kT1-cqZ1>=85|+1xCu?;x zKvYA9$`R@@LWN&aR+pFd@wnDl+I^kLZeKq-ePRU7N>ga&TPJ>%b!#YL-LIBJt|7%# zok)Sn58VT`?dkshT+@GVIOx~y;{YjVf7%=IzNIO~JwXu?to3q3Wi}8-*nK6_cBNdw z$ULsPwsfC|4h%IVbEgYFyaG6XxiQKXg@fyp?SD3Vg07G1?y;inXjSL@#au0D;+Sks z>5WbFzB#?F_wA)2$NiCbIJe<*7^!BKefN!)Ij`FHre@B3B8h(wp*`ypwDxqUg`TGl zT2PzvkUi=AcHD=Sc+1Q7EbsU931rLH>ej0__WFb>LuRkbH}pZtb@`#F1p?xE)1C^D zc2Li&fTZ*Nunk}QD+NO_3YO2MB-yXUbFr>pyZh~)(EjOUnhkx$X5!@6m*NDTV3ncX zzkt)H7yW*QsfU9Yuq(p&NzXs910h;#7NmJY-%rn1>(;43sk zD!J^#RmfzmQv{3q9>6dc2zLGnwZr^XGCO#z7Nm{Z+rf@geuBDi?ypV?5<$XLB)|5YS-@5$SrYW_RCZ%#&vR~Q=xhu z-H{8S`0O07ibhA;Zf)w?2}M9RdcS#W_QPkCAJi&`BuSpc*kb6_`%P^FBNz82p{&|X z6>IDD)4YGJ*6-b}iqDO0jD^%z;btR3y6bd!SW~cCjT`LnjLBkN(dA}=X8M@!UXw20 z&q@|;^4H-`N=dwJ$4xh#gSy?>J8^=zjCdt3g%a%c8#YZqIH!;;SZZx+EK3cj!+Lk$ z)U*v67oQ#pqdqT=A?Z>6h8B`SFb_2ovQu&n-{ybiZ0V6!7TZJAfy9#d9;wtFU} zO2>aDWs2k^KO$1uctg6Dtz|=QCsYuu-gZ;Gfj#S5iZXlISb2q#I9icxRnJcy-(Bv+PfKB#UQ;1r7D7EWB5dZTa(5K0AG} z%wT{ly6i3$_25AXoRK;m+gf~;Np!WH!eoE)kHpfQGcaD|g{ZQ!T z)?S4K-+NlHV8{C+*r;uhg-UB9ohH1vNs0Hda#*~uAB=7z*Y1qvP+%3~sC0RQe433w z8>-Nk=|Z{>-cSwn-6I~s#ydV4QFlHYwW7Kfq{pw~sXfzr4?SoteUbN|%B^bL9QuF# z(bgq)>5mkebrL)O8kFK|f8K!i;2qJTKs$>1_g*A1o65c^+g(e-~R_w{;3 zT#wzZj?Tb=i4{=h@BL!IL$*pd)4Cy1ehsNH5#a+?tf zbAlMxIoqdA7g(Y$@T)$~7e{|*-R9b3HL}ka%-{%dA?r|-yG6?hR6%a-?8exk{FoG? zGVdsqigW2Ek1lUi4)Zx?SR0`kKwZ|Gm9f2lo`o(UGLgucZ$6fBK2j_HwW;!iN`#k(qgqez2hg-@jHL*%AY}cWM7=- z6dX`DIwhz6hH@r*Wf$OUhGb4uL3sCA^LKL+FA!1LYwfzalAn$^@!1Ow>vIu}mAB5Q zV*3iWQ{mj+oU!~|-Sy#B=ctr{bFWfnaeyH7!&H@9JAi^a*zWahu{|A~K77`zU7L_1 zn44O@ey*4MprN*ljj(@RQBbg^k>jv}Xlu}SWzmn7KfU#KMd4_=sv1350E*{`i#s2U zxtTL8oe9r!8S*-tvy~sFD+a4qHylm}wH99Sr#Sm0r*sk& zw{~WpF~v>VKT2z_zt@nQ4J$i19SM4>Jy!dwC&2 z4zQqX%gJg#6&j%oWR@NnZWX=QiS}0@!e@@kX^}VOd$)i_^GgO4oe!f49C9e_UVw!w zd~XN>yN9=)6-H0Ja{B5)dYRj&0`v+EsNz9HbIDxP?aDZTf4P8H{+Lk8bKdqw{4B;4 z1Xl0BJAD$l*tdV%^Cw!^%wmHcSiEdyl^ZB1bzb835id{ExpgCL+#9HeK8DO&N_q&z zs9OiJ!>8qg93A#@L&Oj|VOsZDW-SncVn&Ux<NOeg`h;dw2SpptK)}OzZV@GZf3>3`{EWx<#w-3 ztJOAx!5z2d=2fqhV`P-W6ZMAbn_=J2^#Q31ZbA%;jH~ndb#XvDwSKl-czP?lsTFS% zV2tl!8Pb1cKb@2Jj-Squ*qpX@`PrX|Ae*kW_;9FrYg-ecL|3c0+FK=fHNkF#0(fRfs$`j$!d(}?^W0{B@X9wF_A1&p@*IRN(@4NUoZKSjwBJ4qlIwmt>b=RUqW{W838v_^mrl@s)eX&nI*a<2>~7M|6xQhe zeWL8iH-Rib-i~WVE7Kd)FYeq1a!%XU+7*1@Ku&$Bb?YfpsMir?=k{X*`E&WP&e9#z zJE%%g4RLwBO^x!j5OF4U!DfKu&!Unp!ok>mwnN=-Rt%#y^_6N6Ei~ReG3To8doMYI zJ>K^FLcfxKMg5i+$eShJJ=;~}hSJ{I2GV8&7%eVOe-y7r8PU*ymj}ka$WR-rS z?0YhQ^JUF=a6S2#s!er8uAp_XRX!PKEFr1iepjaj;r>DsCOoT7-@{57kGJ13-(&~`Vz=6jn3EC z=#k|M;h4awS*^=ycUE+oBtOTZPvXP75g~s@Rr$VKd!QZJA3pWF<*uH|EcSP0pKL&@ zH#+dL7x3i@!q2GaY0GpQ%a70PIzWDZaoAQx1%-@DrMT}??o|B~K1wb(dp+%jz%}=8 zm66Udy-qft6KQ#fKt^%F|it9hsU z-%HH+YC#liKn_xasLK%M-SR95fBt}QOgAq5LzZa60l0wPm0T}wfUU?>@rUv|)hiO-O6 zS@e5r2|ef6CfUTtC-kuyYq7of+R}e6sHPZg5%T+b|UmzfRTpIFapR3HqchQe=vRMX? 
z{X{__z`GJRk)FEicqU|j{csJ=M*pd7WjX~@8b4P-@^BR_6_lFB6orcTxO)VWYc0Sk zwsC3-{N#(kU4_PFdVSUtW2#nZ`}Jv|2K5#Jo>th8*VRk9&^ornO2^TKu+Mp$*fdp_ z7-`loyS$Ecp%Zl@6Vp>skLMsvKklbeH-TQCUshM~d|ZA28utW$h+Q!5M%a0aQchXB z16}Vk;xBZwSrMm?>#anHBD>h|?K zV52tC)tP-}(-Xuf!<1Shd972!i$trVxdZaEV||!orcJbmj7VUf_7KH~u-CKlfrBqf)I!dCl;0aiTnD}8^F{6w}6_$6|63|{u3 zv}v>eNv()~rz7`6QHyijN`=cfGE?6Rq<4={1LQF_tE#10s7VaKNnM2=> z5#vmU8}p<~5NPSw6=FnASPF+ji_F`0dbpmr)BL#b09?eqdRrk9b+z9J`dK`|Mt`aQhWxpkhjhSNUo~3TP zJdBm`cvw0pPOv5g^=r)Ueip2GD)Ztyu| zr}9OAy)<5bFcFoRFNs)zgNej;%+Xl zeX%|yO&c8oVAN)G&mYjEFC^$+6jA0+|HKJ@0rmO|C$t8~#xI-@ppO5RO5ksVkP}YZ zSrP=aCtsBN{8{M*t6LNQNT>jW5Bw~E(O+f;;B`SYovt%*(*Z>R9L=mY@>9WoOIT$E zg`i>v79mD6LzZS*Xv%~tF>@q;Nvd$`@0Wh?Uf-8)Mvingznr1G9Poia)E%CnUadxd z$f!s&ZaDb36}YP?X2rE*L%+rEX7xn1!7G_3@(3|!98Tk?boH> zl7~u=_|}cQ@t2pY^DNY~z5C1;dJ80F3>AuVOF5Iy; zGsj}(rlSEIr0l;u{?Atq>XYPnK;2X)fS`?J7gJ{&6bg3FDG?>W<@@C z5uxl)5H?S48{{6=;f7gl!UQ?7Ny+k~gIFQLT1)dvRqJt41hO<-`=uu?Zzrzko{MW& zLrF|U750%{r;7!#M=(=q;`;^yaak;j4l#2c+&zCKr1G-ENXK+b1v1sgOR!c4yZ7$CDJ~R6CCvwcY)K+OnQp z=g+0RX1v9r_m(7t$=6`H0jU*oETFBv4&qWax~FGB7~2gjBxB+4INs*q-h|s0I(0Tn ztPuUNpyq$Ptd`}Qcycj>V3fFfpEhT!jhEv(E>4eSg8a*e9u3m_ESAcDN7hI#59&vx zKJv!}InH3uMElou6HAaOqi%WhX>4AJK%v`#SPJDH2`Bw3pJ@IH1M3PgabbfcQYB;DE?{dAY1t zo8yI7Hn(_7U*Yz4%q};yPj4o?yC|+1ATfqKj9r)3H-@it+ah~^`jsxhHl7{wqY-wV zae7Sz0hy!+;A1o)t$^X@Kk?MRD^tyB2K212m%cAv3ZP=!&FO8vbV4i+m7LFHje?771ruSu**KjG0v~Y z<1xDDY_YuXsF^WZ8r&9_{GIo2=k%z#3^72?^30&^mniLqU|X(-*1ft4n=SC**Ed6W z>rY5J1o2s==Sc0vA~_zQ)~i0VGsGw3SfT5c9jILvEU1-#+$bwt_!Ts~J?gcUdB~6o z>)pni&bzxE05dTqkNTo-M(hvH`T@&2l$#-Vb|PuyVEc8y8E%j)xAvdiYjrd?uXTAx zB$FcU%(^i6v*w@uYb9d9X$c&s{7@4|SK~o-ZwF0UEFQsP9gDSbHNew!>8t27ZE^O! zUzUOuYM~8(KBgdV`L-k>-yzupNseBHfY{6O+g;{W9OiG_E35KkPMcMFaOF~d48!^0 z?MahhU$?U-le;b7gtu$Fd^{4aeL@%>V`Fh-=_zY$cDDnFmp|&1ew5eH&z@d1TkURC zD1k1piA4lE!|{1F6}e?ga)AUMblo!!)JX{M-E?1nN6kWdgE9O-AluZJj}SX0z(l-s zD`B6INS3M9(S=t}OTJ)6!wAFDi&puGcb9?y)zdKdgW}HTR`z*@DqUu(UJ56kK!Ojh z7+pbLuc$Zr0&oE)lgZ8^6ZQfpr&rcMOBt*XoM zu&WGzDZJ9B^1<9|t1a64T8cZHj-t#5uSwPv)NWPCP?>!LoWLx?&#|Gx;Jo|Lo7E`w zk)%~%$M9;YTmh0Ny<{j0t)9i<`5<;E8(JZT+8>{z&EL5PsIycHjN9fg`po;?Nv$s3 zA&Q>ghv-ghFRwP*E_L;AVd5u(ZPwXW8~zY~Kh+iNb76G5_;$>~G+Q4peNMCX{`Dq# z?it}}Z|y7cA@w6#^#(M}t0GOhb&>nU&;~(Z`D0H-Iqucl&0j?Y`0~t``IOh zmG|*?$^!&D^y{f2VjZ&%QPQe)zA4xJ@k4S@;^+N1qxKz{IQo94UgLp3_ZvPnqB{fS zF;4bv*VJsNPN#8la_bRAw#dA*J7Q^nB3UUZmqpB%pOj{MC<^YOI@6`-2As_LWOri&UKk-n&)Fx_@`1?waZWwq)C^I0pebi9+@%3); zLeyZL1=(5$xdL+&6FcZ<3)a>n1^N}*U@>u8Yk?=r+;6k$Yvb}bW5;NYW# zw4`^d_u65Q6wa(_zuq2(`<>voSwEo+;o~p7{1Bkp;59vzU@`Dy&u9Tb)xss!nKZu? 
z;L`IvE!>VlOTXptLa)Lt)Ar7o%XWY~LG%0!NAhq&9(JYf{9AK?tyv0I!2@nK@$ZFo z4CG6wUZtW~mHx&fmPEvVHRtYq8Olfac#_SlY~DHJ9YBxz-0#oY0qaDxgIL0N1b0u~ zZ(4rW?Jq8ZUAYCoB}~ebU&H`FI!?(LXBlv)+X3W=AbYz{28)(F`f=(o03bA{>t6LX zgd~wV3IJZAI*qG*S;Y&amUcyOcJlqg=-%Du6%y50r5ou9x;X<{*6Kr$T}(Ho zNYaxfq2Q@_U7c*yhkfJ^Ua}wlU)tWRSygsx|NAaEOIay@P*L%plhF4_k8qL_R5}q9 zl_r(%o(tz%>v^8N_xmQPR94lhIh{1Z-Hviyx zeNXwrrg_vwTY{mEE%{j6Tsre5Q_87&R#CEcCqISGyWb$)XJpk!;6I1!^I}uu_H+Yx zc_OWoQS&E%rrE5q0CC(q(vZg^aShvHy6yMVo>WtMkUoHXF%QTi$6QCO%lxQxoBa^Eakv)f(7=;=Ghdm=U*s#Lntq%59CU^a`HxHBe zT=v)ZhwrQKa3QETfh^?xw6>DWbfW9sgNMnHuku8Hl#W6z&3;A3$X@^lWL_&s!yXPDXTjm@eoQ*p3&yxIi;|_`saVx32iw^vV^!|^INGw* z(^x`(Q}8`X2fN!{j?EU{Qb2|WpKg8OBcXhW`XFvL`*luXH#Hls7Po@3*!dFLDF`OM zqltC5Rq3jy`>ZabPe^pjOjsYC_I^99r!?vIMPDF^9w}s!u)Pf%-J=R(L3B3{Vtc3u z;6P$~bp!^5XFF=VhvD-2Dai4$K1$j->aHh${;ceeA$$3(&-*+u*EjZh%a`>K`|Zly zKcDsT3bi!rxSkdZ#F7-l2Jve|Fx)1Qx#^vKSw6P!DMy6ZN{!Xzu!1{KUzBv;jbXW+ zh9rpdkD+rf4GmLIuUatdBhb*N7%AvU#becxpHEK6)HHG<_U|D^cQbdV@|FN^_fhSC z_Oy$4@NI1uFIufO?IIE$h;!MV9`1%aF7uVR$A+f7r*Dd|Q(C(S5kw4(5$B$i&^j}; z^u8HaomP*zyK(jWSvW6}hd*s+F5lJ|%7oF|3rXI95PUjG)09JFd)KmUz3E$hD_>v9 zf+8+fj*!5lS{*&{zaRkm zd~y}wm7xu?G}15uJed`NdnGQl*1sMI$W)ZnTthUQKc2)j)tpsC6GKYmX~C0!X2WVF z)8upa4-Q40I_=CwpH!8TevmX{Z&~GS%>(LqF*nRg6<5F{bJJmSlJ>heSJeqIW$z5} z4E6bl0A|^D4REyi?r4E&pN%FW2G6N2a@Jf`{hjCco)o94Pb~{BdkovUcHYZHFsk5z z3)prJSPA#*Ej*9mqHivz$Z&q7%V~ht)d!@@p z^{Sy9X_H!GQ7oi}N# zz$*&994g0)(=*&>5FLjyG|_kqA)&S(>ah%y?eyMadJA{y)z~Lu+~y}bUI$NtDwC*s zUib&h`^B>PNaktn)$502f)L2HRrlBu#39-^)TV60-B5F3x@-}5bT}coq8Ze2&2wIA zzc1l-@NXuW+&bIyfRb*1K97b9^8W4K$a-n)-`ZuMm$z&5Ou}m?66q@@(h0J|MZ)=r z4I+T(*0rdL9c()n2mk$H)qGFZnphNTV|li2y1$^>7IKlIw93lfdakLH)zrm(%Qx=C zGVA5e>daH`G>21S6Uma11w;t*$qG9%7x2MO@T(z2Y&osj{u%mz)G3c1cRUsCBAbfd zvO-}8+l#9P>wpqhr{gsAmy{Nd_k<)@#6=e#D);d^H6sT5_BO0*4q?w-2OZ5wQ;)~# zIBhZMXN%I>YtXCp7{aT=9ei;2RgBH2ttlQY@0s$K5mLd<6zSU~q@z1WGlSRp@V2S8 zu;eYoqJW0d2PG+gvn`rhc51QxE1jVmT{`apWZY9;Jt*kg=ZH~Wb7ebd%v-2<BQ4Hoo{-)c`mV7 z{3r)$-|BELETHoCm)zfVNwd5jv+68voS=AK&z zdhHi}8|@i?y-qh!%|Gt+P?L!5nkwYS2C~S--K@Mh%JRvQlwp~dPmKVmPD=PpB+2X^ zzqA?}TXcq`Tp4lohV?ju0b?ddk}fR%4%K;xKO+AG!2^eTT|CHIck~Wg0=v)9Z6v`+ zqs||YO7V1m5@jl}M!mCG^fk^=+;W0{LViSgj0oFJpn9fiYV3uks%^uPOINzbE`3SX z3B5jWZgmKzpxFh%JyLR&e#x)=4eY!wI!E%vKTbO77o~4H^nt%Tp+`<^KBInRy&E)0 z$6I`hR^hI!Dd$4x-gR7G-m>%A8QX2MJT2`Fecsi7%s~=kWWVy0pg>W4x(bL!4?U&s zA4d-2N9*C;E-g*c55{#G9?I3IkMZF?IZqGr-3+_H)_O42ix+W!Bp{hDLJ=ZD4x1jYCZk=l zCIn`Gb3f_u;K;}AXF(h{TKNRrG3*cC+j0ovv?!Mk)(aOOM}sbsw7b4<$r~&rlSa*eN`RsN(XM!5< z7aKy`Sn>HFW+O@IEh}I*Tr-#EfX=x;UlYlHDa?Y~@VgaUx7l6$6ni;PdAlg;onu1M zw)5(yGkk0=CLAjyJ5aR;CLfs>(*1dC38!y*58wgUgr)B32RAoG85tTXD{3CW?oxY< zO=?w}Sv(8JOKB6WW?Y-nYpIMJ8TSVFz`O-<+(3ZqKS)}WNL6GPnR;PfDIeI@gOyT$ zf$W2vJsZ;5PS)&_mX~&(a8{(I*0GYus9j5D!*ncaBax>l@AEB#qBuf2H|tNS}rs#Ej`;_GV% zLzW`#q~|f+*O(aHo#OV7y%rd{#1rv!?4((`sNZq@C^43`)QOYD^Tf)%0V+nU_a=g4= z2A{7U@8A-M+KH*t5?MbNjog!e`mnQC>nk$Xv{j{E&Wru{w0fD0{ljg&SV1*SRd5tgTdEY;BVj(A58G9?ydNSyRzm4&xh z10p-DH_+}E$t~YD8sak=%GjvxvOUE45v!5(i3p0vPU;kUI;-NgK8g6w=7&?GTz>{B)jg`NZAM{Oi_`{i=W%nAP`NGcxwF3!VVB1()9L z-os6Gv{P@@@WhJNxur<&NcyDgZ_VXaPS^h4?^jlxc=V>;G3Gmo#YdS8LwP$-w#}-W ziVVE?@Obx!G`=EY`mD8o7Ubwt+os-!P=>C>iofa)_O!p`z?rZhlO+eKPabka2k^s& z7k&SZl8hiH@bP*l!xK}tD3^u` zD!b7V{6LiXczYA{9}aau)!enu#Mha$->0=fBq#sLL;$qMMIa+4$_@l zvKpw96l(%VG)3xpP_U}^?|^?dP(v8X22pzA^3rmM#x@Y>tX&%Anhl=Do!fH;Mko zhIw<*wIgK``8xUGKqIvfq3w?k=ZDW5i>1Kr;Y;qBEX!LS0<1b`j%$TiWuxtV*4bw)WesGl;3Z z{kz8!`T1f*2jnr9oZmk68KLnOkw6x(id(r?f9)RM+Ym7n1}hc1Qhksbe2a!$T0K?| zp}8xUW4G|C=4c_+OK&z12YZOL^)YMe1n&AVe;3r`b5bHeXz`kVh|{LC3JnKONHlzH 
z>>`!nRF#o`1{rH~F?=3K8zgUtW4QR|TkJ$@&_`wZwZo3s{SweOW*lDX@k2*rRkejr zl2#_KhX`}Rt=y$0wWFuz&a(&Exf9iJ11@EpbUIKQeN+1^GE_4VIk%RVcJ3$BBpgIRd5cHo&qv|V(HdJYyxYLw-Y8yH=O#(pdMGr zE0x{DDiLxljJvjNrds8R=tY7KpT9(IFDlZ`zm%AzV zO=P8itS{lid6*&gr%1o53H>oaSM7m7JD$$07iTB+^my<6v#dY^wdUh0my6>M~Mt&w^Q`v5Rxa)Zp=L&PbS);#xbO;!&Dc5tbmdlkl zXsJ{&I+{C3%D(CLu{u4RVw+FG@xI-a8;mQ4$QQdMG^H@uUbaUgyES#afvAA-I4(W2 ztPs#6-C`1faKc!wECUz=`&CW$UUin+CDU}v@V?%@oewshRgqloUduOZuiet&!tHW@ zyS1+iF3r!wla3#VYKBNAAxZn^)j(98+@8P;W}9FIGsNFRLSymvmkHVLk%9a?^6aO0 z=6Q$RwQX`j*Me?A_+FZfG~F~|*zn#Wo?x%HV^Dq8(U(_QH(3 zGlM)KM+`Ily))}ALO+xDr!7`|pxqG~zgn;R`!2Jz=9Jm?<6HpNm8o0{l!c*xLGqkD zM(rS#kU&BvR4+Q`kgv{;41;8UX*E+^Ers%WKiq+lc-gvBZe;e;q_;gkY0Kq776+bR zZ8LUvJ()$KzgJeZ@YW<)fm`AMXGn)--GPZOAFfD(!}t-;(*3z-r~M*F)WJ3$2HMUR zi-x&g{gdo|>S0oYP`s{*#rd5heUXyk-WzEFWpfZpY)t_0Iv7re1P7NAQko+P94jnsC$IK3b4Nr%CPS_xj2_# zlpd-}nO89(q|-_5^ChOxHmu!OvDuRn-AVc3c%-MOxRB89ktS7cmd~dEu?e$AvJHgA z5UkUu0myhMJ+YU`$1bbVO+=6 z=DoLjV^1%V!Krnbu#p`!j*G3myg5FQ$72gA0JroJ78?83c`T(au1$#{`)o@-pT%R7 z$J9L)4GJ8|VINvCea7ZC{b;7^;=zw*((eyY$9Q%KG-=}Oe$DnY|J;Q<+(?@f#!B>G zxZw7IGgxu1a!;K1Uqq;aO+>&rG4>Cbd!#eF@Q+o>;T zYC^P>*4xuAX|R!=Jg#L#MeFjd!tQi9$=Wa8?V=@w>9cx&d~T5L8Uw8U7Vl`{qbKd`x~K;hge(M=u5z@ro6X2A&Il{&g`zr;|{5BlH>9_j6d`ftP+o z^8KuBu_ZA7%kPPNgQL1}=ODUSSq)lw?~?>ri)60s)qMTqtK4SR-^jghq~84fSRBkLT2yN~ArP5EN-W-`XM<32Xb_@s7a|n9O>4G3H zJpV9%FPpKcEbXT=f7iMRmvY2667M9>k$LPl5^u&E&GpA<_;rRq;or#2;-$tcX~!6Y zFbQso|HgQ$ZD>L%0lekNFhzN0%!QyS-stsc7E?swDldN!tvp2t+uvaDf5FHR{0sp6 zZ%|*B0x~%R;(h>$e_*5k3NxO3NEH2sjsv`ZS|EoT=Kug208|DDGIa&)9l$Tp3Xo6k zsjoNG-T+Wk6k|vMr6mDKr9?<-7d&tqEYl-{(sxBpIY1Qk0DvflJ;PM`eTrRBsKdd< zkiED7KzH62pmi-gWqyX{^_(DmPLvA?aY)e4DDm>7VJj8{Y&;*7Za_1IIuvBtCAe#U z4@jj&1V1ZGwtMxCD>seyVsuitQSkxo`-Yk_SaWYPEn=Vse1?>4H+9z;q6fUl-@t=2 z;P3}_`wy`AAJ{O?`3q9G1l;{^@VGBz?-$H8lEt6%e(L2PFymhkxi3h629?f`(=UAb z2YUMJ`?0AGmxVC!@Eht}rg%>OJ;^w&DOhdl1Lyz$4)XTKo|Ar6PmvW-z-e z^|UMZal!T#k4tU7S5&0TDpH792EU8rW(F4`E2fKDN$_OIpP`P0gOkd9(K23XG_0e% zpnohX8GP4+fcmB_CMQo)(V#t0EgT;Q&&kST?60k51^6z2^}P~?wa31*)WWiV6W>j; zJgfMuhZ^h^gRL`wrdxoH#RHcM8izFjz!VaT-0y=!=dP@n2pu`V2Uqbh3bm3(q#I32 zIojOJ{j!KVllxq6V`K)*5Bf^~fP4Rx=Wp=qulo24-2E3!|2NclF5}vRs5vc3Di-TA1IW~fczn(#c`B2m0mBn|wDK?3qx8XQ1>ntyLRA#XLfXn1-i!! z9)$=kV6GI#(;4RacgXq{cpJo?d_lSXFZk~~?w0^6{{|~#Y{BvQ_wT%5`~gh=y>I=7 zwEu#IqMRvU2Mj=e{xito`?e1#t5kGI_>+$u*YFmmK*nyIlLQfR*0>;YVt1J#qCH@! z4ur~e8-SDM=x~N4`ZVOoyQ`W3KItGKysre7^>4@?KzoQKrk-&QkoO$%RS@*&*yg0! z5L?LnxeaC>wi403z;&)1!Rd6!3KMcc%9u12jE6duK?QDqv^9)hMWEMPZft;0nvskX zQP}DcvfdDZp67=OrXTv-VE^qs|JEjc=FTsq_GeB2EO!Ph|AMdnYEM7o&EJ9PGfeky zAoRbE`Q!{Ep8IDdNiquQNz+}w;b zM>#p08jIcwvi0%QHOiXX1Pb;0dYF@_r^pUfkWrqqx&@2fD{{qgFg&j%Y{i2Uj z9O7VxFt9Q~GDZnG`dPFYYMdB0C1b8j z&+AMUvAF`~jR)W@r6Kij9=26G!#LU{s^jb8TAa=D2J;v%g!?x;bQL~BPEkQ@fnBpB zRq=p-p~|f7$vO6_7{CF?^vP${h3P9DN8aU~IEH9?1viQnFRi z?s=8*J#Og0>4In$*N{vODYrWZdkfcyugvUI8*o?X^U&~0p}XKaxxCXj^bY|XD~Jrw z_Ai%ZzsCm?AWe)9Bjyma)n9G)cUk|;m4EMl@2H<&W&X7-XE;5|{V)6IZ-85wlYT{$@M=8AIps;&)kn+b_!LAL9#Pz-V88U59Ia zj%BJWN}DAB)(Qg3Jihct*#)PKwcpx*I@<|@zw`#fl33~rAU)@KfMS<)iZ;`N9t=~X zIU*GTW#b2OPWo`R``W|j9=4*g4CJVD8bRhkcrEP6ntalM?Wh+~rSOW6d7eZ6*Elw! 
zA78^SBXI*BeLL%7VHooh7%R^fTgkQ~FjtG52mcJ{aQLs|c>_5Q9W$PX=OVb&a6Auw z8qR}~2sv*HeH!4Jet*t`wjbeDCO#a1N2I;JuhCFgUcay5`v3#{&p&?WbPwXT-}7hV_5JoQ+o6tQdyVgaJ$lxi#&J#Ke2JV- zd;y$~>!a4TRq@QO^w+uM{)0c~vg5t{ITz3E0t@rEIddj{yvX2u;!8s2gj^e$8<+GG z$f3sbSG+*Z&3}kbAN-yx2;;T?oQpnnr9bE9<2|!~YezvIuRZ* z1u}p09iJSDRarS=9vCyp0=~qbHLt_HsqRAqOo)TONq=5j77~(%@jwKGHa^Hb@j`LO z{s3yF8Dg#^7cr3cHQtku+4z1j#_S}&2PR2_UF}>gPhNzx5PQ&;^a8Y&L{)5%5>&d}1#%eh(WoJku_YF5sF)fTBQg%z1*?l5b=0#d)SB?~qFH zknsIae}5qfB94CsMJ;Ke>C~7S=9EeB8VGW}smYUjAldh3Z`6SLfSQB=GQaONlrjPY@aMh7`I!i4`|o>zATsbx|9OvWJ+M80y+^$KACA+) zJ_8Kz&wBkgwpXlI3JMvQQ#1&2N@*n@w-R? zVSo67*&yQ;4k^q-hWo(zxW9oM$NguV2p(GU`?$Xi@&*4R*Ee14LUA1JG9^iPoIm`( z{+^tW_5{101^xv7{e5mQlM9%JB;SGi!uJuhPssVW4#1ZkNorKz=lQnljI6t^db<^*FTS_r9(_{SlDx1VIG{nb_pOgDR zzoBv;AUBS`lm0-80OJKJp&70#GH`_de;7Ca=Dy$oq8%mDj`-d`pO5?g87B^SyvcJ4 z8NP>hM}MGL7xRIPGqlgQ|Hgi-#&*lT{RrSJ?hEY#0XcGA+!s7dl-Y1fQq@Qr#NU6TbbEDrZa-ZXT!}+iW zzzv{1h~2|^xPOdKT%U~d@4UeC|20nVdk&fRXvaVKFNy)r>c1Bt{r(ZtJ%8qtMwM6M zfRVa84Aqtlbu6L`q0oe9jTmG`y1*qv112UVU)_ye*pV(Hc|MZQ(n!%A+KuF4(I}WV z$ctiUhUaz3{A9`ehF^iaF2TOcZ5SI8%Vj|4<9Taphn6Ykl869lz@s6A3p zPl)Z5q=OqoJ;n=2l!M8KJ5CRmS_WzeGcHqwBb$==9;lCOC;I#ce ztbK9)U&oKz=TM*i+m(F3{(b_g$t-{Vw-rtQuk9oKZ~v$=L!@VBzdAEVdaYm_eDj8i ziLDf=Rfj=4z==>`0sn2j^>3Ni3%shb0~O3A6?yR7H>PsZ)MemGGp=yOLhejtda!fl z=Hy=LnW4FDCAvoWbgCOnp5@Kyg3&iqG^Vq&$i9Znkdl| zXT z(8YS(A*R40yO8MZd+qwmvcJ5j*bmoA0UB3XIx1V~@Tzx#w!YNHb2oz|i0a&vWFDp~ zCvL5U^J27eJM&RTPd(QwYDO(_W!8sCeDTcHb?m!~{+^|4$!oY}1Trv`(@O{!ye5`4 zSmA>t!TYk2u3E{E!CUfV* zFPY;I#hvr^r5S~+BqKtVhuLWQNp8Z!D*Mz=eekNz(Sr=+n>Wif{db4zRlcQOhu4`= zcEXD;{HSCa;q!z)y;+-dbY2`MjCWm7vrGL9j^?ahqkr(Aa-4)gDR=1e_xgI6PB*8? zsh-JfPbEjC(@aI^{R?UwZ5^2~d;K=I6aV6d{^ISg+ab#DtSeM%C__$`x=qiEXu1v5 znmwCLrf1HUKi?-W5f$GRl+-VgKmT+q7gVujo_K3ztx31&eU{&0yxT38%l54fE_D*u z&n(?ikbkX{-`5h+b$_3)ckM0wR5w;a0nRksEcNqK=)4&NIeO*rv@bT5tnp>PYKiWSd%}DvR|B|i8T9_q}Sfr}cE@hY~pVRZnddd#w z`*tW_=w2xHfZ;^apG-?|5U=SHsXt}GO2zC9$$zQq362Q0PiHhcOAL@53r0SBRN0Yk zuln46dsnl(av!g|+|a32Zh{~Gm=&x0&{avaRbu%bSWyS5PwFlK`Re)E1*fyWI1#u3Uq2$eQ*|@Wf1)&gyVg|eyZ(iS%L<6omr2QRyfCO+M>YoW%Lgj72#>nl;QSpLIpK< zdQGP@;2>yfef%ipe0vzurkfU;>!+=1E`RO&d3IIX=W6AjBXusXwhtwVc}=8|YYBN( zqCPqh9nLCOIQvk$p9zg}tia#OONSE6sd}V(eckW!7-Rs>tX$|*Lq7!7F65ON8*+y5 zo@1#}P7CmG2a?iFt7GaKljnB6mNuz}h+D!;)?)c+FQVg9)a&&5%xC)AQx5H+zJDEt zSe>MfL8xrNFK2xs$lKPvkJQ#)G){r=`P6J(?{D?ySf4lb)u}HaL0c~zbuiDyntfgu zF$LOzma6PE;2)6eigZ_2UHSG#_y!T5JTmgIaqm?gE%UI>K_(Vm-21huZr**C1H>3w zMwHy_qL>-)Q2-G4{DZ-V>t7#)_6Q*@p+>wC84OURsx?{2;j!^*JF z!{z|Z8QonZ7Gu0tg}5qSkQ`+B1EV^FU{TBII@hcrmiAE+7~V%>FB9hMbyf;F~Yaf)-lS{Arkj_Uw>y*_u5bP zYwCD8ELZv^c&wK6{x~-d<@=;x=T$dZFP>06oXwt#Kz{6s>+P_GaPTvVuB-QEI<28H zZg3YAXNsqjGBp17ZQJ&ugHf66(mx3gpQ1`kKb)hP4mV z!%8c%YuU@F!kJH5~cNw@G&C+q!TsFn3n7ds3G-CUgCoUgyhpdvs7*zhT~=4KqOmavrwJ*HdZd zwf@OHtz2J{aH?rSF5yUPWkb$m8GlXo!A?a zGEqOHS6iVd=rXr+GaL;1*O8iKf4Qh53F>>%gmxK7D3B$FZQ3s ztuRBn@cMhFrFjTAh;=C3g1Jv_L)`F*eOjBxfv%AfzMPKidHc?d`J;W;vRHI0El8Mj z)vm#|W*qVpD}T+_kL$Hzgv+C4FOl6Y4YABDda-&<%V@T{WWsc=Zu4HdGi!xgr*)eq z^NUx|_pNlk+%}i+K6$p=lfzDBu}fyt+0g8}`w6<@>TyBECuuqF=b!5nt6*ZDUv@U+ z^7fWLDmXu9VLjJ3;zTJj?e4_R9n##NEtKPrJKMRPu77m^k&mm!;>HzmKZy0?4rK@= z820`-+aM;9B7d9t5e zC1kK)Y=0&gkqKHC07yuhypr+I) zxkw~O1sc@SB|`(}z27O@lCp}X;iL0r_7R2P$f=v?aZJGPz%1q^OPScJSnzbKNBTNW z+IzheyDJdbSL1z14v*y6E0_6QsdMSIJml<>J%3D-*I|bJ&Xji!FAd~m_QM=FOSN8x zR*00lt9I{&IBjo|;C$G3jPht@yOZ0EMm5^wa5q?5-kz4ICXt&J2!Oj-Kia;n7f{PS z+HZDEt)7rEPSW#{q4s;RY<08lQ+hpJX}Wl3005>|-0NP8{dyI`Y||yTxU<}_uHMXH zAAjl59tEM5S}xblq*}k+eid62=;^*cll|lNjt5nOC}30?_FIVF_q=>B(_X!)be9b4 z@Um@Y4(cLMZ1uLM;xS-M1F_W0{y2q;D=zn&(2K7H>hi3xmA2Tt<>z&beb-Y&G1PlF 
z6tBb4UoUHK`bq(zg|-It1nRenskCZ7s(?O;+-43e<^!3yh{oX^A)%03=j{jp9zt9kHf+WlUl;UuK`G+K*{=u+e^1_ngzF!Bh_j(%a@2-k zRy0lkWL0Z+1*)11z-r-oyNatP`=F8pRiRSQJO@?99)4%lkU;zx#i32D`C5JkluXD}il3>x%jWq5jDI!0@HDL5Jq>6FTwIYZCXc|ouM`NnpJZjZ%^nsrDWoJv zIr^pqR@!_=g8`!HZajtLk&OLh>;_ z-jZ(iETFQLC*=SF`P;{4+XU|4X?ltvt`?`CHe4}}?P2$RBBC3*&K#`X5SZwDP=(FIuxdpYiv#)K@&gEG0N|(+hb>o zT|H@5y9H8w*vc?LyMI_La%)G+3$3AYY}_X8zGJ&Ge@;{Cu)pjh)KuxKx49fZg~^;J z{(BAGS4VwM5^Zuyw(C!II`5(J<8pJTi+b!vVY#(qSKAHj|Ec8apDJs zgB1S#9!%a_Zeha#YTW#)6>@_dVR+u`A__LE+S#fe*ymT2PUHHpn$@A2((i4Mf9&XT zT0OK4JItf5T|hu`r=_}04T9$c!u3(GX!a)r3IF&>{gXG_NnA{`>+y9d-$N<9&GyBX zsgk2m9hlXQYkcsbymBxXEH?h@xmNGHcmh8CIkaHz)?-pn&iqg?ik!*E%k!ATTh&SF z`lfX#z28*Anai%{H&^;P6uRdXlrQ;L!L{2$gMZ>()-ll;SY%(aoEqci)0#$7zJx*wT67^Eov=7PGf)5r4gW#NK-}b8>YkRF46Y2% zw||q9G|N{r`nkDpW8MxRf2)6>vY2zW$XY1IM@e7kr}xTSiM#)suk%=PUE8wjuPD^2 z3RDEVlcE|V2*MKH_h1RadjkA=tvqU}qZ<{s-n%9bU!Ekm_gZs~A&Y7TE_P`soF}hL z^!xF`Gk3EGiVh$|Vh14v9s(dXJH-LJ(SJju0*;nwu-sh86FzH{Y_1|qNds?SPf6;& zgzzJua$JHzWFBmQ@rxMnZMqzPUQE3yh#bTJE7R8!j+%-@3dLI$$XFj%ebUA~i|g8jx-p3O!DGo-``7cP z{sZGf@3nvg<@l*S-ruGASw^uIdOsug-Y)-sfRusm4^=p5f^{Cr&M8_*|}T1_05B&?~Q(S3w^0TUVq^{DlfGP!QV*q7E>s|tW?L&#I&sWDAAg@rIRWJ z@{84Sfa(hFXW5B#eF|^=AY6;eJ{QSHzx#hLd%UAHNZYO!$ zuo$b33lp%esnkS+XX?yC+x!RbXLXM`R*!HoCzi~NoI`v-?U5B)`tEuVqkj{@Lm&3J zafdeq(G&>{2V&%55~uCKT7d@no;nv|(zIG?z8=wF9f|PSJY&U9)H2xL^Ha}bCl9a; z-?HL-%sqDJkR=vKCI9%fEgov4FYobXfl5VWZovQnEsK0JXatuJB}4P^7a71t;6IrU zD0=#Ac~)P7SdT|*UEUr4NPiNzhQ+TiaDd8{dOYuw9RqEI-yzVYwFcmAy!8Ub-ja=k z-lbh1*Uif`+V9b1g=E;PRqg5{iVYPH?3;KGPP8vV=8^%>@fU6Ttjqr+Q1 zR|zVq-C9ra2M(RF#I&gP={h$RajxPW65<~8{NbwBKy-SaLgsdCa|*{N&B3czP`wRq z(gBo2%X^D)oUtudn}3p6R0_8hdA*-92?Cp67g}wpYFK5O`B`P)&O@df|5H8x$tmAK zVkG#CN4dlepH&ci6BR`4-Op(vd~h+7a=fCheobjwAlT|n4P3aLz9#lRTa{MR0LqpY z?Ya9+_r9=tIwOUl{BhPo@qL`j;Nt9F&$WEOwf$&8j6oQs(|_n{S2cOo`fTGO_M)Zs zmm$Pn2TSs0q~BE!BJKMerYP_%{Y!8eKyvx>h33f=k*#p~{3JwceBAoN%iujNR8Jl1 zp^u)1!VO^2PlS}Nf#%5GPUL&o=vsOU)kTxN8)JWhfu}WiPX`S6dNg;+;eY)noe3<0 zNsInYXo0h}RDadr8GSu_->}>)&jFS8P9>?JCLz9(A5z@QA||IV0PWw8Gz$b4KXuO3 zY@Fu6s_Ji$nmcfahd#!*Bdbb7Bs;{9+Xxt=wevqbwdV9?1UhBL`-&GaaS;ra2n#2B zNWp!N{$&lot5i6^T>KsdBIJ%JO=A9Wk5BQ_hO>VhX@5mL(J!ca+qivE^w_YUxm1n1 zp!xtyvtT!^#b%R zq{#2@4V}Dwjf*M#u+s^z8CaxGS-<~%jDENBt@HWG2W@;m*z~o%6tTt0HKhD{x}(4C zN%^rKGiJWmm!=sWo6mK6Mn8drOw=%0BIDO=z<+K4tY(fyc+ zk^84`vBEjfs(eAAB}&JKz&b~W*f82`%>-ud&f?J ztOfc@d-T&mpIxXj=`%wLXB2%b!~&!Vjl;qQOF_OwdhQuNisu9f=ic4{ZxwzspcMV; z9e=hLqq3cE`w8>y?5r36AReB1RQBz9;1GEHD2jr!PaQs8AD^G}*%>^}(h6W4xS}eQ z^SIzDxKMCJbF|m|tEGbj0_me9bEZ*NGey7nv-)yRBPqf%#X0p}p2+|gVu7pMBhG#n zr^e>d}P8iKRMl$ zpf?y1hjXW+VA=mAU)`QB8gc>OFu!sWDEKg7egV4?=DZ#CK?J%NVL<%`c6B`F7FlRxvN~<~}t2q<_CE{|$)4S>~uJRaNvy4E+*p6zU!+ZA_-K2Q(F} zeR!wVgO!%|J2_+r=N?$T3ImhktizJiE8R8!4Dr>xH}p$2doPQlvQdg0OM}I86(Aox>MBPyR){GrFpxCt$&nrBSae{ z1h(9a@B5{X1S$f{VPx7OY2jGnWeMiOcLT~p9zLOZ@+2B?;oA5HXJ$N@VNQPU`R7e| zu1rswG1YU;6s6pklXF-YDE6+=ZF@U=_ITN{a%d|*SOrP`%3Hp*f>{*m{AtuEG>5oU z;~NeWj_(V2tb5)2=%EHp^naHBrm`lVo)oYHJnW#J_>izM@dNuKh`^R5MsrFhUP;;x zE>JJxBCY6rEr#7xjZY{gaIN=Jh>GDI+siKfHP_~6po`MMP%&QM0QmNep#UFB?(}Uq z0t)JNO1qHF+wTkC=Jtc9=k()En70TmQnIF@bMaxq1O}zBm%cTW<$wML?YXG9p-RK-vMc}*CNh8qlNvmsCEoRWXx;!x?M`L zy7+msC>G98_w-o6`G4UHPcrL8@z(Ng^aKHlKNWI_hr!1_v z@$<@pmh7$S(I!A}Km(|RzuW$Y5(Bc0k+%)#UTusvcsoJ}Wg)~|YCqxC9jVNqU$a`B zZPNGby%R>n!h4fsU09!6^39k@V(s& zL82e&b=^w^b9{HXXdBz{owxG(xj17~+{NM<;gb@&hJSg8Du5w8Zs6e=qXqWjl_39HIVa{Yv+&k9d}9-f^q@t#vs7@|}B zyDf4N?UC+kXK?03UOuB)4q?mv$f_Og!TqMf7t9FG2gVH3F2`_-B zelw%{$Mwn;AKu$tgM){V9yiZWvgZTL)E%h=PO|DG0$`tKgE9*6ITgPxgrA4pE0pg% z>9MNG2_?s7aeyeH&BE+5m8Sj#HMVTAt@CNB=YQe=a>#v$@o8=BQm{z$^=^92M#2tQ 
za^Pk2phu+&MM-*4#hRHzz)+g|(@*p=X zjDHfVFtl^$LBJp74HY}M^xKDkh)@uGBorJ20lL3ujIJuoSyl8X#WWwSi3~gBAGzQ; zn-DE-a!AjX8=DLkvG>2vD78r*b2ok|p8Jiw6v$TH%O5vpW%!u>&{$+~UPCe%uW}@S zwD}tvbQEP5Pf7%(zrg6bzU1|cQ+&X8JAavFT4V|=fvLmfy^l9g!0xs%CIL3s*^=Ju zb4f)75Pic8NitJq0w;6eZD_1=2WNvj7rY*!rNS8wWQKrI2F~J0`jDz;kD?s?Q(^9E ze&>AZ=`&LZ!5w9Tn;;y#ZNDcNJy7Mnkl`;u`Iv25VG(YmTz1mp+eVRk{fsN#@_)fQ z{i{y3sgr%9HedhysDg*Bn?Ean%v46?gA#hR{)}yn2`Km(=)T2cdzFp!LS6AHg$JvF zu%oPHXZ~F&P_HCcB;Op26EC&^n60VL60l=m1GUDD>$KBaYVaL_F7%nhxN01(7n7r24Lf1s4?Y8 z;+I)j6d;i{xyy)VDceISip6a(rsUpekMw1#}E1$N=c3I?^1aAdl&C90!a%XOnl z2uK59n+;}m+dHx=?nSyP0fptb_qPNZ@0<>pc|%XWH9T`<&%NM~(`@@@Nh|3>RRt zfI;SKUN;qH`+BH+T)Bzay~{rC7eV$AO}4A)*d>zY~QVPjQtG<_QeNcLw9O$lx*w( zt95DF^;A<0FH{PajL5nl@S`vp>nA#ADdnCY-7Nfv(cML+^J^nQYJV2oc&@ge4aoKx z_tVlWMnqI5Qu{rA+4S0Yq)%p}hf!$8 zr!;d;g3kq<_6MwUzZRS-q4>(uG|1Q-_6ZYq@;Yu>38H=afa)|5kA$Z8n&t(^a z9CnV(pY#ex$U}G^O&LQGX)NR+!Mo&LO73o2SXgp5Z~1ZUOG3smpcBoq^C#96*-`zH z5N?sjlV|Sx1`Y*f0&i3RgOv2fjQB&X=Xd?L@@O(ng3WX)uYU*}Ju^v#`zAg{+z*yf z@-)Oz^vFt$W*Pd}lEZj>%%Mt6*yAc}@m`eSTO&1AeCT6=q!#kM%%s4-9IUm~_kv@; z5ZU>7BNcLmH6BHrkk;g1@0SG_gMmMR1p*n&Sk%H*id4IbMq`-1$LNWyPnsItT$`|u8 z=3AHU&&Sas-@sht zH+oBJlQqulVQmkx2r6Wfx(Wh9obh|YT~>$I{6S$J0W0D!^?~^1Q;ga=<*QBsu}3c0 zDDbXN+DIXR@{tW8Nv`|9{{6c#>-!jb*MkSgIe+j=lNJVt{R8fF@(<8(zc*VjHqhU& z7&v?OijZYBglEGMtf4j#?Q`tL{ZPaCXU2S>+RoKn*}VPy6(sfB2Q{{r-&8H060|)b zlcOjtTxK3O{JCo`~G2)Po2= zDE2WTa-qaki}+B3e^0>iCv6{4Qkglc(PI$K`Z7Qo2>);d68?2o}Y$ z%k8Ym-Blye3bt;g&zU$eb5KQRz+3V1q? zVj@CN=KE31mzHk0wC6U#Q($>pToUVg-Uf1Z2rFqq?gi}m{_gD94+OlxYnvVzYOI@AcumL$ z!x>bZZ{fg$ZeY(R6|x>TVf42N(&GFzf2GTAyN?jUxU4#F@OkW_^Z+UUDsJcOX7s%Q z#5uUGol&xUp7<_En9PcHO1TvCG7a2}qZf;whGjyO&80ooZI+X+5`VXYCn zrGu_t`-u5maPB=Uz0+nyra!0vaqys5`)S>4yYLh%kA$sG3Vg#X#M`^8=6@A%!4h^; zD(lE&;)rV3xpk=ua)G%2>4!p2_m3!Ja*C=qbWp@DW>^xVa*IwQ?^G3@p&#`lx0Pjq zV;b%&F65d4{U?OK9_|q<4tovEB*8@WOA~rAC}n*<&u`sIR6^`6l86x3o`0gmYDhJ! 
zq(@Xs7Kd&vNgK)}Rh?C@gMXL;6v)5Ir~Pq5hI~+$Xdt<*SB$k@-e(2zyv%2hZ=Ve@ zGKJ_XqNwz_IsWm@+F^cf%I=XyGLt9@tE2$EzChGm$gl3T&JQONe)UjnCn56pfWvH_9&qE`NQzdN{;?Dwy{` zex!5YAj%~;?-!;5EnV2OykJ$Y)0^%p(jrGYU#vMxy&k0>#wq`zHy+d?r^FB2R_-%c zCL=IoNbtD8gVfIYtY6A?YM(A7`zkox4Zn$`xdZNOQW^6skLH)9Hayq9V;DyL#^7yw z!yA#Itx$20ojB=guz&Dp57PORyg|Zq>1>HUmxlJ@6Z}E>145G$1Qc51H`rVKUFP~{ zCE^5`EL2#N<7(;w#<<9~Awps|CT@%l%c&Q(N<%F!>Rc#H*n$JVLqip?%*$$HecU^@iLdR2Bd_3{yD)#&c5(*^94*p7T13vA#k{cc2C$b9F1f>wLEy=0Nq>~&DXs&6)t16 zRCei#OsprX$Q0?%<%EwS9YWGc!%WMGCAUuI@I-k@fSuQq`Iubo#szS2s<<_RzMyk= zK(1Gb32L&j#ngW~*S(BmR+?WP@OjWR@FDUH7O2P^roYUPlY+r89`1;emPGsd_3zLA z5ecs=+;UjzOs-PPdqrTU;N&Koc8c~g+NM{ikK z(ntiKIc`0Lq7YQ+{b7uyvcwJ0?OUA_4z71D^+kOl3VWUTuX?x*#t$8qD=I6%tt)_> z^h&QQ#Ik>IA4(JV-#mX8MRc$hdE@zQIOg$F++mhf;`LNllI78_FT{u1=MIuRlB&t~ z=#K^kf-uHL{OPuZSdGTaqxkYuH2!M$Tm_@Ohbn8t>h9)E7jk@3+`zZ(RD}@~)=o02?q($qw6w2aTVj zxZq{|R(;_zXhc^pH}R(T2)zb;`A0T=RD_B(IEz)%fZUmloxuGhXBs2x&&~C}wL^N% zDoTI4o0?|`^dv3$%!rTPwHjiJdo{N672(wc6pzp3n;-6Q+Z*d84-T-?5lWY-G79$* z^eNV!;?rOv(Y>_O$j;aLD}~*t$}%4@{zHD7$K>lgTHe*k^S7fD!d1 zdzrj?aI#MrH4=;ySzw?4U`qXJ(*zgM=eYIuk>FEfj7dKFV$P1X=Mnm#Cf#9t zY8$Lbv!%Q-o#~+nVyE7d>#5DD#PN_hD6iruI>vjl7_()%7}c+9mBbU1zut3h=;MD@ zhNbC;Y(aG4%OpoFWZ|7lX9rTaRJq%`C$QO>-pl}nwZ{%=n|BL6Jmenxc?~0o7FoE| zGYC$FkM<#8!OHoGh(qHj?*%coVPI$^+3VI3sFX+wB~~ct+ks!P`!E52XLSs-haY@D znBF=K_?d^l7NWe`90@bRPd~}ab=H59%8R~)&r=RB&6lDd2a~Stc^_i#Si2dYvWn00 zakU>WCK}r9mHkSowb)|*Yu{(p*Onm#>>2}oS2W?Jxh$og4UtI$@eiQQ=VJwE6 zz16WS`x+}Xsvk^3pxlxVZ13LYvrd2maXN7e(Y3yNEM5XtCO`$bJDhAUt<`^RKIu^( z_NS+7sv{pF( z-G;FFn3D)9pfoN`n|rZhiU@yAh02u2ihCNpP$!M~!8;LHdh{7SBVp3s5i~P7B^4ky z@Or=FG9vES*WfGIqikp=7I0zyz1%@QSvXhT+N1<^@)Thde6%SIWh>xftxz`-AV)E0 zK5UfsUf9oR<~5Z(BjyE-+BK|vFEJtA5MR^ue3{vDU|R*z^U409J^+8%?Hadyn0Icj z%#%+z4yC})q8DapsA!PjnuozG9XLzIjGgMZvG$c=w>^SX!N&x|dPQD27+VSC^ykh`O|y|J4h!! zH$t{y^LGRl>2bH%LzX>$(}8}kQIG0cDmCu&pWoIGo-@96Gzps8)r8Si`yh>R{2n~a zXx&zWY;_|*Rhs?lJ*qK9pzIf#D;A_dJvBMsp3 z0U9iVuKMmQzz^vi!VJj_nj?^ZOQ>~4QEjx-VLoo~tl)p@&Xvoxu6ahKa%MFlg01ss zQG~1ougJB-m3`$GtiaK zqSpjB>KoZgQzl%m5-CvesqVKfkNPgLH4HCauVyB6k-#3H=lnf#pN>}0F>a)LhacA^ zEF%a8umyi)N?MT2k8MkOy8RXw-BUaEd-!`o7eF&NuD*-Q2lGWWIrr4?~qqq zH^}UL9;TyqWY5O8rAP4F>$D_o-fTFA<|9sMclv++8m#mM(Zo1oka{1em_sw<@e}&! zQNsBCiJRe0x48;S(jDo9KHL~ThfHY32Vu^?^RD4F!u&YX=OxBLw-ar)9@RiU#-u~} zJ!a140S~zxgxRYVokK6dnm0ewMk?|By(IWd{c4yhzu%5ARKV&Rgg;y+O>J*jV+%LL zI&6P6V(#3?E{O7zaI`e|9$$G0WnZvCg^`>GHO0r13q ziw_S^=aWPNIEpDg@9W$JnZB%7WoSkUx1ZV6;RbG+WJw#*wnCMFEwU|B+VV3)z^@`x8W3MkdN7w4jaGnTN$?_gk7_NyzSn;T zKB{>`-8RSt&M(fI_InoG-bQw;DO8qihp4AH>eQ!4lAJlNKW9c1EZB1fp2jZUJKKt2 zg~D-;N!L$M$UA_FS9VG&4bWj%pC0;=@gRB~^@KITe*ZEvQFKhT&I}J61f3FghGenH=`!0W`u=fW! 
zuR=EJTrOZ>geQNxXNdoR>DcdK7hLrH!B!&q@&yfzTWi7Zk7ZwEUvDBEKK|rG^R$^@ zK|P-fy!k;*9|SH_Z>cQpL16QMlr7+eV@YT&AL_)6x*d6OJ~$ zYj|&qh11D)^6f555AF;roVkC!Ned}4LAKGU>W?v^;KY68C$he`-vCd%9f4wu9}Wt$ zPZlFK(2~2N+N+szN`$Q?nEVGhOsVMzm=q2pwswXG8V*pJ45HsO(uDq~6~5V+BYJw< z8?U%$p@kZhN zqFoX5vtT&y%N?l_%Pzm*#NlcN9ND8uI-$x{N^0M@w+036^1?!uYq?_GX?2rxY*L)n_ zN3^%}g-MM0!i;q1&>dunkn%}v9?A>WDCjlFd~3S(lg}|e6Ht+eQiOsIK|S_1Qz?gh zWkG!Z648SupcH=SbJFh0pp7s~ENd3bz zJa@|mBhC*SGN6B3f?W6j<mYr9d^$0ymOIM2Hdb^k4Z~tuDckp zMY=3$J&I*moq(c$<--!tv7Y3XUU1ZjZDM&Rr1or`rvT4!i7Kp|3Ut>*RY!rI&p<<* zT;qmI6dmS+!#Js0J%7!G!%QP|V51l^RIoo(qjCCy(Di>5!RHBaf0|?{cz#lKcVr}* z5kXyih96hen#W%qWoyxhA(jX54#Iy427^=i<mw1BNj&V@(&%Zz6iQ(XJW74)RtP9k*gD7uK=5R0+U{` zl82DVy-#_N=Z26%>cXn}>wNEy|Fl^JQ9noAi4dz*)v44Z>D3D34@0W}i?He8VlS;d z#@c`9G7-WuM-|~;i3;IJ)IHkUn|&)EbK1`Z<(|+=i`Fn-lFey)TXDSRWC{f^W~$(8 z4W`1m*43%gmGk!Sn3Yi7ZNj?ws%lGds}T#YH$yCWzGx9RZakQCB9;q-Mb3si`0Lpil%x9?hL6bF9X2)f(!{9oW$vsk#B+M zD0t`|NvW~izF%$M6bJP5UW&tqPkt?M-G@&NM;}Uv2Luj+OCD-@lk8o{p^ln*y%!N7 zVac(($i5sB2RR$!Xw?PJ)P}aoQSxO`%bo03WMmTe)6ZG?gOSlb!;InQ6&*~A)uhA;@3H}G`uDqBeXiheP+V5Ffu4dz2R zAu4!26dhpF&WT@_RAA}**?d=DT_}HOA^5kshoALSGT59W%P&t|aF>kh-0=Ff{ko0NO#j?kN7TIuUjW8Y*C#~kON z=w(`mR-FbaIIVp+3K|1?=z)KTuV@rB8p0bTEP(;&@D?iL7lB;0DV$GdqUlGo*24j- zh-X!3^kSQ40X7w&WHfHN!H{?V9@P9hw|3$`-*EUne3DEBa0P9uVA9V=8|0KqX88(v z*OLQs#k+ix`L@UibEhQo4CV|Z9?Gv|Jun{Q1%Z)v|whPI-d1$v%1p zF5R9xL5<_QFm?wHu46t~Lbc+6vWGsI<{$CHw};NYE0yleLEl2FJX5TA!T!@{%FUN! z=e--yaW)D}&sK(Hx{~7_Uvyu0gx??-^1M#w=ax&vIu&c@vph<>7}qLqgx|Q>^;StRlERJPd^Euwwl;||fI6jOLOA8$TVQ`$)FZ`0p?RF;>AX)1 zw!(A;M~vBs1x117HG7X@??$*3)Mmc-)3ZKFu90zeLI>Aztl?;&(CwNJ;O>KQVbE9g zrllvr$?MJ33TSr%)?2Mv#hEcEknf~R#B*6$vwqx3yJ2$*n5Df5O*_YEe(z~Hq# z2g|JA9>qj0=h}aG>X9lze;FMOfIr zKZIe*%b!kWO?_e6pO+?48i+yM8g9RyGJh1BNE4;VrUjwQ0ny|&t%YCloF!P0?0N;^ z#K#1Q7Ueu`}V2jZE;vJ(Q}UYjyKXpj5M$>G&^U79in-&yymoUVfFSd@GV zU<$nzLbFt2Z+ z^wc0YTaC%QDpj}Oj{%b-#-h>&i^F#ba29+btqH+;*xtfkLwP;k`Q6AtRKC2vQTu+3 zK1YCZmdy3v*xcPE^Bq{{z@hqWNpMzsQ`!?uUweNuP(2W=KQ2T}1fzM=paO zEKq-oU%q-c=0ZCM?o9K?7w$y}Z|KhxBB$Kzhcmh6K#~VIJ04J6?$PShB+n8l{IGpR z3cR#1N$nn??D(dd-ShAK-~jcf1of3*gR&bp)cj-^L(MN0MwlcUKNy;}s@hBxCKF+i zN@^AYz#=5l9BLFX1X}}|dYw@vo!op|CojfV;MW8L)}TvSh6zu*@9^` z$XV?Wnk*J1QoaPdtJjW`^kC4dq*}21B_dx4FyRjO^M!X^8CcjOwvlTT#6;CcRpb2bD96&edKhkZ8w=L;<$7^Zlf^htmA z!O=Hn@*6aF_l!cnKeka?REO;z3HE9^5o|dUtRPw-J`1|^T#QQ)yP01rpPa;frLmX2 zB@qDA0yJu$_Zmub)lay|4@G-tqSJ>PIHVYDN1VF;sEZnm&O;p?L0W-WK=s` z)C?$okDNm~gZI)Z>QY#O!R$AzF{gk$Qkhu{QG%#!M1IkM>{qdXo&u&=s}6nXZnJ@EzKPm3Jt6!_4VCp1SjS z_Dh%_rgN^Kl6n9FC3Fx+YfzOQOE)F2!!SQM`6x)b2n?Wf{GILeV2@Aj!W!X{K|YFn z%OGo>j#)+yQ&O)@{mgA6C*FVBC8fMBE^zvo@8u%z=}Nj;hE}BOYQP>`r$q(Hto2b^ zIYf9Rd03kC42$MIksk zkW$(C?l)%hAE!RB8)<*fBx^P0zBJ2Re4v^P;r&hj;c6SG%r<{h@yV6>e067$itNsM zpepCC3&W*T5;6BT*IEB@zO0u=@d_KN%{V0&rcP2wHt#8;VIh-fbi zJQ&T5$L`eEh$rP?$pkH)^Yb-8-2L-3;?61`CW?crYXnHX0!_oQedR%=_vTNVr6T%; z=|l(qsav%6wlSTD*{;TscLuFEQJjHzl8BE_QAG6tg4sN^tKaETZ3|Mh#sdt)TO>YD z=7xq!XuJqv%*}s@PZ`8)cE(u=#w7+wa{b|u-pxvA2+=q&&Nq7O<(3FSke~h;{h%2C z)=g+n+^HokBQ%fTH>QWtLDd~jT<_LutI5xGazMo9Z&i9uzoa9 z>jqJ1?swjoYiNt~MG8R0*!}=DyWxx=)kH40;TUWoFm}0c@qxrxH#+hTRut9N!S%ci zi**0ohuVLmd5jPh_%Q7Oy~KijAAS=9Py|74jSlr*pEUx{cS*bSjgpKbT|<$P>&6l4 z;d!#%Bp(@ai+bLJBDngpF^KK6K;rQL9A`PO3J2!tcgh8r@`=?x*`LC)h;xQLj=N#Z z`t5pHZjX>Mnm!uc$7wiL_+jQYl(IfIL+MH;Q0RX;@LCd1=`fY}!FmMVbOEpJh7+=k z8MmL3h0CSEkh>!}n8>ocWKU7EKg-(s$WXk(Y*?X8$;hLOAHtbTUmPa)*8(=alL_}6 z2%Q`G`D>vPP(c0aLXh9X`GLk9sqe`RoSL7IXZA1^3LD?`3G+rpiQ%52@T5me5FV)m z^9g?}X|xAPziQqL)i}LhHO2fBDj!=cfS_%kw@*r z7SLh?Lp2uFzK8e;P%0R5V7;%*jW@JAr~7~2F~kkEP9tgIBrf4_KU;QYZ*{a?wz=w&8L`6raA_xV`fSr+akWl>)}2y4AyJO 
zUB<`&E|lVmFz&&@Lgxm-wa@rD32M5e8z&+18GQgV7d{8J1@~C~%i2W7`DEOF7EL`Z zyGcPHd%>F5cVSye*7QT|V{YC@nX;qK!1#6O;ogv zUSw%}ejfm>>kzQGzO-3&THxJ=2S|!aVMmz|qgsrH#cBSk)_$dhC%VH4flhx16Y`FA zz(WtXCW&GbEK`N>E1WB80O8Wm8^|RqhP9B4V=SzQ8d$vguTRxVUwEHANcwNC$?E=L z0fa0cBgg*h zJc-v4_e)P5TBi6mx*>1>{yu+XPy;If$h&?knfYI_^?&~Ul{h}E5o>l)XrkHxh+2k{ zw-_?VQGX-_9K1}1EUyOwzau|c!R1nytj#<1YlCZtFHiP) zg?SRX!@Kw-mTp37Xb0x|WO58>G}Qs$ti{*iXk9Qp;9N)Z01E4a6?nKM9SAaPWWipa z&6y_sJ6X3ooWX)Ng3Rrc*jZ5zvDaCyuiHm~ z0v&dIZ-x7S#Nl@Z2duk0c_GUz6eijJ$Il&Nz7ZGObI5R7-SWtZTG)EZQnwPy&8?9n zNU_&1*tEJ;f6^Tj?w8xSo@@9J5{IeA6ZwAHe!B=%QcJrz+iibW*kj;F<{YJc@YMGG zk<(k^X)o~vC;jS+nTMreRb;&9KT;n)^y@{3I35J8skeCoR z;D>JP<7waJZ)pYzz(B?EBk)<{m$Ic;?`~1yDsim?98V*&F~`X|KTiC6sZWSyZ{;{@ zki>whyk5Wq$w+^JC2WFLa!?CQY4;@p=8of9>n!m=+(2ziO!z?lw2F-OU3DGR-D*q^ z-eVKj&v9@W!-<@?x%wF*jJ#c#sR zz=WK+J`@(1k^Ae{|7`VMe35YGkN{xtg}%8UyBzrR%5Q(`0u%G_LR4qb=_4UH9ejN0 z&%Xvu&KKiJ#78y=@0wWEL1)6>@eH-61r#P|@Q8H)&0PIr(}8eDA4UPZ zG-)ZFmy0DOV>2vEPP2r`l9d=vB7f67hFgH3FK4TCC(C<#;sq!JG z3;!Zz3i#-TaD*O&x*_d5*;X^;c`4b3Hxw@&-z$GzD8IJ)I<<(-XId8OmvUDD5=)3E zbxv0rsldBQZ(SfwD}^|-D6`8$^VFY(Wb{4feO$B-*uG~8q$a`yaZK-6-ut@~+@~MX zhvqn$1*tiurL6V~AKC*Ig^6(F{yV?K@d+S(_c|=_Aiv}A0|mdRHD_=(qL|MCJQqUT zYzcq-KsW`$6bQ1x>;pdL{FhNc>52RJEP^!eAAlCFyrY+-rC)7+2i(nJ&|LtZ)@h5HB!Ax zj$x&{T-$z1yD*ldocb`r%{gWtS)Zbv9z4TcU#Lf7Ybg)E>=FqZe11n9Sn&|zFtO?I zZYAyJg8dQUR2O30`(pT(>0u*SP~@t01vfX7%nXn&kLXS~j;Zk%3K$6YecNsQv2lM0 zUjE=KrA_l#yI=ftCWg=)bNdlCaiLkn(xTQ3V0K298Kb_!I@Du^Y#J~<*vHe(o00mG z87tfk6$Z1XPrR9kHs*b$WewLiVA-x%W>-f2DF8ZpGQiY*KIJifh^#TkM?V`CTphQs z6})|^S9ho(mbJ?nB1-_ai2zL01#y3DuGoPRj5~t}pBgwhs?^KhVMhAb+3#BM-dhSx z+ao@Rv*@Y;h6+p(@?vQ_hMk=h9nIk9mF+?Ez}pl(bTCBwzG31Z@0{~#&NOSwaBMfL zdDy^_l0@oAQpbNio;(_#uB{3+(eMvK7xrOz2qiY#9S6L6iQ?4@0_uf?P^5nmL$h19 zBVQID^kaUs*j>zjiujSfxM%-1roaKzn|!yRaAKARn3oe_a8VKiWs*nYl!$IQJ7EbR4)=JJ1FX_x)&?0FY(1^%GRsm3^V^`wXF-{S8R4T$nGm#4>Jx z*~Yai&vLLrWU-7;F}k!gdxULB$0}x2Jkf1)(f)+!=Gk8MOfSw_?9qQJp~nxZyKI8z z0z9s1k+ytt5D(>1a!VtOk=oOVoiv)+eojC6J!mTs*+_kr8O6MShbK4yeB3q+O{1@( zSTRPC@7nTtSFZ8S;hD8gMEG@hOrsqjR0Y@_mSZE2IIUu`AH3PZGgBlp{!T!68&+-a z8#8Kk6^@`tL_0Pm)+T=h)MIyWQ2xO~s!5`sC}sYf_w_b-$msybW~Dw@C+G^t-m=Vu z=`2vCtc`IDhk1_=#v3KXlJdL5Bl8`gniU5H$@Fq*2ntDet1KSGyLH*c7gox35h2J3St1!4cqVAxV@ZFP;UF<$nTqGofgD%( zu;7myJmd|&>&*ELct|k((b&@@fXn()MZw-bIP@->it=E&Vs>6WgFxZYleh; zgBi)j+lQwowBpj&YM0WnUGp6}T@8Dl1+Zg;y&gM9(<)@TS5V8@h-aERaW?yI8+ za3sk>QTF0TLBrbP(1(Nss6wKYwu%Bp zm)+oJzBM#-y^9zpw?a&=1AI)tcrYb!$qWO4(UjGLVxYu!CqB*4cw^Px0i>~SF};{V za8?bCTtDm)QvK6=L;W1`=ipK&E`g&inEtM}4u!vdMx}oOC5HrpC$vuIzaJX!&B;N@ z{hQJ-zMxNQ(~pv*@ZWX3$!7%Ok~{sOc1TOIual%?Tt2ROn)lO-I@~^fAspiIphB}6 z>RNyV3wlz$c-U(*5*1Y*ZP-uMAdL8>zC_>}ocL+;Z)ug9AW!wyB9vO88|-H+#pc53 z+qkQ4U#))#ExiV1VX}CMZ$AUyiPubkQ3kR_6PU3_AGs01nxEi|?e&)yQfv;pvcdXBS3U8=+vVg`5&6;qC6gBzxEn|wi zG0@rp(@UMX)Zs-t0(G?;vPn+X2lO>I<{PaSG&iZobN^l}NGRu@d=KtV7^aaTX$9xi z^w0z(UdZnN)ku7&tS9}B;dkjt6+p0pQn?H@t@{+OZ9yIpr*TkwKh8$^#vuEbclS&r3r_E)^Wef)d*oL=m=p(_LWR4{3P z0BWE8@)}(sywC9x4zVG0_duEVB^>3RLACUG$FgXFx=Y|E+w|SOP&p-Oa<`(_S5Hmx znSt~LiyvTTTfL49#XdTZ6k(=}QIxm*a6Nw_=%Vdy8(dCTiW&?|vhh4<KZF7|mUxB3Ngq%R`95Dd>JK*9B~y~mN)HCd zuJte8KkUG-gPdPPbTYWFCaqa^(Hdwop1|`L8QXty zh+!fa4OE_y4lO?@gK5R~09JJ-BVp#36LQMvoYp6nYJ>R zXQ-7ClrqGx?_xlxm14@e8xKVL#h~wIb%iMCYV?OjoNR#osg=<*w?tIU5vRfOfY^_p zZN<8WqF|i9P5X>9nnEuco}HxUZU(rgX+q$eUnqh|Iqbr7uVM8P7uL6wz467J%GJL& zgQ1;?dY_FD19M7uVWaG`AxM96MgD-QscUX3P%gYxOKdJ5Aa_P~fYZaTdcxe(U|VGT z(0C7i7jPv-6qK)E@)PEsP|Ea-B{=2}$W|+0$If)e{SqylPk}h0b{hZ+-525z!XqL1 zQib))qn1VB=Jb)e{ff*4tC*lZfaj2c!&RNCyF&A=cW5MNZuE^FmP>yO-#<9;R&oB? 
z{lJD0Q~SDuV&@G6xm)5w3JyZ*#z)yLsd^Rn*9?{6C6ky@P zfq=Smk8F4jG#~eRZ{L5O{TXg4^NYR}ut=9`ZMFjE27E z>K-$83y46DzxJP;m@vC|Oe0g=G+c^W3yRk@)l3V->bH@392S2g z2O|lEg@&eObfg#fey(v6)tB8(-LFl-Cz$X%2&+%NdcZ>4y zU%sIf>MpJ0LM4B^7oa@CyDT`Lv%MPA?e#?AbwfzWXrX5vonPV2=(+~xbL6J13{E^C zwXHq8z8S!7PEGv#6Fv1y{3!3xqnA>Jo*B3>9_Qkrz-`b6eLo?_{QythOL`0nhHdd8 zK^~Uk?hRWqotIYH*Z#p3YHTISS zsV*ixc^f%))P< zpX8yWGw|vnLuT<^WYA#gxckK|AYp=-=%)5xLx4MLM$rg*zv2$St7nKXO%=x|iaT^q zb|{*58~lDw=qx}Vcdd(k@ta0Fr*0<`_&`W9 z?3#bSP2*mdj~lAq-ycyxJmGm6?~{-cz@!6-RZA-MeycT$xifQk36S-#MGS&{1Jt>+ zrGg70en%fc5M8sM7^LD_9J-Rv(wWJ6d@1|eAAG24n_Ug#muHo<-u-mP#LaGF(by^i z^!VKzA0Ju5mge>XEQ!%=)e$Wb2OBdmSdD+f=P28o9W`71fcglyM|T1JamYCFbX-lF z63dzh(41vY;~)JF$`;md>pbPhK5hIv(0Lal9~+BC1KzXhOQ`o$MH=vu1~!tq52L2? zA85LQ{1*BK@CX2T1A4S9;x8x_qCYv#kUPB`<-?2r5bgcCpfds1w1^>{z&NxDoJoJi zjsdXyt2*i%`k1K1_iwzYdkTCUqojE^swW2T+Tepe>G%qMLSUi!v=rbw))&`SVgF=& z!ZG>$BJLjNY89!{*HskyUskC`q2h3(;07;!C9~J0b{AhnIPZW{V8puFKk^R;8wr&j z?_pyLAGA+CTnW6G!9^G#J|>8-$P<4KYC^$1S$_olLx4|FR;SAO^`0~JK-*vcVZTOC zmffM3fNM;2RW+hJ@RwDh!0B^aOu85vL;Jyq%wt|G2IsA?_+zO*?wI!oOyt#@>{ICm)*qk>|kx5I(8xbQon!XKBIqM z{RNVy0PdhO>9Noc6lROr6ZM0aTa}I=(1Hv5hv-!sP6TV#tx{<2%Q-9ool-j5I-s@rscCUBI3i)#Ul!*!7Tq|W&9 z6)qm4a2JaJ4kvH3*-fOcCWx7S0vXnRaw01n>+5dFPchwQ=J8aaZ*^45%rhZg7XWPnrz^zkumplJDYtj61Pb-*Q zAc1>6E)-KoJ@PHJ=l=Ed9`jvR;M#j)Y3yHC|Vd^c8C5by_R!8{IoR6mdlz_HC>5=R-;Ll zY4-_;T^d0495D5=Z?<1{I_wE+!pTJ7`X8}`_6=%-+(2}m*Kt!}Ne938?;&(r&_7m+ z6kW{zTziPHKtRzh?#z4Za|O~jBvjR2iSu%fN{b-x+nIll>UBx413tgGWEDT#udc!>y1Y5MJ>EUC4`HE|0JW~z1V}d?xP7;hjbpQ%BjHuZL zv_jpTn#Jo(ab7xHaC2zwJ=!~CBLF5O^(DBJzVY)Nm5plKd!DTtnTX%MG&?-(Cw}W{ zjT%&_(wKkXdIEPEy?w_HkJvDWVaNSpO#Ano+6ao9(Kd@JZc>*vCRUydxny>(tc zGS4vubm!IGpqaBo1nR@o*tWhucw7H6Wotc+kp0s$s}NVA!8w`Bx6Kg~9nbgNOI-jX zKc*x}V4XNa45^N9)Wq(7*w)#*46t2mjEWw%dAENdL|8x+78t|l`x`nCM+U`3ltGkC z&At0iQWmVZ_udE6Ncue&5P+RQu>&X$aBX&^kcMBc5JY#@SDZ69Qb4m$PgaTh{q2|m z>;V_V9s~A+6-uq2XtAT|WUu-iEwLgQoZ^ciMpW~(4$iB(6KJ?fB+yelzHIgmC)hST zg=BwZ^1PD^49j=>70*PPU+01G0{t`f`%!}CbFqreNAm&T&kmI1v_6p0Noh9};1Cm5 zS{MW%F~Kk^pWRE?YJAG+38soqfCuM1DsVdvIz@i~L7l*(-}}>W)&bgwpZ}IV8<838 zY(z&&!G{^<7~yU5WD$RS^`O2@>EzgU$~J%g$kDgvOY5Ys?TXzHr6njS=o&u>T4~3~ z3s9lpmM2Kqpaw5R$na#XEIpQA_IW6%4Dt}xN>UyqX)h};wCmNh54x)IU(h^(0$f9G z29~)y5Bq8q`F23wV!`TSSPgSZitV$bAiu_KQLeFIcBzFQs?2LLMt~EXD<0so9I1a= zpG|)&TcGG5zOh0k!k;6DK%kEaq0e}JT09|!I)Hbk;r3Kd?H#)N(hkcPBxXMYay$OM zK?cx&;gN~I0eb11fX?4wvrjBjFut8_R$hmny>tL?zC|#Ev3V zkLri22IZ#JSRH&tvX5%-ZOO71i)uQ)UIE!-44Q3znaaqWEVcYUR2-& z%G2z}_bFu|)b*`Sy4T?nQU(X>fA#oc`^U7W(hFN6ONLpIse^6(j?2fMuFx=?)Mcpw zkOzjr%)iQQ$6?eFCW`ssl0n76|R}o?D$VinfG)!fJ zUC!5bYR;4Ns%uxoSYUtcf#8_?LWfUxhBgkJ4!AMjt|bAYJS?$W!WSS{`9whVo_+*4 z4lFYl!0u!kUyRc|!5tk1lUd7_r{}^6%XP^O!9<8@{RIg@5FxuI3kA1^mR7?^aC$^Q zNi1pTl;9+Vb7T8nIeRsC|@^e>8?;^de{Efi=);Z)oM`) zL0jl&-S^itZW6RcHps6f_iyjvV6JI)MRZNbojUZITo)q;aQ-{FZJvp;EpZbG7&0}y zv!E$D)$SLH(a(P;#HDU9jFmvauE5F=bl&#~rr5(B2E9)x2_{MsX-{}Ag{d3@$DgD7 z^$U3ja?kZ8muF~kGs3uN8mt?bdQL+a*z7|_+{Z*ZKWU%raoBZPXeHiFz( zjRpBbBP0;>l}33{xIdT_A?c?FVrI5EM%lyKvibM*9~6HOjI~=#LV%*|KBPAv1q4sM z{?&bQ_RWD5XB6AL1qHA$2kNptX8s$#uaNl?JM-Ga4&NI#ud0Z8I&ReQi-diGz=(Ws zakFdj8X}y3tnZT-#*gzM|KAsl@tz>K*WI3*oPMvMb_d(*Ap$g8oVyphKStX8eV86D zyn?tqp4)%aInuIZa%h6mc{OCP*#8AghGSHOLtNGpzz`d#3ZdtIC!S1^lQubtFjGJ* zFy2Q)d0Dt*h09AukaT;|9Y$6z6xTL0w#+ghA_FLLEr2fk`{xST(!p*KR2YY^`ce_Y zocr|&t>WsQ(Q;vBNoR6D?a9FuBt%%BP*<~#$!vcjDKuDs67{6y34O?bOZsLs5C$p} zjC;UZB3M_-k_o1~z{&~~eS=%(N!{2RfJ>WgZq>+ayG}#78`{(%M?vS`{L(L0J!XHS+kt~>`_l0qKC?xW`MojeIERg(&ibxA z^}NrAx|G0|SodJ_aR>rjhV>3(CStIY<%v)#oL`|=-fO=G59)XR_Ca;dLJb)_5-ePu ziZ3vRz`k8Nbg4kr41m$^u4b4LdO>{L?y*HakyoFdd8+(O_z7h=!`}+fY21Pdl+}N{ 
zb~AbF4MAl1?rC}7B-gW}Ewlh!W&&(bjxcYG114k|*iSnkM9}H}rh%JRltBaz^n|lM zkM&L*-wKcqLkN8^;6wnt`{gl!C4oQw`@jG5e^lerr`v!0&+k}O#Q*%y|Js-CKZS1CpU_O>>9%?P9Z``^g@{qvu{*QZ?SS4MOKugkw*M0)&tegF6C-R_so%31&1 z68NhbCHVY5 zNqNoXOV!VOcpPnh?k4+v!B7{iFE?gCoCHO*0!!08DdF{^RlyI7Ulo5*8twtZmsZ3U z4@uJ=FN3=PH$ce02Q5T*;fPeWofV+&iwoq(nd;YL`N>#BX=eX>f1htGfVHZdnID-e zR}xd6(f**rc}gG9;CHqA6SPSXIe+jfm`{$!L@G=j=;PalC+hjl9=f*NeE|wdI@GwH zLzA1eQ`U-S?3_ zzB)K=XfOibf>&?Tx#btxvfYzKTxvX?*Yq{}P4KZFLd6~#$lXdy{~)_0@-0uneo@GN z{(<|~I|-%VFb8%s;vpbh#kL_|3FLqUj(fdswyR>;+r7s>;D>h-sDY$^6*iuqQ@Glr z*L?T>e>ggmEmgHFihd9c>Z(a0f+8RyB0XwQKm|eI>+g!2t;{iA#=AGdfOGaI|nF70o0@L0F^Ua=#~*`=Na;-zignzcIh0=k}nSU`s4W z=!;U_(Z{a$;&odUPPz`eU;C6M?r}AX6O;yT+G5tB1Bvu?*&O1w zp0ezJKOjuH^Us%QS`~knhx4royDlfdt@G!h?Va(UTdXF)p^bk+fV{% zvmD5iSf@>J-<>^wuGF_|y*NI1SwcXT0O!oE%L24zzqPyOPjcR?p+e%+?bd^(!-=^8 zcK$ig@itfS;I_}N<)vq$y_fdp-?|h|c1e zkB9RPYY5?DGxY=&pj=s=citV&?ha`AH!%NJU?c?D!ge18~D5lHrfb z>&{aXmAXZcM$c?-%0}RYF*K1gZHC^H3Zuh2v@6DcNJ5tP#-L(a<<)+U0r!-m>r*^# zGfT`C^AkpIG4L|Db=deEK3duu>iHmgi<-)0(+me0*Mqac;cPDgD^$Xkd{vIN#$(bt z+hc0i_vaOC+ZJa0ia0q&x&8b*s-@#cJ#@FIt&&!6Kh!*Yz6p%xbB=N5^@f8Q#G0G< zMXb|*d)%eZg_AX4Lo^edH*KudNZH-F@J?3bbx}tCI9=f#FK)@iZKm6-R9_yzZP)1zQ(tlP1iz^JgkDb1I_n(2$cd&di44*OC` zY5EgyB;V1nUy;CZ-BBmi5;owYv zT+{yfxO@_ z@4P*%8`x9oRF(er$5 zCpyXY5L7=eMhQj1Pid?1`QB0dgFdVLtiqF2fe#b5z>ZTUE)@4bJC{6JXIlM#q!ULj zv==VXZw2!;lPTKliD~8q%b;gFUVMKi=&3$NI9gv%*9;dl@3da94m@K?ymcBvoV;Qe z348pxS!>OAg`K7FyxEp=w^GW?ohyK*_3~M&&g2~LN7241#4K?@$7#NArtI%>%iZh{ zx?K%SIrciTekc z_$eNgO4kHKIFNKWi8*#e0Y^cEiOW;-f^q#W4yPzGzGTu7zc2oF)=@e|Z>~JBP7-J( zm$6xa&WX@4JH(T#L;Z-uweB_t4Z)a2oCNKl$_I3g12Xosyo+I);G2qnCFk{>79^HD zq|IqAH|iTJy}Z5dj$oG`o*$oy;{zlJ*9px^W7Haq>E{4~fN{m#u16R7c*)3XbL@AL zat#EsbLRM~pyisXpX86K0Ox^uMy>Y6H{D7@Df@CM zz;UJ=?+#PlM$^QzvPESt#y`+-8ZP-en0i2@Y9%5zl9f%FEAzIW!LsEWPQ`dcVSoga zJT&ouxQK#CzS&uSK{RVWlM^rlG1M2x4*&}BTUJ3aIwLcy7w(kowlchMd_S7%zyq6K zYxm+v`?16-7|C`pM13&`cH<0KbQmE&Z|=gAJKPqVm-Re8l23`+;pA_w0z6f=U$s zi0Ro!=y_H175gN>W1cUN6p&XSX&{|qEG)0&;&kZTH+1fYkCdQ>1rx}3TWY6?MYmFq zxV#Y6@uC5LGwI;HTkYK8n*#*;vHIHSpgp3<@t6n)e34!(+w_brq7^xyU4<<~v*d?u zaU((&>PX*;jvna7hnZMC3XRft)_zriNC2(3pO*D|v5uazzkR5L4^1oNFP4gdCS^h1 zrd%-cBW^p4kITYRB3b`Daa28+5QO4Fv}%++KdaY&MSUvjJD4%F7WtRG3zUrX_S$c6 zbo;}O205fLPi45IST;-8c9r*D2_`ro`YS+mSD%`I?@!^m53M4-FRgoJ=gsMi^pwng z%5m$$gL__GY_ac=4PZStB0izq-Yt7k@x}w@j@@v5>#^3Y&s^I?hw z|1~`z`=~f5i>@-=L>X7RCe2`0%kKcBB~nd)HbDsK&CaLrA_1Z}Lw$1Ed#bL?ygsk_ zON|x)c3GZ~k4?REbYPLDG2GElYmOX=j0)H(Vm$JPb@jPCsQ6Q>nw(xYa>CI+JhI!K zaystXo(cWpkH6>I2oG>YRpd2&;8K41TEkVJzr&kW7x%1huB|JSf5pW*_J=$Cfx-8G zR^5N07B8*saH2%-zW=WJlPrk@Wg<3r-?H=oEGfF(=`lRu^kNM(^ zqu_vS?4;)M`8#$W4tGorwx8*K>|qao8-7B>l!7uCmxOc3de*%SV-#26H?^;~kip}i zod}_e-Iu}b`07$t!82~p)mg8{JKmszdx>y-`ewrE9)2HV54pwTw4ak0bsJ7v9yeUt zZTatCNqxW^+TAkh4Y>StXAUcs6EXO~+0>KaHRQibSGz*=a89&`zMdI`KaTZ(?wR?! 
z6g3mR4?m9V_iu1}Zco+tm2PPoDDk%O)$yq$Voq%kN54CPr^41d2wazC{Cb~a_X=NX zT_wWP0s4Ixoyh=kn=pA5uS`0i)}QYVjfS*)6>x0-jN);LC%cLJ+BeRca=I+%*lTyD zifZr~hG+SB!xf>4gCQaYa8h)Cl7F9s;1@d>5zz1Q_%=_XVW1)lm?f6(UXy9YQ+;$U zl!wY3%>Mo`&jtTU;JqpQx6~jqJDpwyGCA&*W3yY7neN0bZSbHjgq^$@+h0Vp6RWgp zx|+pJhkaUiOmzFj-jg)4I<6OaF}_=J|B!hi+nO}?uf-CG*ey?liJ82A=RTnXgxB%8oQl@klZ(qX@ZE%K15fxWCHS*i2*Sf_t5n6cnJ1$&t#YLHP-4!s zzSX+n*C=V2CgnNsT-}d<4mT!GMF;@muU z(x-)a`W|E1{R}1=V}Sd2+w`Y5h`bXPa>Be4nUsD}Ydt=@%%@a$gc@MikKH{}v5JkX z*{p9bDkFdEG!xtjB7nng(MfFs7)ICg!0G`jH3>L$?jLV`eT_taSN40hy-LUaB!tIw z=P&nC5kVJy=`VSm$rmdi(2H3YR<4S(wfB%6fU*(^{!vK?D`62t&{^zKjyQY;$COn8 zP9iO&IV1KObcIBU(ypxy=Q$9+>_u^#cj4)}_9`VjxkNefN4U*rD;b9Zt;6SuLcLXu zR1dq!(o&5Ynl}}Hq+CR>aPQHB7PddNQd|Qe* zZL%Um-C9}*s4esSD#{e;W=w=6kSw|1`9F#B=I!-T)7q-fC)7X$w82lo0b~4{vVcLJ zi{7E29*6Di*VsE^fCg~xsYbRU+UsJ+%U&@9zSiy=X zB)0gazyi6lVISt^V&TsR_JN2`Mc)Q2!%p?>cu3!qe+Ljs`xV!#t2rM$qj?3lMH<-K z<&GM#DfHogb19Ky{dOp2bvyf8)CP}ZSy?Z+VjK8BSbbY%jA#MFjLSaaKETCI<_osH ziO{EFP4?sQB?WfnPM={UYjT&34f}_Go2TH=Z}VZ+QBuN>q)gq!a{T_~LRl(PSqD0m z#CTqpyj3g6DaD|#5VagU(EaWk&6%%U<`$Z@M1U55-tryzdqSJ8=5|lnEisWAjq~0)2WYlO>p;l7V?^s zgpaY3*>p|B{fP~(a0WlbSOWeQ{RrY-dOdqGIf~xr>@Hwg568vCdmI`L zxh~v)X#eGB_nA>}3YivhxlxrOEN*Js+C}tm$ot?T%6rypSkRBFJam9A`_t7b;jy_% z4Xro7{rAVBI=(xq(~?4? zsvuVWTAFnU)T6CjrJ*NCOf@Xbes8V)_%gRUNo-kxxySqs+?Psj#m?hFeD8lkmFOg7 zj&D*DQu#QNpYTs=o1a4$kS+0lY+{5a1mn{4On=2IRKIVxnEMHbKxZVr#APENaGo$! z1d31PrA>P8+yU7)R0k=gu90k>J5+u4)bpENVHm2bq3M@y;j+u9&rt8Eq~w{39;R3O|n-#rf3d%rE>*7#+N7= zW3YPr37JIZRB<20eS-7YU7!F}@S^&hC0R?!*bTm~k|JE?u%xNnU#36vG(Or_7q-m& zaNh0S@j8J?^;>W5SUlf<_JSx@4~P;`F=g}}E?*2m9!_`%Mk?FC7Z&HJTRgc(jrt<#>(nu`zQbM%S*~pE3Tw?D(lq3&>@CD;878K$dyq!}wAKZqC z?Xj7oPkB4F2Ec+uS8BOK8{U1_k)VEE!N9f|K%JbAtl1LA`HakeAKl#SXPyy~Xon-= zr1~6qJzBXgA0_rjZ7DCSdF*>!Ti<{aRK(R&zUG4_?$H}}>20}nV_UingOQX||rhMszF+Nv^FJ?2fn5cJ--H=66B-#PC@rjqvdgJp{ zQFB?ihduvIkr8o!I?-LXJQ+1DMT*cF2Kjvf%>zLZqU&t+%Ya4{N%*z6l`g;yU|HXM zW|La~c(j}ZJlDNx$I9)jGu-%?vVh$?T=GHLz-FTh(FW_!N5}Y8Qz_*6K!t~h_aJw2 z+N;HM*++^UC_M099RNrzlO3g>3sOOp$K= zb7rBQZ%vZ5bm2*J;qhcuWMDuctvvElP2xzPrW$@3Ai6|F%|09#=_Tp(Roe?->)+{+e7X> z=K11#5Bb-V1r5>Ypo)9)cYIiXc8JfTAe`E4i=7U_QDlgPje>8l8qn|Cx`2sNIC%?o zn}8fy&;%}^?PonEeleIYhWV}e-U<i;tt-L553*?zcr~7IOiJScCm5%hw;zA{N@pSEV*uWkgYX*r;$sVQ}=e^ET=Z86!qkRrv zs~hD}IgpRpQ<-!6T`8&{2A@JVid)5t;UM9E(kISs7PRFuB4B>M!DFRH`Ua4;ie8v- z#Mgg(7coi{FEVXvZ`aiB;-6E5p5!e;gNRXkPDWG~5SF3lKkcz*vpqY%sNYyS^VBK_ zcBZB)J+((A+~DW>_CXJPaCqLWl2lIA;WX)`M?T*!2zL;ylRWVLk=Yc*EMi15f&Pbo zWrX*;fWY3~;+4kyvpufy!%f_mv(JRd=gZ{rLu{`H(`zW$;WEm>&pVCHs9RKe@{Yr; z(`BJQ10XLw^Zi4bJt(xF#?MZ_uM`qJz0hK`zMHLjD!uo-R4!n}%*i}oFHo*#4fnK! 
zM!G|gD)R=>I}@}WHRYoFKlMeO3wIUFSNG95)=M^j25$eN ze6Qqoh?jLGC4EMcKvW_4V$0xrx5IVp94O@o1N*mz+79yh>xIrqYa{Zlj4yUhWy-z2 z3sS2#P|p_jAYP^GlWPH174_hI|6VsEsbM@v2JgZJM>))ooOA*WQl$I_hFKQuiT0Qu zEhH|u>`R^9(r5#GmLKg`#I%!tI1t`i+s$fL2 z#N|pn-QT$u_@lhbzD|Yk)b5gPM@`@M3bZtJKoOr)0f)Gwe$4};F-W6c!5`$@)%d&) zCx6hNkCFdL;Nr zy8KzL93&W(E>j3c$vF8ZB9iq#WHH*DBBBrOAKQL?ZE=v83@5zP-QXPF>7YlFsyR4TbZOa7 ziN^buEcVOcoCgQv9)Tx+mcFU`58mbKeJEl}tzrS#1|Qmf@wt=1PWf%h77~|F_=SKf z19Nt5cw#n@_3Y2t6uwuzgw(w#7fk)234}`gSlqmA8#QaEe}o7@2}3BO!|Qefrz@`& zcYK-ca^E!#(~PCmzSrzh9gMHW(~<5(-_1FGwLX}btGCGSeckDQ&y5a7%24;!@e#gv zps&N%w{DuI7J`B?(}xsQ?<8Kw zx1gMHa$HIiEHG4i?5R;G&=uFKbgOil*$KHX+esK~Pt3uQ`c&Fyzzb2Ma!j_+cQ8Qd zsX6A*2S=1vf+gI4Ne=R@Np#yLVBu~7hVIB5iqkmb{tf&2n&8j>+l2#silv^n`HQAt zhG09O!*jwrhX6orMWPX>Ah05FEgZg zT?f#u^H&<2r#Gmu*=MKfW_jHv;@&D>+By>H>4&pv?fPoH+>2La1e$q0;nRzIBt|fN ziWtg<7e}*v$#JVpXnItK@g!sJ9WAm+CR{kGh_Q*28133Xq7VG%XXnfBTfbdkwwPq` z`*h-6bm8V^=RbFc8p6eL72Hhq;-_4mDW4Z=BjWK^>%8qL>Ui_59KCb}%*X48Crzh5 zU|F(%ySgpptDH}ydlGO_RDJFpg=()DJT47YE5a!)fI#(Q?ZI>;^C7Z`t$^_XKx1xxsso<_VaL8Cke&zZRIJjj=3 zH3ike=dRPOA>RD*+YaIaEOPQ|UwvAiV)7n;X-BQEZ)t(M^afiZVA%wc3D}dAbh9Mm zGlM$1>YmQaxI#a3X~hY_zq(kFiND7`ZUC6@oQc_3f{Ci7%Zbz)VKeB+Yh8Xr_9I#! zxd}!u?PG5+ z$Il~hY5tR+7=oN(jXt}gvX@&-@g7D0>^Bg{Q+4@-jwtKd1>)~xViR2jdg5+R+pAJ& zqp?o6e0Uc4w|NH|&l&bZesAy7S41BErgHMuNe(i8tV3O&WXaS*+KSRYY3Cp?S0^LW+#Kz5U|4sNHwgP7tNMEVgQt)7qoInY3g3YDe#gvHrn+KUjmPGVUpkYA&m6#4M3nitwzpM# znpmPWv*gHH5t%6Cxs#61KJlxsS&)IL=hXWxD3}w6Yx%~3`ME>a`F5O7k~(XDE_m!3 z`RU$Ia+a`QSd|1swv6~Pk84^(D|k6_1oI^w`E|*OFX+Up5Ilp|^PLW^x0C9kPj#eX zy38X#hx8jtdZl|iqIw1I5S6}H@o zEJD%Ob1dIkzoX;)wKwUu#hd7okUO8d!;|cGOrMx>+SllRmBGB+jtYQ*-0?Dp9*sSj zw(09qS`c^{<=SBl&E2^6_55qa|wmI zHnfP-j^B&ro+||Q>K_b$JWmn2p+1^3U;MGQg`jy25aAx~TkVk0|ruq#u4ZZM_3QQIBVN8CJL zSWezFAZ|`MctD?*X@B?6-xbDeE9}-AvpGwF| zNS#1DH}u2ZM%IS1Dxi1X-W#*smQ(qNv6KUr#2Wka7JX+A%>}xld5t_aIv&1U=3Jfr z5WLP#eOF;hcJH`<|6k!5@7vBe9hF3;kC(%{_tWc~Ww>q9^1(~gP(Qyf&def&!09eL z20qm6Xs3g!Nqn}Ctgrg+m^|ipb45mn(0~07Y(5e9I;`AZkhji%b5L0NZ63B}%MZsveAM;D zKAiULiH5+6&)<960|U)V@qB|H_^WQ-b-$gh$ZfBIMC?UB(Ac*?q$pL6{xVOB0@34e z9}{~}0M4sv>hlQ=A^ddgPxHAlh?L|qyp77vcIa9t_Id3Dhk;Bf!9@jp@||hR$@2+I zA#LmmT-ybIl%lh7^4_BRvU=ia<%De*H0Gy9iqX6LnsyI~Chcbe*+sgMMzal6f^EUJ zA~{f-~b|t+1ICv_F**HJ^_u@&2S8V(aH$j{;iH$ZIxBbmd`KkE8%s9 z%$4^-Homixm&FEFj&n*wOf~JCr{glxac9*`)Z?*jpEBD64Qv*eC$H?NNAI;wogd_- z`QVU>E@)})uFq$ECh)6Px42`HJJ&iN9p`OaRp%|9B+KNN(9d>G?+0FzH42XM&zJ2= z?7MD%A^=nnle)iLV29T8^TA8XM)-7PP%rfPx9yP4s;k}=Kc>|PF&q-0xI_Juiq$$I zLHuue96o^!`$714}|AeT2=;RZ0%g;F>k( zDYI+B6fS*&@P7N=ZLn{Sxy_}{pfiK7JyFAj)`av!*01teUL)s(ktt#roST>C(^q_g z+J*IdEGl=mc+yG#;Wx&3)oAJ3T15E6@+6k(+A&aAci~5Tx-0fc9}5Vi_OVIczp8Nz}NS)xD1F))@zXm9yyi z-l!t)iMxw2p8H$%5D(k^O5M0?_no}<<=}(obZTtBe)jyi9DcaGR9@`sgpjfO+S?rb zc+O0O6~@RiubwsJpYOd!77;jlz&Di$;kS})U<2uEJI(J-x5vk@*AN%OLz=L#v)St3 zF}HTZi~`+MFDo*G-4X)>o}V>;l{Y~a(}IJoFqk8!1?hYHsCdQvDWF1HE>hTBvb?E2 zZO~kq=O{n~H>`?(S)JSM0XtX*4`5@&l)`H&+q91l8pN%CWN;l1aQaA^%YDyl{Mi`@`Mm{@BAbnPfBXB3q>d`) zFKD;IJAF{+<+#&b3%!kchO@>tWuZMa5n%*XeFIT-Z3hmN1)uM71A zhcx+Iq`Q?t5D%WDh_ix!`vDmc=BOk^vw&N#*u(7JcQ-zL1xx7Y0+W(nX7118vwKpp z=G^KB*vwy9JXdb{z3z-}Y0UWxgN_r` z+B^ro>|W?DGdoHnb|kx8#Yd`pnKeR9KaUEes#%`T;`9?a@2CQQeZ?MrAK5vEHwV%& z(L$h%W^ROZbr(d8%JQ}T6mgCzBCv$zqZ<60<6?8By(La^ujkL_#~2z91Y?Bx95!kW z&*Dxji{Tnybvv*zvCQSgdc;QmD&Z+)h*mkx@hoY5qg`GW5nJw9dty!%!{f{|iWVnq z7JL9~IiA|jSK-Bf^mC5q%Wd3o*sa>-*I4aPJ>hPE7uaPCKaud$w2DU<*fk#f51rq( zQ4mBr7}_OeFL9ggabiNfz0Ult0@3v*pEwiIw76Rw^#Q;b$OOTeP!GFeDR3OFFlU>U zef*B685^BBO7E4bf&t?W`0326n_=E69mN614b{`h&wc2B#5Ze)iy(_`$Z~20x?o;! 
zwgWF7LOu`*+$NDL?_77guxQ|igU5d?Hk1dA__V%>jSohwE(4J0lt?xlj$p1}d#A%v zQ^;d|pR?~Rv&_ebn~qU#AUZpP+$o?FGsr4xChBYro%{{pMZebLIC3`97^jr~OS7aG&1a`{VFB>~Ub) zqpJmX;uTnGjeGyP!PzMp6D_Xp^=uQpWQz00BI#TXjoI}1U6%XyVRr$``u2cbl8L+Z_IPsN;*bYlAvB^| z&EiQ|Dgbnx^nF{-L1Z`)e`;g}r*^2G+r#^Ra{=BcN%|d!8=tBL$vR5c5;oF7-R>C@kg<31x~1ur80B zrlRLTOXHKHGp-vwd^vcm!yHQaBmZ%EG({|2fFWPTUvE5u9CA>&_K4Hq8=u= zk_1xhyUR+`9|=ZuRytN@0UjijQlR|29yC3aH)m;O`rbd_Kg6WjoE;sMO zQ8#EYxqF-raoin*q=V^_quZQ#+!m|&r`)D$2@i5MxK*#Z2Ja=X# zG$)3aKixs8&Lk7S0X7~av8Ad!`ut|GTAv|e1AVU z5+7(S8Ac0j*#0c66X+%VA3}sJcHeBG1(Krps0O&bi@#(V~w?&HiCgZcK3M z)x(QjZXOl3@n<-VP!&96Y};Vgc{rapRY^c;k@Nk-#VS6gz3BZtySH3fWprfwvhgCr*T@Pgu#d0ij`~>j4VEXy$k^cZ|9d--9PVJ&>mRq z)3+u%96hegP$CFj(iyX zD|HF3_#*Fc_dG3D*_{3K_v{-N9{Sj~WD}?f_vw>nd|8N{o}WDBVsBAp+SWB*RA&&j zn8D3%p`V$$W}cLPWf*3NKy;j;NySYYNA*~GLls1S->6*b+RM&$$Ye_Ng>C(&&NHMl z9=(Xp`D<-n_?5Vw=^@@3h%W+ZnZbI!h?iAr+#)3BSE7itB}KV&6MBRfu}boigzJIZ zvpKms`+c$ps+Ji!Xa{ccdZ$KyQsE1vksmh!4%(U>-1x!rKC?uSJb&5FP#|9BGxl+g zVXHZRD(Gq5N@v#7$M#7JJGpK@?g`YbWFRPodQQf_ZwHP960$n=@6yIkX=Da-yHXI} zwQ$el0t3>bzZq74E$qwtGsCpScBe(^-sg^B=sY@p=0i)!pnXuKwMADSI=am7cIdb$ zrYj$IzUEbd4TbdqSSkFXhDBaZWc2#T)}XF`Wxiu%8*V`fY@otRR^L3GY-DIju!6a7 zm^fsABrQJ%k4u$<*kM=zFzKoPaED0-E)fkC+)10`$JK@!-9_pM3kY1rxOCU3vUw1C z3U%_0Z=wG@Bmcg>g~=T7On;sQa)^&*Ost7qyf0)wKCgxjeTNjM*^|5YKe)(;Q94|I zjEm_?$WVh_FWyh+ckKp)JyY%JNf?;}h~$9_5Uuw1uUAd5()5%Un#z9jBYu}Nm9oEJ zF|~%EBJW%@6va%}JhP_3HT>tnV2Gdaof!Y%+xw;|4ZS~+ z%qIN48}z|{M-E*1(Fa5D{mfLvCwTAKm^FeIDWj}@{+82TxFgf^Y;fVTNGldHXGm^R zc1P>T*AaDzfuJ}yNi=3%nO=~hz+C(J^a+$<=<3aXy!FBe zcX&fEs2P|h|8)J7Ha-4b>s|unuQt(m+_5C_ZS}@}TH!}oJAhhP`!P*c@-l!!_IAH{ zav$es+HT*S!Jc5IGZAHPTd3~Xx7l-N)#_u&&4MUl8!3~uoZQFuy>gGj;NJ4}`ALq^ zNuW2fl#iSEf%mSKh-O2O;BEPT4QY=t;7^H&0v3rVR+PP^9k=0}ADqjMK_B1ldVx-c z-labSwL53*;+*O;shY)PWtyhcCpq#(q{5$L6&tv>f48j?qnclin(AbSq*#mXn|12!Ay;Lk1gfiZ`{*GxTL1>f$V_=+%nJYu8}& zu9H`BysbjDl|sa_Ij7Hmm*=r(7NglY4wz2*ZO0h(d^ik;MLW)E(Hs_6YoSqFZaeF9 zem&;niZZOv)lY4srNYm?Xm!UY7}j{IaWNe`1+GKo`|CSx>tXc4Lx8M0M&F1QlGcAa zUoA*O4&>TNhJ`AB7DFYA8K_xC%S0G8KPX2!qK6 zv;Au7fXj!+hk1kOzW~Je@^PbERz?jeeIj7oje|9hd+R&jOG>sifb7J>$JvXO?CblB zb@R0DY3%jK7X0e2nfX%oddL=UW6bF=x-;?ozUo>B^DB( zcTSJ`IWUm--@P_}I-@tS7h31eV~^_S=VMNi;~*OjVq|0dhT1AWd0o47i!i&+k_6J zde-)OkUqSTw=y-o=>U3d@9+~ig@n-&cYDvv=+g%`5})IL_qcdvrT+f%3$I_jnFG&c zH}-7Y#r^GTUgJvk4h8%bSM3BW>;oP<)Ry6w@IBf(#Yj*Z)zkMjk3GtQ>+h)09mHCk z_>$WmF?*lx>hI|`eGnCSG-{zgVoN$fTIg(XJ=5Efc@d{?bg?+B)u-qtkOpxI_hI(y?y>ZbPTYLNkj?S1qASGZcq>xmq1Br&i;)L%g22Fr+_@eH?8HIR~YLof5xhaRr_D1SXRd?&4hTkKomjwDX z4{9HJik8(o?QptJ1jhGpv?HGmfHfu@7QnkgVm^N;QK^faI}RL(~I z$AZu-(@pgYKY?-M1BY{_#GQc>ogq-5qrVS-*c~%(7QFQAgVX5c)f!JoYjsj_q$hlc06C`Vzn|ts^VOY<3fl1{3* zA;?1bE8gv@5|xErG^%tHw0=kArUv1TiwvYc8h4;4WMREC?;ey|U0#0i-ek(Bq}w}x zHQteLDn9*cDegf-Zhp-7lJS_!&JVmQn(VdiFOF=jFpV4FTCw^Ip|B^nTs}%JV__Z8 z$&q1CHByvMyN^QdYwrhXxz?v$5LZv@^Oo?kz%ef-B}9o&^NG!Vf$pBqp!`&L^ZjL8_@@P+)r)f>jzN_Nz6@ab^A*0P<$=!%b^$0 z?r!&Xv2Z>OE1c}ETx-aSyOezzC%Bd3lg{EOY8jr75I<}Yq_LB&^LAR3U9RAN*LOcQ zlW4~GFourh6*c*ZIHWru+6hj0VIj#gI*oPY7X+?R}trbY-hPU{U<-i9qb%&BvUr(5y+GEp>`|%JPUyCK@?D< zIUEFS9tgp5WUSXXvK_IV&s%_S-%}F_`<6uQC|okJXFH}yveYIK7wx&--lJ4t(pny4 zd2M^xDB539#g)a~|<6#(#gNc`0mWV~ykoj|8Y zG31qUG)dnh;_7nUzS&VSirP17BVU=}m$|snDGoo5CDRD_(b_YAKJJ44b~6do1FB!c zb_ZTUc}P~{05KAxq=AME^f#hY1bO&5a zmO$%Iai+vaA>(H{QkUHRtV#a)^MkFdhsz3)&oMrXRjS^9oUDS0)by#IzQ$GePdoFWkZ)iX-$`yo)b-IE-kf~B+3T_RK_pwT z5$$7edCfsvEFR70CF~nR3~9oz+T)hXx+j>o--_u_nj|mJo)lO(9QnFg7ctQk(52^% z9ACtIc0w*+&T0(q|3Itb^p#RoMR5iXDNA8k+~TP2xKm_*`4@Xb8n$`PzbXCzK=Zbz z9!G8P{kMy1x_dh^c#_Gzg$f|Roi4$co5~&E&0V!4H`loMnGZs!(As^fzRytLTp}Yp 
z8bhuFV7+Yl+V4*%cSLb8M7?KzU0XG$xl)Rw8%!+^CMXQwcwU%0sPnz*&k8@^p81ar ze?kiZ!#rGnFF8$_#^;V!Ht8bmcnr8x#hnrh`=p^dy-N*- z^5?aG$ZqB?5l(=hdqjwaLqB6{tl!`J-v_0t+I&8BDZ3AkJ6t85aU`Ps^YVOtHS=}8 za&(Y`Q`lV|0o(mP6N3MWaSkM#XpWRJzh3;z@z!@KoaTQgh7@m z4IrE88Ed>LmA5S7PCaGxl~b7W*#iRI4j%P?!9VHr&x<`|+XjT^vF`USt;&Qq+Vl~& z+~{Gvvwxdc;u@+<^!R-dp%bqL1t}HwFN2!79*fl{snwF9OScR6_S||xD4XmN-Pry7 zRHyy6qn;=JY3m<5$(~2T)sBuNd`-%-T?^&c-$3L)t z2e~uUPOv_oeAeVAJ#k`08-Uab3V8(YNc_mr1?@8fHp2pzO6za*0{7f2Z*9QvIChbJd{1^FuY29g<*V3TtN{ z1M$#B+}&_>C5WxZWjT3B&4XLEYnbB`Z1(Z@-X^?!nMCVAr8#t-HpvJ$sHdp-*>~!T z_S%ejS~+e_c{~czgWHbE*)o0-kyKnC^63qrm?KIy>M^Hk_kM!&Wcom|w@-pCrgu*w ze>gwDtr-fz_tBb*BK$E&z_$Kwql3}r7#laef& zfd_GA?%ceew#)l2`{L_&7ztlBk>r!}*h#xnKadJ5YR6G&v;gxVNJe|i2U(EEHnSO& zAFrhc{qYI$y?Q{*ffGR6=EhEA(tE71fBX4$>+&|cm~dFIURcUuoGEHyI63rZu+}C z2^AiJ&f_%~yMv-L@BjwyNVP9me7jB76v2Cxj+H2#j=hC8{Z0a;%&sk^y}qwxyR9j#$4Se`X7a`tB8PFLB0pe3LuvhEp)-MZJ-SpXG2xXY9PHPB5 zN*B(1;VJwQ@n86Pv&E4%M)W@R$|Thva4-3icZ(maB`iu2oz+|tR6GQ3Ksn+Dp6=-k zj8UF64{yF1=j3CK((T%NVg-U$2}>-Qm)#|D$6xNL+OpNi5q_27f8+M=vGw7y%HpMS zg7_RxirIYV)Oxgsi*P_9jYzcp^Dbi^YhN2mU9!Wiv*6?z&|)o_UHU57^C-o&akgN| zFU2A7KjofypSMyPXxp&FH`bw4AY2g^JU4i4lWo-)IbbkfxD?jRcue6_xu~*0abzF^ zr7U&8O+ilfGn1c4e?AZeuYQLI&Vj{6;cE$gt>yZ7^<(Dev2nBS*=yi9I?~kWL=DSh zW2D#z-)tE$qXg}Qr$cXBzmQrLr`-*B8{`nuiMwUEX!hB4tj+@inBMBSRtT9Wj$3-e zl>6#D;!ZqY<_a8&P1DL1n9v^Y-V@U^#KtVR9|Wv1sA^re*hyoG(HDuAl)a{`EQUF;{En)_1}( z(CY*JXt?DoVe=?_bB4X=iZe%WnO`$IrB{hanODUtf6{?fFFRbcsnbnOq$4w#&BLx^ zQ3&ngL%g@^*~-43>%?e2bict$h zyZ=(^Q93}iucoUy!#Mm zHYy4Q+zU0X%95L8^!Gs-+M<%xzrTEw99`UOe>1HY5GK-D_-!}dXpKr+6z{3Q0!Mpz zTDyL-C=4MBFOiMwUyNJ(a`+HGOzPm(s~r8y^VHq6;duyQREegOF!G=8%nmM_Pp|2I z;v|tgcQ1NEILVO#YyQ0s9(%Hy_m&E$l=8e2dTiM5db^Z3a8^px${IT-tYaT_>Fi&R zf6FO;oW8g!Y^Ur%=WRdFrvcC)gf>P6wz`MM@3^RM%R(Nox~XmlFnQ3CSik1C7k?N0 zV2xu<0u;w7-kralGIxT8|7IIWYIDJMbaaFJu3C;L5h&m*rta z(Hki03ZLZN(T(QT5RTx5U`KzE#7o9af3x%AB}nLq95r$}RX&^op_loSY|AyGzDKU? 
zCH}^Dw`VX{+{m_P^K=IPhQaNkoz7wIj+AFTUnw}^AC7vm=BuVTe)AcUwOeYX@8x;m zu%jYwHk2U=VnXrtU`5w!O;%SPxxox`1kdd%SPOkZe@hw< zS0rE=_{&55U3F{b&YX2x2US3UtX^kpc&Y7ZZ9r3mFm>b6p9Qe$zBWKc7&hU4$s(TG zJA)2~oVRHj7u%a0<>XX0=0QIVXM1;lL{hCv%_Auk3U@Ooo?IB6|ty$DdH z#@Oz$Y;S{8v}>ZF7a;7{@2}wQbXN)}`I1QyXQwaEsZ`*>&LAPnPgC_;e{<{$N=8jE zqmoa3HpqqhO`Si;1^Y8!$>{YxfldhHAd6k?yG4=(I{k{MQ~kW}#EaklLkP4m&*y$G zAv8mi4}$wmK{p-e!(_c74~SqBl)J9xb)7W&QvYN`u)F0kqu;4Q-S0JRJAY?Qsxnp0 z%G&+pK;}Fyt^PQ93*q>Qe_9E6Nc-!e-Vr%}tMTfH+&=cX!)3NaOy@5s&g*Zf_ufjF zn^Fv z+Q?puSks|2Y8eDP%ie$7BORLeJJM8Rgd=D1x^(=sZ^on3xqFwNYb zUoxBAZh52)6t|mie<7|c0US+`ID~25Vsog0eXZrqbym46wcg%n3%iMLr~dm8tbOET zTfyCSXB;hr)1|d9xwW@>{+^K4aFFihXHQ%x@E*0hPe=4nB6f<35WBsUD|mfrih<3y+K&dY7zf_Zv5fZFt6ZenC{{JJX0U-^xRLe6!0fdg`_FIYp%)% z%F173U>q{ne;rxa{$8qhvxF7ZUN&>Mx`sNVJL-=m;=F*%L5qK^iFDXC=ij!ydq z75xk0%vGmE9X$7qzxTO52g9accYs}U$wI@@ygQi*0{5ge=7;M_pS+U3EBy5l-?xU{ zh2*|JTZ{E-qv^_QX;4f6s1m<4T7BJZ&C@?Qi;o!6nK4 z6~Tqa+pE3?b%m~THGLDzLTd%nI5-~p>-`o)!ST1V+XhUuM+ch|y1d!pVpjCRlv6+O zqoP_mE74xxY`TP7C^kRAU6+<$L^2-&XE$!1N5u* z36yn=$;~AL@eOthPvdT}4MTilg6{+%Pls*pE`VFU4sJM+u?16@;p6-!)vl}J`!Zh{ ze=z|a$N#?_^T<)P_@}-TBvVOGqO6iv6Lixbb+|Kzwa1#m<=D zA!Hs+`DhA0JXT2<7j#af;)q@V67)DCAn*lEvbhn;fzYCQSf(Ts_bt_XL@`N7yukI*s=nEe?f1VP9?WYq{qnOKlhwj=MpoT zKD8#A9?tr!i|tjv-#@Dg?vM-OUp}2XnsWgeM+g7J?LlU3Dq{9oPI>VU)bY57@0}qO zi?r93!bK2{nADO%{W>@$6tv<rL1p2e>NfB z@>3b%dXk6e4SHI3@PPOh#c|EU`3b$<@w2!4@N%<%J{5iABBSugy0UJ?@S|9<)G!Qr zmI-`jo}Q8Zor4?j5g;3$f7R}ByV06l8kQX85I(FGvgy_ll8w(c-~6-Pvc*>zKWzlj z_ZNwBT9gZ8yFKNPB(BFPjGo9+e>%_Co;ik-Io3N!G)#^Z-11UhL68Y?2!qx{kaZlS3C*(cF(2&1e>SqL9~|F1 z_o;|Sj4rxE`s?g_U)#}PM}b^lJ@1X%ElY1c0E>07)hB*>t@-_tc={S%jzN3}7ka~L zV8187Z`ofOyh;U+a}`9MwHcY|-|{&cTB`OXE8V4m`p63upd@-VI=KkdN?F)*p?zH+ zpsmNMK&h;S_fh!&Eg`1ne*&fvO}AZ6=TTGFdWJgm!gF@k%xsqn=CkwDTd~c6 zuim_FQ_rPOImbsuQ@o!kj!V$#Jvj!0IbG!W{Q7uNWR6^Sww%MGueTjsSWTey}w{5uy z5taG~&X?;TEWrBG0yzA3Fi zgq^#vm_RM2MQ)8PzfAWPqVS#oengZgTtLtQ~ zjXhc7;h~&g@gxSkS%({@)gX@Di>{<@_lj?&<$H5IzH{o&Fv;ZKxfVHYnK^TyU z`*!N>$~-O!8kH`Os8#CBm254n^zDX(Hr&R`2m)30?WPmWiX8IqyH&TPE*iu%jv0$jJ*$gQoPe+xxeM+u{ODpmPhhQ)b0|5 z`HM5*NcLEzX#f}d4W_0p^3c&1r0Tt}e#!(Pe{x!4f0Da-KA3&rNm9r8a?rPGFUxz_ z@zyKxsgxutM`0CrL^d&l6UDACg*vPpBZ!fwc zHM7WPMfVrdV0=+vaqhK#RGOy!I!WiF@B>Hqf1$sm6!3@{=VI^7r|bRd-1B6&^8t5p zQVTZrQ_lX>n}E(Yx{B^TNoWOD|tHe|pIIu7p-vN&ChH>vczFAv$8wae>#K ze?BoF>YUZgf0@Y@)}+HHfN^^N`-%6v?GVNrlk4il-#L>OD4D+3^=) z$L`weP9cqpqRI$3rtnr_v3DZnN-{TPPIjR;J1O>ISxyKZrqXc@_$m^``UeWswB^;3 z8dEVpZ~X485ZxLX5Qn^uiCk&gf+)MNf2q#x67>}sd=zrpnaV;%sfpME@ZP!0JhV}5 z%h6YN!}+mWB-BVB{OdN?`0)@3tqUUFUZ1B4p;|V870}N3lU)H5Mg+#RlBbj_lxL!y z!1;1^V)f31G$h4DQC;>JIC(;YTFVu6Vi8z#@f2a`Y#IlfsQ|>R{e$eYY#FIye{nlp zlp#7toAUc)lfS=?(vHcqZyGclzv@L%I6!uR`=!iltA!Hc8HwaQf=Zbn8A&^7S^M6rE*l9{k;ztD#)ztf}s$whm%;=n!< z97*K2Ab(!wXl1}qcpn}BHc_rQe>~HLYZ)_sf5f5VU)m?G=p zKY@j=ghU-rWvU}M^ciMMFof+F-$l#lLBh7kh!ia3J>JRwAY2b@^tX6kYHBE_%n;(8 zX#q%1PWD}pvX10rpw`3(`4F|TI>+$%&zAKBW#iq+tFCj}&d7FG>JG_umu}O!kDe&9 zH%O#he?nmmfSf^DpGTK`e_i+4Rjmtf<@?=JT&SJ1wxoSNvoSZklY-O_YB{y-#FT50 zB3*IKm*m2YswThdx}2o@_xlm<@h-8mn5!j!H~WaeBvXHaHI!FFfRHTY^@}C)acs;Ii128g zD{SB?!c-fwFn`|+`?vNiaI7uVTLfZ3V>zef8MV#j!Ga9($Qa|iVMM;ZPfvm)hX zK8=9<&W2Q@2%?nZRCrE}$%t;ho;TJ~TY&TmWJnrd>z@UAPovP~Siz%Ad=)e0|6H=+ z?QgMj4^bDpuikZ1f2C|gq4%NZU!&g?5nzsZx*cO$Y?M7%C&KIV`RZZvp&qB^6NWRq z(%-`BqoU^DAG?Wu?-%>aUIL2(XnhA&>!Mqd7P6|fubPJ7Z2L0&8ud!wEVi_-qgGd@ zbh`5XWRZTLu0|+6^2#o0xlzW8scl|v>Ua?CIFpqKe|f*tfAQm#Bl70g>-~3bEH1Oi z_`IPozxvo2_8CUDlW_BqDTF5!iHp}*bfI&J})>8}!XAEi9QyoJox%>537#xiUU zdS4nUlnWLB66w|LTcJoNMF2uy&!R`Z9$#6OmQi6BtiQ{;v7&+g!zQnGT(k5wS7=$= 
zsiMda1)&u~e_gh3)+J|e(`IhErMX?AdD6}@^1p7_(TJ{i2}}f9X^J&!U?s;$)%TCP0xRPmKYrn2Yoqa z!f!fLLA?#_Y2T@Nua*_$5;T*Mc|f+VN9><-^xj}^qSHy%x z#V#Tb+qxYi^5b{2%>9nizMb>TV(+k3TzlUDO_V##0-^FO!H4HC)1p)E{#3f4UjQl& zMzBWD@X)mEXLsfH-j39Eryp_Dz2SEUKnPqoe*uy+nt;moDEfv7!j-HwITB3fvnv_j>gz5l_c+j3G5DoF8Wpzs>U7h_x|%Lx+zm4| zW?2F2k@YhxF_^s&XFoV8V8OZkNyy{AqCVrkZ>Z~UU)a&YNU05LZDT6EpYIYsQRPCWxMdd@GV z?|9l>u=70I@nr_FML>x@w-85JHq+S;{hwcAFN@B;s!-kR66tT&2p5ryLm0MgL8@PdGPSW zKm<|}KA7i&Z=^WwES3hSDdh%{MJs(>^7IThF2Zgr3Qef_oryJfy!@O&Q_I}3eb9);VktNh?>8iiD(^F z@Zt^Vhd)0-!fm!sNW5;P-aY@(_GU|pbL+a^tvGyS2acfc?C^nlBoH780pbhPRY-;c8 zy@&k6?0W5ii0jwJW}TYx-d$;>cq5Osi9DVu&0^H?-xsSe-q-_8S>MNxJ^iRcFThh= zS<}~l-&9q$oc+G3%%^T}!uZH;FRyoo`vH23CqOdeZfRwq)7R9>dpyB*SmrExPn<)` zaS9UtS<=aa^w#?Yf8}R3sU?a6`NFWL6bk1!NaN}Qk9niLK1}fr0p2-%Ji0@iy5yd_ z3Cg1#9QuWY46je^{U%lBoWQF{c`wVcysW&!cl%sF z(gtqVs*hx*AM5?9cR2g5vxW1ZbVfTyearBPSX1R3Z3;T3e_P(QG2rT|;Y)cp5V6p_ z$F&;Z-1fNPL|Kfck17hmwO8>PUzL4br=eEzqxPT#5Y}g^H+{j*-0dhn41dwR*wXv( zG;7!3TDOneF^1CPCssr2m%^{dTJl{~TE@^BJAuJ&u+`WsY#)i<-W$zA2Q=?ySUH51 z9_s6j?y)vle?RS+9!A}D+;xL;<5--R=6a{|EX~pRd57CTE;saIVAfu{a_8jR_?)8E zdm(MVj_0#Vqpl&dai~ugUCeJ@7nHeiCT%sV)yg93ou-PmeS}iy^*qe0iz>>0jVNIV ze8Hl1@6xL_OJ(T-d}$1TvoV+5z=*IVOg83aTG?Kre|9IRs?pd^)}zfMiqH@`R7<;Z zpmko|uJhbNK%rurK3g)6c35-$nVBpO=lN{v$9OqF1=Esi4fDpvY_?fX8bp#ij<-ki z)IqC>K!81bNKeTg1kbx8SBLk@G-)`G#S}Z9O;K6~qwb-L#CH8=;2~G8Z7=m8Xm1`5 zMLEune_r*n#Q$+%N2Xio?34tr`t#Tr_xp`;u~1w~ujb@pox}w4C|fwBomEVtAV|dX z=cG~6p7K`3`K$aK%p*)#rX8fMn`0ZBu2+#+MzM2i6uN;@iL7E1wJ)VuEL9a-bx_7$ z?Dh|J?W*tcYq~~Vty^V{oZa>IwQg?b2kK|}f7M^D?~}UMZc5-DPs@58NDpqA3uAdM zCWhB;v=&;kgcpK#U96_<_0CY**Vfuh<`LGvMyoe9(qU^mm%MGWil}TU9`y>(*YrI0 zYu&DWeRs=ccYyn8S83f3`>0C4=zc%?rd&a7)C>^gNUplH+)fsq2It|p@9j>(*qI(K ze`oN%aK)2Za$hWmVY=&Xcb#=v9fy|%^PA=h?}hU1UN)cYW3AZIf^pA+@m#baYrL16 zQR_ZY2O8RsbYyMXy*e)=g7^4#5%`+S0r=xdXAEc17IkOMlaDl=v3dy zZQSl2YFC89UxT+KRUKQMpfk~`b<28vd$-o++G`=SL8kHC?5|xbUZoBKZgmqwf59|@ zl)!k;cjxWhyikAPD{okC5S20PjI>FqT2ID8vdcFZk}N&G=e)1$ffdm2!7}Nbl*R*)-Z0tNZAKzBG36 zTCeEU>h*x`!my2nrG#+)KyD8Ae{tbn)E-Gym$6RU!0!gIqBMt5(JZvjY!Ynvo6lE2 z?R04^T~LB%Xjd$zSF0PpmU|w-E;yESy)f$ODD$_&g0Q(J5m1hUIG7vSt;j@FSMP)6 z1iQJ-5(L&uyV~p4L)(^)A*TE|?#qUx@QZxmzW;Ug4S(tM>?wzIQTwflw zLG)^Xztyc{rwrs9dbhN#t>^+>osA_QXRcJ2n~70yuk7!(Tw6+TY@>9Ab+Vo6@6PVp zAEZXpSYN$yTAUh(usdokf2OIDCX+P55mDca23EgD%#V7W-?kPHb1*d*tw?>Xhh{@L zjlILDtG15*{FpY9`DDACPj07aA+7@{}MS=3%q`SlO6kYaAz=pfox|}R4$*8g@ z2G(fT9`sxF^QEx1qxO4(0sQ0bK9ou;sWLTs4x{5-nUyf3vSie5f1?Z1cK9|xaz8#! z!Ld-gA2#UAOcDn0ox|vPSQSFEo*oY1aouv5l*`TORGpgD z@LjBUDhL(yiB!Awgf_WoUGc$w^q*^|4-D|tFRnu6t*QOAdy(RbqJ!m%c6Di6Rw^iW zr^96$ueWT^vNvp-wfi_yr=9SP875hPw99(PNp4l(A=#BZ33PHKW2(`hD*mZt z|Cj%d{ZdZee-mH#HOC#dtLyfA87!;I^<q>I*F$3vjW6$zDXtxZEV`RbuVz#Y%w}6^q~VP=v6$J{JUTEjjlIu2WdIb)x?lyUQkNb>*ch9 zwP@pna?oNkaw@?#20ZJRdyVy}bB$cg&6LAXZFuV!iZ|t6=d|h->^Eceg=;2&zS;o) z_GP=Xf9kB_Y1^{bL16cxM%-O#o>EUAdhO=QO!gNV54W?f-#X1%ZBTiDef16=l6$35 zZG)|l6wF)WVE;l3#X3GW)rQ?3;O0MA3~Ew;<~%RAOMLT5D-VN;`)KxXm*XgD;op-! ztMh~ZxazI)^b#O8wyX`qtNB#w%F-PL%7y%}f7y~Yh$vf|t!*~Ze?1JsZRfFVj0SB~ z(A{xwsXq#cwU$baQCP$AV%fzW1f%h)uN@bQQH#*Ht-qt|vJN-8y!Q*!5D;NUIbZzu zsuwnAt>*=1I>}?X<1QSapYX ze03FFLx@| zTrPW%p*hghJLaC8Yka;1mM0I(f9mY&>WYpT)?7UlPAkQ+`sXaGtnl5vH=FZGe|xfc z6y@Evs-w_Yc`xo4RA)RZDx`y}o<4dkg84;SEuJg1S0?Ff(kfLdplU}wquK10E|2+j z^Tuzobsn~q3lv{K-oY77h{@cd`zlYQXKZ>sEsi`#Rz@{bVw~F76II^zuuQjR69B4S z!|@7kX>nr z_xfXh60$C*^KEwO&lctRd@x~RKM9PzRNHS$hX(2->1ry^LVcE=9z*B8!sKpJtM{MP z8z!11sb3y0+XGMoM5)Vqe}X4CEfuHl2V{Khai>4-xZ_~EX`9_u$*nXzDDEcM*!IGk zz@dLll5TJAX=n~WgTF2ue0Hp~lBDX~8SdRi85N8*;Z zD()BKk)qmHsh;*_<>;vT&Lm1Ix=wau zQ65J2z|*&! 
z+UA_K_N!*3u+s@@aF2dl@!Acyk^bxz7rWLB$(Tc)_jAXT_*c4M3{u3~6oZ@|J|TNM zCSW3O4u?-y19*4Tbx>0Rww*}~(H1iIENv0^NMC=Q8rqq~$@14?)+ad|v@7NbzS`2) ziSHTLft#*yUCFK6N)nGcAjYgia`WI|Z+YQA;df|tIQ7yGW<%nS_#!bdYl8?c--TB9N?uvgU{UFIhmfY$BGmNqH4?LC<|0rvnu5khT z(;i7KBXNfOjC|j@cwbB#RDlgdvWt8>1m9)_&YWwWz`|KGV1JpDSIG}#UT5o0C+CL4 z8d6Bv?Ye1F9}1U56>C+-r)Q;So^~q!@}R_05qGK77cvf(=N0*Br3Z(DTrBE&tW|%; zLC4?V_p0*UjFgXd>s_wNnAd$mVY!w(Z3NFre=6_*8Ao-uVBL~Wr%|W(+s7)`&=2on z#d#xbnLL*m9~4~fYwoW&p7h1a_YY+}bn``T98vy%p99WxiE~rKSH^CFwTbUt{u~=g z_w2F>uYTD{Agq|YV^i_rL%lu7;8uUcPW$4&%Q{W_EMp((kGc36&fGpbOZ`;vo*n1< z-oY#|`zLulxQqT2SD)5P)N_!R_WJNQ_H#HtIlNyISrl}&?*I0j%Wof!F&8=zEo6a#`g%mh+}e>GLpsMl>=H(Zu7t!^J7^);RRklS_Z`X#c6+ zf8{upJ`l$#`Jru|8kLPsjfs3;f$PYCSj%KQY`5e*@-y>z%UhS{lf+uLPTr7XJ7PX3 zepZVNVvdqF*mcaRv3%3j&nm}O;kAg?a>5QFH&rd~VXo#@nh zISI*+_r+kM25IPD<1@|xU$`?jNy<1`^16Si4ZP*pS*x0i z6{+b!d94lYn*5HJaYht2-B-Z>i3=4mWsV2wdQfooz?1Tu-7l~s7CX2Lb^}S9CxgDW z2U)Wr*Iu4o@lBHPtb@A8IxvfK6@FE@4oXhPL!Iqfa_H}SoP6>lCKQ>h*)pcOa%4bv$67ve*KVgk{){sRGKbv|>iF+i5Z|O8B$|ZK1(WWirv%TJ0$!222{w?Bm z!uk7njjS~k2=F)dY7UNp-IO`^M6gE_dJ|%l`yB6ZDeyI~v37rnp+a)B;Ap9z?a^)~ zKU3~s8JElbCQCIma6kADRXNU*gCTj4H{Q*yo@G+zNdJ+;v&x#;E0=Fj;^opO$Cn_# zdfjWWPF=}7CkS@b*m@!lZ*;5ie2``feCsJ^3ItVL*X8_8-Zws<^6JMk?VPKD!I*#> zQ1j}-Z8yEVz2$%GVU3eZjr(uNtg%kRUTCn~rSFmtpWfgCuwZ5VPuFA}vq#Xy^1F)1 zS;`rtddM=#dZRuyS?@(19qWra9{gp_(TTLx`n8tt9y8*-^5$i3yyQ$*zgdg60Hv$;&_6=Ygyn*m4d?{KXVs4fg)z z$?P{4ysz}j5@|Qgfk?N%?+Kq+o&}`q0ogOTK6==KfYQ1408A>tQ9es7s~mNoL+%XFo2rqIH#t1R~~Jn!C=I8~gFW$XosU&ga5 z_-X39!2RQ{{E|OjW1M9?d-FTxSWcK{8UOQa*IIv&0#Ca0Q?GR-W$bgsTD-^BWGXENM@F|sot$bFu&mxQ;n5{c4qvZj8b%7o1)doLZw;tTT;--C zD>#41Z(1k&$sbY0Mf-GWDl=;Q3RT1eiES|DnQi3bZ?32knyCALP*$#*+#PL;FK z>~5;h7Gr`APJ#J?4a%JU#M0a@K4G95(>8zg@Eq{0pF8d=bBMHO{y4oDBb4DzPbjt@ zl*#%qRTOGo8Bcu_?oA)3$D^U*dE^a=M+8dXPlEdE$OVy-_-A%F169Nrn^sly4Px~jmZb1<)>)pXw=-V;sv~hz)enefk9r^eVj9|L<}-Xe+=Gq zyVMaw=^5usIU@tjV-p1+Iu`@>e!m9>tT^YEpoyuE)8FAZ1o z$tjAHX2EpttvB5XYy!B!Z`5jb+AVu*iPkKPBj3TkR-P&qn%{+DU!|mp^<5KbX`U=Qqn5s!eVRT2 zu%Fg@dkD`kKDMZziTWt?_u%Yf15;aREI8*zLDD*#=jgSwwy7P_7bM`PM!w!Rh5p=6 zt2NeiL|uO<&IdRtXB~g-o{iznH)u>qPH${I`xI=+{MeFx;xwdQ1LAk);T*Vs@IlMC z`i6-O!(m}ZS7=&D{esfBx$(SZO?>dXR>8Bj-qa`#AJ)yA@Q+7%=&XS zKWZ-(dydbSbzgssds)ZySQ{7X8LIUgSwG2MqhVaO>kM-+7yK3logeD1T%WVkkEe`D z&`v#?QR)>L&v(|Me4U_2_0wX^z}(K(DU7K9Q!O8&UQbme-Y`FCB z&k9|P`xwdRw#qsL8KVS0G0MNyaQ|Sn#WjkxDs#H>{fSMT*8!!HblExU6A)6geHsXA zNM5iEwlYVaSH=p}YBg#({()GzaZ(gz)IA{zkVO5?%AHyV6ucyHSLbS!=PN z&iP~=(d&P>WGfkaOCFFM7j;GF6UAM_4e1T_uz8A8+F|FGb#kZnMZBBjdptiK=4p0Ym1x%!6hez5qqYDG*exbMP@x(;a#; z%k~P5x3iw<>l~?P&z+y(&w9G0DxtJ#2F|}(_zh>bf=-C5Tznf|cEt5Yp5 zt(Px3{D0;U-^HUgOmfHZfA;1B=6~nF-fQ`<{_I1|!7pFqTI+g!9jvFJ8Z-|0smFq6 zb7&MkC*z0cp9d#O-`liG8=TnQ;8o(>XlcsSfCKJpEAP}xY3DAoqvSc!glkMU5Xpak zu;gdO$r$z2HwbGKbEftQ%vFIWj?WuFx^5(P><)~y)ld@Zr}2Fp683QJw!kF(t~+IY z%5m>^2GKX`ntVKZe`s!ySS0St@Oz_0Ps=){mNZB1;* zfLCfOWL@E1o8p@Pd&Jo%)aw7l6xXx5$OB&2_fPEbgPWHA5jFw)06zW0MO+8++Gx+r zd#Hp?aOHe>#=3HH{r1_W&D8;%T&_>|3!)ktHfh%>@~_#H#-(_5(oFRnyjjh95-tj4~@c?H~^`n!6XI^xPshMI$9`M5(+rv%r-(ww5 z4Vu3$l^Wb}eC~3rXWN|hR>Ti7u5{QJE*=DpC5RvFg!okIp`D~2!b9ELgI6syvYok;FyXnh#6RSlc=3?^CX z7s&dHN8WM~KT5rao9}y|!5Der;g7D#ueDR||3A;-kDGqq&oj%KVYq)S=fjTso8xR0 zJS8j5rkG;*E)qjJ_@9I@qrIA<~s@fm#7mVIj4XTval zh$Rz)2FKFR3oGa>r7V9>4=GVSl78M3yo%0G-0)C0%zk@sq&t!?GJPH z{_gm$W&5daf*n#%akoDmrtOMTlPmro>`xI}-Rj03G*hJ}O?f6rPUxSu1%Et~T=|== z@Omq^t*%p)`y9Dmf{i)lXtobkUU?ZMaX_7h-TETCYb?p*YxXzvAZIpU7 zcYV<-E`771SD&ny?<*(gmg~LpJ+A&No%q|M9GwQ+He+Y1sP^7k6W*c#jbAm!2L)F< 
zknGDWLv^2xVWZ8tOkl*+a^*Vx&5(bNk30SX^nM{7+ji`tP@i!RglySb9}aQH(@{GnNf+oxDX-AkF*o?Pdxs5!d&_uE&x<_Klne68=&ZvIqhFKGp@gLA>#q5dD}Cm=`dneL#9R;p__NfcUYcR*Z5 z-o@5CtEzwIs6|ebVv;I=I(d1t`_Wfmt=O{;J-Spa`qV;b?cf)jP1GHYoB{g^S@^ML z%iL;#A>d2dZfXYT6 zY()<2N2}uJ+?9S0HFei>ne$biv8rMBcGy23OZvpylv_WQI^Wlt*TC`oxjG@8^=lP zF4un_AUkp&&$bMYOFM$j+N18?Pt13XtsnGTfBC6DI-&P^!}2`7>w~_7-%^vWCcAd~ zl)mpypbzAU$@BIf?8^w8?9fR?56!|?*3?TIn`fW0GvRc#O@h|;o9$`hb`o`DPBh!6 zLF@xwuz{;t&D(yk8td80_rm$7qPxV_zNLROq^$MT|8_@QPfR5KA~i=!{N+*S>K5mI z{4bvUU)EKjG4$i_6}8ZR)|wSLb%_hB%eDS)pMx9?v8u@_MmKGzb6N4T9jGe%D39=; zU8{)O63OBKGr=E3^Hlc+5$CO~!WAohXusOTZIEO{_0?BnZTx+|mbrolEa!Gs-R$_4llm|5 z^c7!zz;&+IdFNa~nBA?K;)A(Hsj{*Bf$75gLY?Rcjhv-Hy;al6js9zNz0= zThx-uJihy`>IO}|Ag2GhRuO;2#^+svn~iMxkk=nV!K4?XLYbpJCbo{|*4@w!I-?fg&{XgIekNkRByMNvD zGTytc#gcFKd#&Hk?5hocpC)+%*RiG+`-=5GcMs74``s}H9_uKy!DoLYCr0W+!3#aq zX6s}}$saiNw3Zi`7+gjS(#(58wb&>*IS zk4uLP%mL5QcZW1VT2+51uB>G%YTw~Uf`6h3Moo{*L(+gW#}y>WLf!>C3?WzYaPRdD z*E3s=V;Sq*c(XrR#-%M_$5PL&<}+#oW&Ut#)9#0^b?_TVN#IAiXPvdtTH2>Z-#k~l zHCfMf%0AI%2Ul}P_*>{YH#*9oHOHpTja4M|M$x(?Blr=j$DDr}J*3r%Lm>KK?j+v) za9$KXCi~#=XEep>GM#?()Np3E8Vzcz)9xyqIl_tTOPrWz@0oLLMW>UQjkc)#K3cfO zgkM16Qs0zhWG6sNhV@r`-*x>x>UWnpRO-r0e%0M3N*fy+FHY4>R4U5^hQT$D-{aZ9o<~8qb5J)xJ%S zV{i~p$sbA^_}!7b@z@QRufB`U9Jw8QuAF_KFEnbP#7TeGC0Dwvxs$pt(uXEvPaRSs zRQC3&whlwdGbrmMO8n(7_4M#aPQQIedjPMZto!~M_X3kC`T4)Fx0{_{oWbd@`~0eT zBr)9rt9ab|f9NaReF|6FVe*1-#Hp3yb7c)HxW0dq{&%!Cf_x6n&7=lxe89H&{LHwP z)PlvPxdeX=`kZ_%!Z$8HHd@YkFfx-<-(88sSf?`YNlg?n8=Ofb8Ob#xm`Q;O>%${* znOi<}hJ^k2lOwpT8%pjLd{)VihJQ1p9uU3*@^pXnE- zg8=O=&1bBhM>Srju**%_I%6dLV~=LP@WjZ060c|2itck18`TBeOo|7vHK!*tf0A`- zNZ@uVUtYH&yYad?3^1xT)&0WpwN|Z@&`qJ>89nqgkaKs#=?ng&tdW&@xqs#`Z+d33 zPsV>kF2E%ABtJrKySUabUr6hWs7=f^rxhec-K&nMT>wovdMC3x zeTyBxW)F3pecbAc7kBH`(B&j4iyp1Bq6W#yH`^)wbDb39{b4s*Jadux_B-vu_QtVo zQ3Hxrr;5HU7`+YWMsma)g-nUFsGkPpcxiuTEo(t?xXXb)8@P41eo8__gza{q47S9) z*721b)K*oFfWuv>14n(#q|%Ms7qO>4MxLMbMpWcgz<#g3n$)kZJ;n~V^Aw!)tE09m{k2>d#zzB$PILIdIYKtvcET#`!RkIt+j8 zuPMAMSpbN^lbQ;NZUFe;zeMf+umul?)}+1;KUiwmG`|sCM?|(Yq&tUC?nC0^a1O}M zc-U7(>V4`u`L2GST1jdaQrDw6VC6oUr}U$X-n2>k$Tpa$KJd_D;wY23g@Q{OX!sTH zxSWz#yvJ5M2KH`jq#l@Fzl|;d`l5g6#~fa$9obM973wRSdZmyilEexp0JyW>w8-yZ zH+z;&4TB%QBce!}=7Vmv&0$+R%+>Rojc#30XBK>eJK!piw{Hl_c7j~$LN;x0ogpb8 zbs?|1taqK5(ieKnx5~It;tfCAM)w%HH&->BN_`$j8FObU`h6j98^6k@H z)&_O8LG_@nsE?aE5aR6}4NH|VTodZpl@YBp0P=OcQG1M|RnoRjn`{kE zfrghdpV_{a)B>M|?iTvg0)LE{{oEPKv`&_@w`t4PE>`UJNxq;IPKPCsC%+UasxYo)0 z^u=N54qFnh{c_9KbMSVsmIA{F#7z3{{~zS>W7=m@$FYmsPdqVx40-&?@ry)ok;ezO z#J{@h1l{(d)DZh!`~Ijg`b*=tz(UKsO5uOpH5rH-8yfc~>t}y&b%f}={8OvwS}R)A z5dGq{-D`|{+&J;@MI_{vIAdtlqU`jxPi!W2wCPs?4mt+A*+`x1YDavzbZcGi&_1T$ z&cTGMc?rMUFB5VM@FdWoXN^j2WIo#(xq~26F4-7R@x=MCWc(n8Kk6FIS)VOa7ft|| zWyLjP&W;-T!{2`$=;Fd}(L2agtklJBOCEKVj;v_E;J=l=WClq9U8qtk$BC^BZSB+N z#Mn@@BkS#C=oIVE^u|C#N%o&vz{P{_?!2$jTZ3!W;*9;ReZ<8=s(*> za}ez92oYAvrpw_cK2_Ns7HunKvjYX`ICS{Z=n3Q-ET?~{`knS%SIcliu$<2z(`zjNNHxBD3tT;v8`ky*uG0eiMj)o0zNOM z=Fzr$wOxNlM+;%&%N}fQ&%JEW1GVR-o=HvornS+Z;0~Zk8Ml2?uTr--FwUdN@Qk-Q zIIPTSuXEw%6<>0osZAqBk$m!|{QXEP+n@P==1;-iy5`TqlNomk>ECr_G1!YcSS*tY_Az3V&NvcfA?c>pihXY74i0FXPg+g!JYAl!%K#3&yHM?)cx#`m@@{75(*aB_=!>j2w^ewLuq zE4ncXo$sirLFFaDzi?h*vdAMD{GrsTpk||tPw#kQVb^3G2{FYORsABbDg~f8Z_^IE zxKcL*9lNq_L01;BbrDH=bOBL{I5d$r_Ak@y4xW& z-1;2BFWX5t!-x}cHkl{#Gx6xcgQ^Q9&9!`|^rZeFbIkcxa9zX(A(5gU2~1&`t4n`R z^5P4o)zoG%ZLk18Uii*aP+n+nAVDjp_XZ9u>z2eyM&8sHI67 z3(ZT@_KLn=s9N)0$+wZdN7tg}jrD>SWr3%uUAZ*jvpwQz*XRGgil>27$R2+_v$pJg z$Q}h#LVNMhL(A(M{7N-X;fE^oxH0;|%M|Jq@S0-F`?QaK0}S|KJb%_q8OuHPg8a32 zByZkG=m#>k6W1nJ&t8YFD*>SWe-w=aQDl8^wj=cQ&V~RrKu^o)-f@kctg9zLBOEoBb@ATF*tlqWq$Rl#pKG4 
z!>d9k?ymQTFJnG?Je#b8J9#@QocflK+(J*OEQ)8pz85~#U2CqWmymyZ43Tizr?s%( za^7ax>9SAj-F`=7y_~;WaIjq0U}pba8OAhf=4`p7FK9&ah`Z>bRs_9U^FT2sFu_I7k`O~_&%TgHGEx(pW=TnmH6oof9Q|e6vWXV`Q!iSiHILtVuoN968rnX`F_ngx@OEX zpZ%3HDbBCONFN!yij^FK_Sbtl3$qF9Y`pVO=S+pEX^qzK+@eEKh=xXd9?+ZJ4AA5J zel_iwtdGQlM1Zb*h!cU{3THpLUQl;xb~gI-*SwuP^MC6LmUz%vur~@n z^v;7=C9#x48;zHaO7A@2FkCn3r;sSBaae58CMOQl z)btacvJz2Cp{{MX+LE72oh8?Xi-6xR`O0+2m0DMErdJMaT05hqeRA=E;C(*U_J)}1 z!zc}D!BFa875=+)IJH5J4u(pqpA@rPkK>y6+J-q*u19~VA0q3ii}>@#1^k;1#t-kB zUM%JLa;+Bu*SguCVjN5S04!SaKyLY}G6uxgzhlhB-n`Zk+{Rz(tjOAmpWdEe2y0hu zFs9n#8AwBF=gEDGpq)smk0;*o(H)UJGGf`A*c%((3!GVC#j;+P;#6`9_<$H54}V7f zp7fj946uPpMy5Xq@}wfiHCt<`KS|{Ct?Bd;O3 z0~pIT>pIs{J?>E}iBXX@@|9Y3ncq!<6?~mSJDY#llYELC2YEkty|YiL(>#`b#~EMi z-*uYHoM5SwTAl|FdUOxEJ`cKzx3fm-0Lfl)|C3&7uR%#1nsS8>CC}K||L7*eek@+Q zX_(P3ZZ(DN_oFvQy=|dw`uF!q8&tmUrUCq*S9aG*R)qET{QI>>ZBnTbE_GMX&ESyz zlLLSEcX?6U2F|#3mdrP!_5f|Sk7=q!lQbHv4xjY9GFTh*){0en5y*8ss~ph<$!&UWxY~Gw zKPmH}aFh2kZt2paQTA1!E{Q_KmWwa9%26y_^5q$-#7f z^3-MD?OSicf7MHL>j3>(ry8;uGu=#5OY^5=@^xrDSIscznTPrlWr$Uud8Ym!dkcTg z5;wm47x-}WtHF<7Q+!>lH9bihomH(?=9A@GYdsCoyR2RgFOE1!4FG(BJn97SkBMs& zVpt<_mShA4eNe!Ii@t3g`hNHNm)(|xwrktO6Mxe-9EvxI&(aJ6UAk22$Iw4JC$s^d zeV*pwrq*L!%6uXH%?n+qhq3%uJ)VC*G>)$tDR&*Pt39W7y~Mo#vHz*x{?SF0KD)## zA9UEHwzaH5y=i^@>SsGWWnk0IHFdK+YnRa190^`6YyM~=2hU5giC~r29+3@M1H$>o z-o{={3Xb^B=;%K8m-lY-B75*Gp2InPxUV+gZ6kczA9$^*i{FnO-87|bV-J5@ajR#n z^)K|O@N1fxtcOsC2(NgJnRPkQ-%~eAI$f>k zFYt$c^-Vvt{r)~FG#|Q1`6Fgk?d$Q!smPm6(w82(tlH3hrtb@ezT{WdWi4>CA?LkT z)SHxgOUa`@jtwt$aBf`MqK|*!o%bc@?mjPI?*AJ-Ryl7)AHRqE$#1-_)Z6&AHj2H6 ze}bmoeP7VOr>kvSnd~*h_~RSHIm%tQ2N}&@A)|j>HFB{1#&GduTl$p4uF!DmUs@;Q zRP@bqNyu`hg;P4zFirv7)q`V*Mx)7fG_G-vF*eEfZSSy$rIz?$u=0P7wi+&Q0Ekx_ z^l>iw95l>T@C>*qd9>WFzqD)g!l{#hR+G5-gjz0|^_uKSuZ{OKe7ixYM0FG`=N zhkSiGCy)Jk9_q!5HKKp|Bj?c0=_yaMn@N?Rnx3}s&2&ThHnRTcqHl?%Nl0eS=%d$+ z!m6ie-eNceD?AJOsY*_o)HjiSRf7OX;@zVqO(-QNcQ{z36L`e5oIJ*9otj+5pY`ZX zD9;ea%G`l5@;!Xl0gZzYTu8M6 zS;|=XFW19e4|0FAQ}jibd1$z&H@xv0v;C^Ec-VjFw&d>+f|igKYe|e&JuqYi_OPL`#yQVb zZIdtHfAr{~cYr$z$q}~GnuRBO%g1(aqEyG=l`Ct-fh;xLQ@=AFf(_9x2~L@89X?YB zHuzhiV>W;A_2U$Oq>Xlhvh@jND4 z#Q)=ah!3S+`=@=TiQSs|J@Om)7R56(%Q;#z0b4X1i#dV=5YZP;<(yj3Ll%^I!LtQB z@;$&ck|VH#Plz^w?5n>P^wm#m*S`8Pp1eL=A%A}@At#lR*CI<%{E@ye>u4?4meQb? z#Qo1e;%J2617SpjAoO}$3i?~3kBLs!#OsgH?$i4onrS$OSw75^VPw?2=~g)g)*yaw z!4I=HaBI{d39ER!p_aCN%nzCR5>W$iYH7aP%|zo|E`ZWNp@!Wu=3 zz2BTz$8c$$o@UgA#^FGH19MEWmGx$4?pj^-90L4wIPklU6{W}Xuu3Lj>ka1U^2Q&H zRs7WMA9nBOk10I~KRtD~YG>K!@x1?P=Z$~;aP{T3TaAM|I2}9v!!bNJ=AB*RTU|Zr zu1+W5i_vta;pwPZYd3D4r|UL4>qTFIaz6U6?TU9?YL^RsLQA7K^)G)knO`!Mex^PZ zpOby{?ytQFO>zPFLkhn5S^-cbKBxIAkskTchfS)1xblD+7OTpjx zd5^mXNr_=AU$2q!5p+G#b#0VGYOH@9_tw|DDU5-H{(+@Y0<3Vq9S(nG>pOC?! zSPvaGF_Po0hiI-M*Ua^b&nvooeJz&A)W+6_pn(dI*GSF}u5!Tp*>=2tEp>mAIHC2B zybGGRX^R%s#eB;9>(L)b@;G6!MOr1NoR0CN4u4vy1X?dwiu-4?rhHyscxp|KZ-}=l z@9+A29dysLP)_a{k%rQ^9%riY<{pgZK0PNjcs;56auDkCiyHF{Lt(A^Q%N z^Eb(v-=53&m+PzUxv#>K>w$l6dPq@Vf*`@sjYfvrJnw6+QsjDF|KA@K%Kc`F^UC^u zYp}kr`{Iv3oZo`Xq}by`%h#chN-N7Nyq-eFijL9OeQ^I%(DA-mE{CFB{`F5_ zP7T(ld^yJNiz^rK6{(QWm�Pe?r+QIbAUQVw3To0*EQ#H>o~T+=PGBE0SGF<%={& zF6NK7@e+h-SB@He8RhuU=Z3x+94Q%0UxE0WDI_S6Yg?LS2<*8F?EGu#4tx|dZuFNa1`^us6bf~;cD%<{(RcTn2_sZL6 z<-;M9e%zC36R#2JtThHH@d8PZ&s#PxIN_W_*RxX2v`4LaQWMEW`YQwy}w)E44iut#Yv%C0~C)ZWP+0zS>>Eg-i}? 
z1^#Mwb#N~FW7HM3GZzZ8Lorr(JP7+>pLP0L*6Dzf$I}kI8|b4-D_@Wiy1tu=dhqPP ztF})Lt(OkM(MBWzNvf~qzrDJV9sqOSJ2Dn_G}K3PdmKdeT#&Z!ozZ+$yoBbvyw|Rx z!bqK;ya)T0b!&eloAKPHKdk(m&AoVsgK+l;vYlfUr2TB>M%vZx@AVG|AgDQoK9HlP zvjav+3%W*F9p?%<^?ax1j5dRDE#|ZOe6`^|i2%kbj3gtSGnNemj+29yOvjwaa7r|P z%;(3T&jdpDT*_(7>jfkg%g?#R=g90)NH52EwS}EQL*sw3DdhEJlRCzD7n^4_M}0%y zLp@ZE@p!bEny*52<`5g}(&ji25ftW*&vIS#x8{Ra6a?kxm1ZZL$LzEozwfTUNQH-+ z&2);m=RxTE@hD}ixgT@HeJklfnKG8nlzF6w2J*ih#1SDm@_);#DSS_~rfVlqjy<;r z@)}0Wd(nTJEXNex9p+iL!qnzjV)XOFRLC;}EP`ywz_62YEM_hho_ z3JLz=Ioq9%?a%@qDJ%Pv5s)$Pb$uOJU;o$(68w#Qo>2@Aa-3P?-Uf!Wv4SmPlMt0k zrE<;$`;E>NG!z#!23MF1Ij>4M9c>xYU>(BDyWT_hzppufm@4O~yw1;FVEqodiLVpb zjRk-A+bH%w`{n}QT!`w3`_R$vMWk+}^Lp#R-c)_WrNh8Gf{OBdL_h2it=a%bK)AnU zuH=35?{in>+H{ZlEY=~{1UV|!kKf^S-E_8mZf(;Th&9-@=^~&9R0!C+nD_duqc!gx zbo65|Mtq(E_o>qn-RYp);P*p*?~nB(^IpDYa(}#kKD@pT$qY73+8y!w0O?Ym%~0v| zv*(_KTxU+uKB3pJ@%FG6S0fHyeO#;uboBTPkOnMC#?Q+>+qRvpStB0L&$xno{`@T0 zFLTQ0ifh=L^7nH6ziIxfeEx;BL;qYmd>0&9_MIm}Dz4<5GB5FXEY~65&GD$8yK-)s zN4mX#NqtntN3LIksp54o3_qm%GhI0pPRWMcQ(fkjJp>ZfiQq;u1|BFXGNzO?%W(~u zSFUfK2x|5R8_mgcF(+d=UdG_Sd`8T#K5C!q1h!`? z9`U+azFy_^sCeC4oljsS^7SRJJH_kj0o(t7Gyj|`^eRQX;w--Sc_GgaxoApvO=U2WuALjN(UQ6fs$d>Vc zd^XRAGbf*K&GV^nyLp}S1b;}r?hF_o7$3a8<#nTcU5;;9yxwMfm2>erYPs-Eir3}% zrp4=>RSp4~=TiAM=VVBMA^TaLO-hKWqC9)$-aDub>}8z{2a)CZi@(?}O_%nI`nmsJ zXLMs~Fdv-d5LM1p!;7XCzREo*?KFFTneI_E04e4wRJ%0gI27|kPy3%W=rv3~U|spM z+#P(Lz*0uo_MEeGRlN7id+DM6zkNRiad8)aK|Xi4SnJpK2cv&^4#r~e^&ih!zAluR z4DO_#srOfI<1q9TV~{0`e;wP2C{py_&(9vFTdK4#&mFZf>2rUOXefwhod5HGoZx=+ zuAD1ai7|7NN*gMSRSAk=Q~v(vH958cKYRE0DvXUX0O&5xrBpV^5oATyx3%@}``rD) z9>;w=V8V?)?W9&}f zNWbv!6i#T~hu!+~eaX@L=lf&~@vqn8dz&!w85{1|t0e=ohVR6^j0Z0#7J4AbzZ~6PZG=9!|=dZSWo?^dba%}#m=lNo-xnD3)u{J}R)g82hNlaDI5!M8VSXtP4KwKb!0C6er zy#aBD1jr+V0GX$s5Sg`WpYHSjGqd|tS7j!N`RrP;VnwL8Nw*YjW<>V?F!yG=&T8A% z*sDajO5{Y~8lPX0LS2KY4L6ZXz+ht=+km6Id)mOB#kbblJLlI?qAcyLZ+aSg!YKOa zMeD8MY=^h{9Tg04I{)8vom-Ql=$eK9%EVmF1QUUraVGYKf`BL|MNW4hAaWJ~fnPr> zd#g=pc1-|!P>v=-AK#mo8S~c^3**cSTRnapFUl0R2K^O`sAf1ZR%|JjJ zMS6s*&)R2@Oj1wfsibmpa%vwp{Qvi@^}f}&?&uYFTYtO%5ch7)iEG=I?q4}kPgUWH z0BoGb=R`dW-H-qQk`T8yEQ|!g#x}Mk`1R-OLx9PgbFH~ooJ;Lkxo4)0F&dYCX}$H< zZx2Q?{mf_ze0Ui_4{5H8*|CjCH8YBseLXMy-W-ja`S81@Q2gl8;RHDS%e$Q}%dhlE zTm3G}YJU&g?L)tL6C>Pua5m$+yJ73rRzXSe-|EheqwjE1_rH4cArkBfz06Gn9KQC+ zZp?s`{56Of|M5hVW!`6^;iP|Gj;D){^%PC42`QfYxCo-Rhkgm2yZO*))Lm7)-1gkh zyXW0LOSe9}trve16v@rZVI-7eFdyc&_p#j6I)A^uzQ?24ezlB`RG+7>@#B0mJggu! 
zz10tNY_!PSz@bg6?#+LB4`gba=S5g=ygZM)EoZ)3r|Yoc+^-h>7p7Fd%sHnm=1ld< z?aQYHK>f6N+aK5KZGPK85bBpnqy6q{{!r8+q&7oY$AzZJXH>c9Pu|9|=Q zvHkDM#pmMxToi%wfmn; zzT+5m+%TR$RWJ296S`FyNyWfe%wGWoC*D_W$`^a{oxC?%n~YjGeMV)4 zq5f<3`s8k3-pa@Jv;63Q*LUA1i+}bwo43Ph(2#%As44MZrIEcgMra|e*Q~dY7So-*|Uozbn(C|5&F?TZ+y)rjqUW; z=cBx+oK@w*r2TkYI`LgEWL^B%medEssX;Z!*cNHG&F8me6;zJ}OVvUCN`EzQ%!`%5 zNAOE#AVg@eF!gNv5*+;08`qxq2ld4Jr3RnhR%Ow{q_vbA5Oamhs!l!AU+)=fJv#z( z(#-9-26F&9hnZ=YvxyT1=YKc))_mZV`K%MAhRW^E4pJ;vof;a z8v-cLRKD@Oy8BQW3dEf9E`KARTwMnx;7gqcH4V(qRLx-%DoW#T?UYB}cpvt`d+A%r zEz~*4DE#hCT<>rEgHy`qOWFp8&Rfsw{o@P+;I}<_{V2HgkKWWlP=@cjMxNVtQ5t|G zppn`cM;o|+C~w(LCZk$vTZS~%zsk*O`GAzowfQL zj$m#Dt=LFQll#>3%H7;M%+lVde%lS}LwTOTuyvz~oaZq{%Bk}_J`-EL-98y9^|Rvp<~BfT zuc$K;w(pMu+cHvU&iDHJUA}H~0;H1VXEAVAz*vve{Ch^x= zsQ0VhmxBB}DNqrkXMHwqEJmBwLLI%2K6}UWkDY8bbxQk{GxEZ@eXY)qthe9r%b0ad zCQiv(r?U;OVeMwD6Vd`q3FeIPec2Pe#v3PM#FM@ktgEsrc`*24H z7i4yZ6UMg)W`DlyvgXuC>o>h1z?H@w)kxnRt64Cs+U%u`2u$5lL9irzSh-nZyW`Zh z4k*fwj4qP1Ov^^oU_D!@!ggF#u!F#Zj_!Z?haKzZnS&y?R8&0r5BrISq43kdP$6}Y z)H56>c<&zX6Q{tAlRH|Y{l_4*9q#gq&$?1U$2JT1mwzhuk=w?S-tNZcy7gsmc3*+v z@w#NK;_{ZgdFYx65*IebOFCb?pchRHF~7F5HnoHDJ@wU8)H*uK*%7m-exTmag+gL- zlW$2B58cB4!+vmIt9#5S9Eg*fgmL!#J58`(=eo0&A6CY^HIq!<0|`x}t0N>(AL>lz&e8cjYiqx#=G1eF*GflzP;j*~g%X zZqnW{RDozEWwvN9+g3+yl8134zaJR0I~CMp zp8Gzq{aL?MCa+s-_fn|$+Re^vi2b*BYN`yJ)cf6H{5Tp}g(LUJuI$Lkj-G9;tRcgS zHh=sy?-oX*&7Dd%9qydz_+8etQy-=Kz-oFR zNC~zubi>q`+>Vo&->r&m&o7&sexCPdGZmN{NQ&^?U>aK7QKzHqemh{0Nt*L}?KXCX zGHCt~75<|cY!$tAq74&A=bnqq`f^dh9bJ({yk5h%|ViJAaCa zQt;E$y=tiLtuk`P>G}8<8&TXna3x87GF?_@_z4mM9zTnraa{u>!Eo*)c zc}Bjcuk`f0)n>8Ghg3n$SR5^mKXHNMFtLU^_+nT4hkZ*KkVUWGvTOUa8SCc`grN20%X8nzbJH#@&pkTZqW;-`9DlW6n0CYm z9KDG{&ceOBJ=q>t`xI^Kw9T42d}cZKog;B0dox$9v03)(S>Sh_wAnLU+hC1jD`>{t zc_!*NEwgAMHkHkLFx0JdZzqB68ihTw($?tw8Y6AbouV~hcJkej7|S-WE$PgRi}u7U zz0m4_UV}pMwVsnH51q=X8GkfKHj%X2V)E+yo*gQGo2f6Sur1BN!JeZA7O6Z@PQX5t zHlrZ$r<^x$6rXIYojFLtJFMk#JQ-|uPrf8*CZL5s$LF~ z?HdKGpkUWENdVLXDN(9kvE#;3(S{3a(3dzq3MRx(RUCizuje@U$M!r^zv4%+!y96t7uB4_0QsfAr4!<|yZ$`(qx~EowHzB1q<<7+@X{b~vi*CPA*8Ie z(53>FZ$m%Zy;ER%iZ?qb!Q1Gi(YPOie^Z!21tcig4w-}0I9RT(1V2EU&7)<`zHq(oDQ|13=&IE0pEO zQK$xWlr+oBM1K&+L8Ck3TE5I8Ml~&5Ggxq6Zo^HI*s)^vANz;BbB*S{xAw>FjrYQOVfuZW zyO^n@wQe7`eLhn*40hq5QIE+}$yc4_;22KYrr+!#U*;WxC+9%ShtQ1|2l4a(IhQ+B z`LDP=KGGkM((07v{4oDpd;F~A(Ubr&64#^@hyy|3gXZWg4BV|(sIkZI~WxsCr2B+QLpt3f6zHt6EDoC*wwVV zDYhuT=j;;z;>><8VLTmE)5vx>Ivpbd^l+a8_PfTt%~2IhVxq=R898WqgjODnEojKjn|w@5{Q$`8JpjB6nKHva_=t zePmx=)RZPZrg8TQ+v~=&FXG^qsXTlVYtH~}o36QWT-s>E{R+!^;Rb%vGvlcr6n|ac zw2$qAq4TN(-OO(Vv)s01ZMNwV<-v%&^`URpdxr8Ora|p@>-x)Z1IHsP7(x$;_2@u_ z_MW@z*?7RzA_m3%5_{IKu@2AZ@0b|e4swqBUV67BkAJ0Fx(T{+XAC}X+o;`OU z-2gMa8yMzt>5TkDHQKCKGH`>uV~1%*Z2F!x)0E!hHugJi%lRpkrOfY>{o1Kcs_w;a zn;*+A<1atjMPkWwb-b@i z+B0MGbaUe%3Lg6*n4K3DO-Age{;-6H9JkVNlYh8T9rcm4jm|Aio=Wb%(Mi45Wb7O# z>)_~6+UuvmV1L+i#_ZfVsDF=k5N|uFJv7$fFPqOk>go$O+Gi#S0Q>0}>->RcAZnD_ z4|wWrF9;$ocik9T-kY0x2gJn1_PeK?o3?))BhU-~`<;-ONZWxkHm7l^Kzu`$>s2_( z$<82Nr0vY&(+TP7N`tAF`7USv)rhv;*h-h}kRqR*GJ-xv2ekPyG=KY>{^qgYXM*fJ z*hQUeKekAN{RlaUGVPfvew`UHt;4oKj0KOoI1Id4qQIpDIcu%K-(?OGxx5t(Nzpx`DylTn0g!ba=k8hn|4V> zDhr-EPMo!YNSR2|F*3|eu>U09#JAcb!zhgYpP#4puPdD75#zL0+ZoB z3ce69Sdzg9pQ#6!!~Z^fuoLpuLi~f5b6>O#0qd>eAIa%}V;zQTn`j;1?rs9`7IPRh zarp@r{9*N`yqQr_hHJjrKp(`wv0DwNsAx<1p@H zZ%?>(Lf#_3FMrND8S8bhpH(TistrcaG#PI-sO@_<3=ay_Yr&)YmQ}m8ET)8$*Mddv zJrzuj7QdT#5ocN_C;Rhxb)HVVCV9H2v1)_joa9Ty63z*qz3q&|b_B0u#r7BWOYXIJ zuinia`Y`MP`Kg`WxTaSF;pDvr)=WQlYy@3kXfGl+aDUeBtP#vo!Qo70aUKJ1@?PRq z(j3mmVB|A(OwNZ_oe%ckN8j@|Z2T10o$Q~sae{5?_)Xv0(|tC`RjQxScV5nXW;Rzt 
zGx=1RC-B$KIxe3b6=TQ!p5$c2*s+tlU+4R)6?BGx1o_3K$bcR=Db@`X?YVxv$;phO3)N;H|hzLo-U-R#y3U2ty6U0P!L) z7I4i4ci*B-uW@(m_JG)0ViU2~m2KkhXJ4k*^opqaN8x&U-yc=-#2a6z&55M!wt>g00s?iTr4eP`8kT4^(eHBn7Q-{$*d&Aovv{faS^SY6`n)sUvxAn-MWN++D;wj=0*3+*1i^R)dQI~vBeDN!F zjvxP0#`v7%wW^;CYxau+-jxiwIL4w4Hh);E4*GIfx0JiKue)hJOgx)4qPgAJype!d z;5QRvo-Vn&2mf}A#IgUSAY?0;s6O$Wq9##G=A4V5E4d@_IJimX4vzaa_PlRLO=H4v z<-z@8s_kN-^96HVbU&0S?M+gLd%ufpyWTCEXWW(Zsd3d)ZQ_zw{rJ_Ny!W9P$_vwqYsvi{aZ6ub5nfJC!C$sYcnD1-CpmH>ZK_$ z-xv8vhx2)Cm&I`$DvP0YEo|G=w0}PNdC%zK+RfCpdv#(Q+@Dc|kR*ufr~_Gaw*ES9 z1G8v%)`iY@pcN;e9&?N@V7IAYk4{;)|Vn9|J5^II!yKJmP$cVMCS39^t@l_kj13Q?{+fJD4AP^q#dK^JM1h`y2Qm>sLqHA=%3NFiVZ+_>7(n z-wpGd*Jl{!VSDSgL#h9<1gVlryICsEk+G9v3wh^W+ul>W`zm)w*S_>0UEh)zb<-dD zV&e^O?Y(&EYz=;l53qyN*?%}cH)n8RnwkIRB`+UcW4_J;8rRZagC}*@IYiaDz_(1j zut|*2Xb;~u^LyO8*TuMz8uBrxAg?Aq7n z=05d1oT1nyLYhzOFVv)^N)Ilm%#G^q2xsxDeQGiq{}x-j=|@b4Dt{&Kkb0f2@166J zF*!u#$5;pA=|;83iR9b*UU3T~j+NKQd5~Hlxv0i!usJk7sokg>+2&9+$ybTx?S5o# z-UQR<{?84oEMm1r1}h5^t8U-2x7ds35vItmAhf|`89=1$SM_~+=SBI*WLi3S4re~B zFL^Oz@t*PLtQm3YD}QE0uW1TSJA`zEdO;AmYlHGfUwsqO~XKu~8M;wQ^^ z_gAIg`@3~;58hWh+y~(I4)KARPFv)P0kdYg|3?^xC_>*vaxb@&UWBkp>2u3 z*Z6TaD=Isn&$0Z`exQ2hEFNF5i61Of&dez{taAD)HcFBbW1a#>Q^@O{IO2S-Olz*k|1^29mIs)tTFewak{Y1CEr8xsyG6 z)eJdf*r1P_W?{Qdbp5l`^3ML}bWfgNKkC()^DXUjzVGn!SN|qI23z>;XXJcQ|DZd^ zH{S~dRDU9V=8Qut{Y+IXHzShOIDnjiBYD!DK68~FB8ocIoy*vW7&3ihsGhS1q_rk)^(5twM=d)Rw+q|8+Rf?=J6U=w_;dco<4dkcjT&3V6nXwqq!TbBVx0Znc=QRv+V^Ns355!-k zj->sw&Fje@GNYLm!GMWZpVLMYZpiQVp)L0zpUHh7zsBC;zkZ81^x2X#Q`MG;u}{AC zR9CV%cY-g1(Hf;-;lHg>ly-v1GWJ`u#1RP29{BArPZonC+>64;L3lIuefRyBXMbf) zjbL;0YxuCkhY%yo!cm?(?GSSr3~y?M?^O3(?3+*t`*&jBsPj?pAnv}lZ({#TZ+xn6 zS9d^cT?v2XFZ-K>+@D5AI0|d;6)XV^B!CM1XMV!7Hv?_s7BAqQOJ_1Fn_%?e+kzMy zob_bGtfs9_Htd<#u?W9-Hp|XQ(SI5*3sv{*IG}cZd3L9^+%ff+Znl&zw7<7)&!hM) zwaek-R%4zW=45piVa}NHHOaJoUGS{q%F{i9Zy;Ura*xc+ZGf0(IGX^-^U1$M`p%y-l`;a*A&es|MuwJi1iIUa|7rhgr#(d?E^ zFW@@vg7{6I_30+T_sbf{Su_|{tJZ+N>MkhavY+xJl=_#ZNq-BnFQE^b)|y1Os$)+Y3x9`+&{ol_b)(s?Ga5T)I)3#>t%;Eb^&pMMYCM@XR1of_ z_MC~h?;S(kn1SQj5fg9?j}}xfYQKE;T|Q+rNnppQ&=hmZx zaQ`h^6%>R)+udPXLVMa_^t@$c?fz)KFp37VU+gHL zw#GVI<1l_|XW+Nxz6UWq-X4%+bfi$!q_mKRyaT*Y6)m?x>3@M&wA1v^Ilz?)J1$s_ z*$#IPL)9`ZXSem!d^89Ouf3hDH;oW%-Tp+?fA*S$6FX(nPP$ao@W!s%b-;W^(iX1_ zslW6Rv8PxJg2#1Z>VSD_-0{c%p6}5G&yaI?{@R5vcDhs7_2l&>_7*PKIY+6^?Zqa+ zb%QIW&uz6Ya&Dt4k6qmSvJ1c4i~5XmamIsGIDWJh+G$9jYv7NVTUv*Ev5(2) z`+(n@HY!oukSINvb-Lk>##~QcC%!6Jy#h_1&iX>VO;JIAfp5nYu(w`h>_hWnqruU^ zLF}V3GEg^Xj;HurD1H78GuR@DCf4_Q7=|9Wbhkq<1%IXack`|o69YmqqphsadGtB= zG43{u+Y&yI7um?sh23?E^*+`0RzN0!0kwT`jF?*{@t?*sFXvL{!Pj+6*@MbIOWY^_ z4yfx@`LbTqbB+2^uRmifDX|YdZ1VRCNA1sz*Z!(g*1r*knyAs6KU_C=UAt9UoEx5> zeD;KsWPhI2EyJ-HQ{(a1Zt)g}MSLHP-#R8U;;@Y<@66zplh!ca+D2&FXl%Fb_0sIt z6TcJK@jUihQ#jNfcF8ReIw9vazUsWUbJJxYML+WHVUxsm;Y}9tGD?D$Ie@!ige~E) zw3pL__(1zT&CmY`sN-98@p~C}OU|!w79@w3=YKxq45xL$2Cxn=%Fpq=@WX%dg3=al zTJ^fBK1vM1YZP4h7vHcM{6PFS$9Ikg#(rvopO}W7_p@f-Suxcqs6SJ82?+r#vrL|P zh4gm-a!bARX-}++d`pUFL-U0P%G@yS313xxKYY}_P!Xtd2l>;J^X+g3S$pEt&kt}< z>VJEueBm>Yc--SD%&$8(;}WvaV+#!W69AuyFc@;%3;&Zpo4=-^Njequ=E z`A(D9Tk0F`ALi9A!D86AEtUSLvyANunPe`8o`JF9s*e3j2nUU>bvIcJ`5aDR0@ zLanR%y~a3G!9?}By?)j+<|%lG?B}0r0gv6M&p_8e&`j`W_DhCsob`QUMqb)q&)_oH zA^z*FFr2)34<9VHxrYb%(Qp`F*w^>*bT%}L`WMXresp|l&C|g~Dp{*N_37(>r|~h zwo%t7{_s!wXT;v zA1ncGo18st#^8Y&{XHur2duD#jk?{wGRopcWaY8FdOc0J6v-yP=Y0nbT%kF29r)f8 zwPtMt)zkTR%Kkx%!hXVO{C`W{AXu3`gWbaWeeJ}yNUdU|phL_PL)NzO+r93A6SQ0g{=g8 zhq=_BAAId(TZRh7mw&|FO?loY?zH#| z;jo`-%rj+xES&Sm6A#`5cimL=?^xG?^p#gXIB7`keM z0`QqlCKCi!T=sDCF@B(PG>8r1bgSC!mtYI#D7|mj^NvZ)50NP9DsYVg(#ohVc(h@} za~uchH5;`Qp7*dNynj@91Pjd*<33F75jhqxk-1mHdyzG9}MrfB|L3Q 
z>`(#UUGubs=RuxC=j+M#D{3IvKGgg;G$wrS+IwK$mVVc$m6lQDK*fReUHRXM)bG)@ z1%uRl0l_L{Jvk?~oQKfaA@>#Z5OFAGT{WQCk02y=c z^&ALyg;@Tze_Y%QiZwLkgfSJIwW9tcJOeAJcRQ-{I0&*9_R_ObujsAOUZK{8T@oC( zQYakOIC!W`2=C};y~a|9P%Fk-M0^bCXZxlO{ahDn03v2BsY7=ezEkyoQFp0gr894| zvIUp;jJ4NizJKCYHni_O^DHmCE5E{?bf1y7-}HK&as4wtY0>LtP0xy(&EN(8kJQ%! zxYy86q@H1pRMwWyRC=}_+jVAwdL8|pL8r$o(G^4Pdmi%k>$hca_DOm!YhoKo>NG!9Swva-)7ZeEqkYW|`0osd&)K*e<} z4|`K59}r(nymPV}RUW9<%}LoP@B{7>O}E}KY>@v<`{U6l0b^$!skejUP$N!I(6#T{ zYT53lGk?cr#x`ZBEr}4#K{0;hYx<%*v6+MCF;j9K=X+4-z6aKddlBsj1?a<|PUh4e zUe$5uwSuIug5cIHXPdy@ZH0!;Ia>4ljE3hLTArN$y({*lQ0F}MueFEDSFtWv{Z5*T ztL@FHzA4{xjyBfm)xke>N02FKJ17^@P8@kZ0>LOhu#F64QFZ**9}`O@X@~E zXH9w=P$k3HdgkL-yly?3S$t1I)K;axi;U#BEJ)7l_XrF+HoXPR6gZv#ERlP;6C(j% z>ex-8qzMmWBsm7Ogo;Oj{l^^POD`OXik~jFJ3)VwA4JXfaN^BAEeYyrSQr$kG2c?S@)wN z=uR4sxO>~RHbI730E0o{Db;r`5uJOUqL(raI^ zQVX%|OGQe?esTW9zW(OFPknO@?aN11z9{$d#CfaEsq~xbx)8CF_8}+EL52N=_J2!b zZKWxB^A1kql330Q;tl7G|4(zn7YQwAs6Ok!Wl_v&19zsLwx>_a|FY8VdKcZBL9x3F zmGpM))=pU@V6_wSc)J@m5J%Fm((KGG?pdBtQGzIvs(&q*_#KM=s zw}5|l#u_SK6aBe;@g4fEU3mTSy?-;eo`E!&<*s9o9ubj&4WD|0ResMHz9zAB zi+Uh42%_T2iHmRla44{~fc;DoDoT?+Qb*n76a2_BDCiL+PJo{k;Z38?D1Rs3ZqIv0 zYe7@hGdxGBQImra)UzGxkQd!(>0l%yVg%tdkbXKV(}178@gLk0Rp}Hlu%P7`8AGkt z$sT!{SNAYmFb$JII{P3r>_;ywg4#QrCdKH=Ebf_%iZcqxA10c3L4AFxH)Kueqptir zoRJ@YU|)K?*$e6UxYQbTZGT4kSM(Zdzew#s{3JMr5s#hC-n_y+<$g*XjM@ZsFe}jY zu(Ka&qjHZ8xrtx*l9zg2S_DmXN6f`Jwxr%=yY4}K3SRn`@5?!-w&EPf`QP`w+Q+t+ z1kX0^t%^4@xIkXwc}9IL^i=GwN_IzAu3H%K}g6y3`GHh->rcL=jqHc9Sn zs_(6;6Jo&*cO>A#6SklwOuWvsc@}`wptyfxA20raS5L zl-hFbuxcGue`g{j)PEO$huwq-j>(bu=r1}0GxxOL74PYy0igN!?!^~oKfZrwTyYHR zE$0RQB^)}d;WhCC&%LZQb5S5QBld^ywb$f{fA_$u+Be2qgYQ^`antaav+OUn~Bnt|`K&Kh+yq-=A6|=YLJUcflKUEO^3^F8Sy= z&b#D5gTlM2oy)Vc7S^n#HgStyo4WMON^aUWW|eP*D+pf$>@Dvv(qw8G((4-c!|1WQ zA|LfihES(vZoupe)hfie!ZtYOy9l-F3cWl zE`rlJwruqTUw^-7R@hS}J&P0K{Q7iY?acqenFi<3+zwZL%oOnt`9%RsLh_5BxZ$)0 z*JoGm?XA?}s(SXBlK^>mj+c|(3%Qp@Cz0noo!wJ>r{e&pwujI4U;2Ck75Yc@v{NgM z!~FL7#`JWFKd<@)&UFVJv;1ju;M(e%`>C!(PcF6zNq;fvIr)EX&tyGV2kG0nvSl=u zN!&Y{Z)PFVs60pL*8g+6_Mg~2X4S%}M~0%`F~X+hyEg63>b%y0+O1t2_LBE$sK~!< zt5!xmgJ-PHkgd8N^xo8sJn$!S!7Lrpw+DteJLT+6&kT!Aw+vQcNiCAPRApx_{=@hZ z9S*^Jf`4oOA^8OO_YdFq|MN9Zih^~k?v=%mDDIWVGf9nW<(c>Z&A0vS``7o*_$cV&`7 zz&Ov^R=IA7V$F6M*fYmMFU=|I&rbdG84UWV4S!E^p!Vnx7@g;o!Mxiq6dF4!eDUM< z@o_1A$@X0iu2lFg$X9RM+6@;dT7=1?#M_SOst~7tNq;uFALP{*J*7IYxZFS4(`(Iw z^JlJN(LeWCbPVt%aA8J;Nw3Of-;Bs3cf4@G3EX0Xv8wR?(|-A5V=b4VW0vw- zXyY`gZH8zwINr!^0SGtOFU5kuyx%X<*G}zWO|Qe{`PFuB0ODbrI+h>L&4u`Z5C}_f zTnTy*ec@j7S9mDD#f8$ta6YrAyhO0=b01r9b%$jw{u&2K%z;l8zbbfTbsoSBhJW1S zQExp<95Z?DW_4l*Z_M)&FAxK(wA(G2Omv;&T-m@7X1G}fTlygqeu6Zl+&Iy#E0KFCAd6m1WpWNg*PCba1dflnFjrc)g2+3o9a?UD` zk-K}UYgakwS>u4SQt=>9H9`8wUVjIc1BROKdz*Yb1PvO;MT_PhaN-&LSKwsX7 zFPZ6_c_5@c0mZG1xuf)x*c8=`V&8R-aFp(RY8;YBfPJT=EGcm$^CiWujemm9tMKfJ z$Kc>U+PgD_fV8e3P@PF~S&#}%JO78jZ5r2R0&^v~$CMFa= zgsv-`i;v=~@ug-@cA6(~K!g_+CEpeMLmf`$clq?P$=@BvFLo}oXWaCqzQFFCNd!s+hIn2vMu5;`{>5Zt7Eia)@+YO75eqDfv;H#(y6jh!avr=krsd zm)hX{tlS!?<}kNyh&2_PPyA@!(z#CGky>7bPXGZBt1y^|-*A4!17 z9{mC|Faq#rP-MZc7^(D8NPmX($JX`zlDkEHr{aN}Yo9;$A*ofs0sn(zTwHMw&ozxo z^F}m7Ak$a8^sDb1pnu~Fwa(wcec(Sf2q5UwOzoaJv;uTpIREfE&HFKWLi{~~OQ)Ww zsnM0V;~q3$C=M9te{@(wYQFgzJJ^l3#lR@qV0#;SCdPNqDcn>9ajQB=@=<#yUoYBT z_LX8QQX?rY^+~YRHnDE7^0V^A4tHQQCtl!5PXiwWWY|(u9e)p#3Td5;EB?5~MNatE z`Z~ zZ$c$`aDYUU((qHH^ES0;{FAgTyVjUw0qh4Qh#_1}*9D&%=A^jCJ#&-aCR0Od==%wG z;t`%9JUj9$w10oRAh;RY_r!Vh2CQ|>$T5rl%rw0Te-}GA zrdNn|V1LVF0NpT4iHWH%(@&0d{{Eof6HJmiiT5ig?YYGnpjP726Aj;bhQT4IZ4@79 z#=xoF(G*P&w$#F;W|zht(Oppt*2?xUUrarneNXx5JU1jhTy{|+pY6e 
zKC{jRC5J!v7F4+LxnBvawZbAV{5A1eKlux>41a!_--ZA5p91)K!lU3px&i%Y>HbZK z2inf3A_$b5^asG@A@AL!Th4_f+tZm5%O{9B{Rm>egkN5Ee7g4x1Y z*H@PDcDoLe)QLoAiGC2YMpP-Y$Zy>g{cG<4gt+UvRtPsk_zhBTthE>%q3ex0ZXxB+ z=YLr=gwT1@y%sOVj`!QKmaBU!bzju8Xa^OyTAyhpIy#>GJURy0<&*!^^#ajjC}+ag zmh(T+c<$M5oO?8Xv>~f?<&XG#cxC52?**8%YmSX~IE%tpZ}-5^S-V$iv#d|GcKugd zdd)Em4)r3w7It3HLb}5zN?zOzk2Zhl(SI`x=3g+WEI=4Z$+{KKR`}Uir>xtEnl$^; zrLTHD%SNu+cZiE=$4J|rnQMMdTv$^OK7)D~_}*20vx;x!F5h7*uC;x1&5Qan@!Mj6 zT^;u#G&oB16w_wxg=me1$p?YbhWujy&)}I36zX~2lAH^kQ2I*0b!2CVyTiq7n}4Fm zhSnhhbi}%lfpJ+a3GR zqK@Wun9Ur~^Co@SX1g3y=TE3_#^3b#L-GSl9CwD+Yq~Y-MQaiHGmrelxE&)j_S~p6 zPkgi&`y)F_nPG!C4_qK4+fxFuMgM@@fF3Gqi%n`pR@6kV2=lCbxPiVtgMW){+5)F` zo&4zgR58#`4NKyf?9%64@n}wcF(EZ*=a3P5)F*anLk0e)G`>Woe^?wQvJT~TW*0WmZIN)(*}(>s3Bu(*3}m%9-cHo^#* ze(F>g1ed%gk-Yy3uoWu&MXy*M1F+ zXs&Yq5J!9LAE$YoerP+uy_go>yP5}rWA_BNB=4TWVL#y!mo?+;>VLWt_d)Y3&pFx4 z^kC1?5z1(CNQ-PwZ#xs64(Nr^ac*XN31xR9sH%}8c@UW&@bqcQr(z^V{!K_^V{xQ6Q<$u98l$-)dWG~LWPPu z&vOv_?@rHgzHjF`Sbvot;%hECK|eZd6MNV`2uV^D;kLB47xJGSD<&xi=bm{be9|=v+toy!c1b+Z|Fc2O^W6@Ru4AGW@oGT_hs6Ic(xYX=6C+kJ3|d@#_yoi-Qri>u6Md{> zl(e@G0u`yWRfK;r`*hXIYo(oE&yyYci?n^QL51kQzEnGeJH!p~nP`3GX>bq)J zI*AgqRrj&t6karQ@w*il&OUw3^m;UK=dPLOBK$CqK9PPZ{Ho}NyNt# z-Xs@~A?JVC4&B2sc`50!Nds;@n2EP=?|-eduJ`HzF^KwCf7FN6&_%;HdG?V8Mk~AB z4%p8yjFPgo>t@>$uWkE{bPONw(;Oc{Gxn-}4Lf~e4q!eSx8In&{8?Kyty_eyC-|qtW6R{y_uN76S@8PbYq=W3qK1EQ(yIRP|I`2~jUeKtlOFa(hez(u z4+lZ}?~A_Ck1ibROMe&fG%@pfVND8YR2RJ0LC%<*gYgi94bm&>6O08?!ldH{8EXNT zxE;op=yEv+>JuA!-9-N;KtTDJ46SwiNem>sEc8#YZ9lw^4${2fS*-`O?H2Bcfe2}w zGG%|pUj#ved^KiT>8M@oxp(wbM#ZYV-rfw>CyjA>FhVOEIp9ZLvAVe%K1l1ih~%Ij zHmyAd-8)g{pnv6An2_j)ea^>k5jl|Pheelrb?OB=aregLPgfkDiw5Q7#DS@}X%$E9 zT4(0WYpQJI8P}+=T68&o>MHCcJP;jc)$xDjRW5Vx<=E}Nh;jUA6s#P3&k&aSe$=2} zsuFkeZJxf>zIMQzxi#l0I8UvPhT z==Jca;v~N;m4S-}bXC-IdVJr}MgZSS(;nALw5+Rs7;nfr+O*JbX!_%K&7R-WbSr(Z z_%kq^v}a|lVp!>@Fu}AikqzPe*QHMzoz?d{w@itdk!aAoo~s!ERUAveBbo)m%ewS+ zU3fog)O>L_K}eq;41Le4eeVVkQip%&KiN0t?745)7X2M1*oRTiT01{T2Obfa*~fPm zko1}I+_li2Uenz{VNL8Ele(vJPpLzGNnIu?#NPQWbUNWIX?rJndx8a4J;Y}nQH>iW z8WYudR8=RwK7(Q(b^LwN9mLl09@w}mTw8k3#xx3KVz%;`5%ji5K(6{DC#6U({bY`6rHe_w9d?OCdu?;x0JF zK0{2c#u$HkDSTsyyl|&%YW)tEnI6HBJ6PAZt77(Z6ufA!&6#Jbfy}-H6FTRalM3dN7<53bLp}Pd$V9(&;fT_{{(s_91+;m5_ z)<5whBvg0L#2*qb!Nq@pY7Q>T=W*_+7xq5g&-%BqA(!>IPgOFJS!u7MI zne+8#YR2DPi%VMiOE20N9aH9HFe^c@ft-ryOZNIm9T6?_h1!bNCe&omG|FkCpxDOV zarsVw_6#)pW}0>EV}by|W6@=FS|@U7&)F^9bx=G_n)N3=HzKAteTF?^NmsQ_jh@@v zwEgQ;M?+Kbhu42(b9c`;jgB>|n)_vKwJt`*!?`u)JDI-%w)hwg%!ci*o2xdx+~j85 zOYji`J|`F~T%4Obvo-8(>fhZkd3MWWj;@hq)u{!~i?#zUb;ig4|K|TE*dY{KO+=}+ zjyHI3_4~ALOw%OS5-oVQvtGjIMT2mi(0@pMq3APgsQG__XUK-=9+*KqF*bIIZnl13 z$~nj*Z&J+gDn_8anEO=tzSJLbc?q3QpbL88-oXMIU>quSzQQq05bonB12(`dUc!AP3srnH~n#u3wrmKSGP1%tr`UYY1GIMnfr z_Eh*?*m=?U@95uC;kK1M(>)MZ8a+}E;;yABO+RRr*{ZoUtl|s(Olt9Hiorcl@BY_t z9c0WDkFMJ27 zYhQ+Cv=|ko)O*H)k%7)_9aSk@FY(W>Jq_)WdIdZX zZ{4!ZhdLN}V1k{XUx?}aDCQV1$TcnUGf3ZA`scqddKS2Dz&X6R^!5I|ei*UT6&Fu* zdn!EiPd?^Fs|ar6z*>_+26lgRXV9lBv5bFB(W~guukW{aWz8!#c&)%!Plc z(~2%UN5pRV;=9CF?^@PEi;wq z>H7!$j2j+qLI7Zfx5!7QDYKSykl%jdZtN##{BhqM?sxn##`6^o-gLTz`FnP|`&|bi zwf8K2Z@Klq!`OZuP4-iZ*{^YPoQ;31&vEVbdF8FU{Hf*;=)3+F(_@LQwB`?9<~A^s zq^dW89ow8;nNf1hBRUYXn{vl>Pqemee`u;5>1~gdBf1LUvzB`4lVEA_LwY!A{4LVq z;x*;&H^85m<&@9ZPp{o6(MdO)bxiHZikSzKhz_7&F!XD#e%-(!0(LoZ8!~^NqTADk ziaJ^<^{A>f$LPeviy>a>%3Xya^Tt#f>1HIi>V6^!+13~s@lw_Q4l!8wzsJOtRn8&z zRQ^qm4);TJeB=%x)X%-zh~CJBgOpyd1aLXgPeljaMb{NgJ3eIba5GBL_%$LNn(oYZP54tCdK=EXQ0aSDzAE$ zB7U@)z8^MV6MyKJp?h9wJLr2~TYA51kZta6n%@uE?|MEO|EC_R(6N6PogS?>%-xWF z9j$*kCMtSGON^R>t|3c}Q}D7|Xf9_A_N0fXYYoB&BnH0HLzH^X$mjhZZ>56X`l) 
z-?HO*v%|+n+N1}Iv`l~J12nb-xxG0!KB$6gh>jEUI5+@kd1GVsd?4LBp!ItqiBFDb z9cceu)l#nUnM$kWng^=$G(E%M3ST*Mz^YtQo$hU=8Tq60@&c7Bz=i}%=CK8Fg6(20 z=KaH?;g-A1SLo`~OVy8jvxCt62Y&2*Q*0hKh3cFL+gl{f$)9)R>zJr^-~`_;j+?0a?-V{ zdfCAPb)EORHi&=L0kJ~ub9dJDVja)}s;4*OOzP&&WSFNszqBNSwG?{YXg8T%>44q$ zpUZq`mD`6=&^-CqZ)Syug2#6q8t5O9V;@>|G`F#R=qZSn=0sYFP?(NV^ytz#vlL{e znu8_-Hu1Oh%+S?*Yf~Rc(Z(-B>RuCOQD}cWp0^`%P+xy|M4x#tvX{^qh+kI^eXX~m z^#r6J0*j3uLDTCz7v`!?ebyaHi2vdSzxSfE2U+;6!Jz%bId3{?Pl?{KXjY!*D^xuo zzr`iL?`yT@bFg~i;iA6jQfH@ump-@IlPkOqQfmvkro<<5e(bb(>I8duKf+axClehj z7aIeyXzYJK{aZlmX6kZZBUkeRad}i7ikA;_jpK(XtZhC9qb+fNN z!cb#BZmv7lGMgUK(oGrEta*M4(!G0RwC(S6vzTe)1n%)hG$$^v8@~Si>xQqF*CEsR zEYhbRB!T*M#KS-K{I@l~*qu^19@zN(Y~6oGH-`w#?}g+|=^p4+G-D81UVFwD1;0+& z`m~FCe-OfRQxbpsen3soJP_6WZ<(uy`BBWdU3#Vj9ip!%b8cPXSqZL);O{>^clOV( zA=cT*+AtFDkG0|RezpGf_fj{63H(ZbIJM~Fp1B|BKMIC_nsa+(ZUeeb?R5FqHI;w) z6sE_1GVE6J`T+cmd&O6M_q*(&hpwMULuD$3O2|pYxt=7Mu z@5?^_%UQj|1*bFNFPQ!NLJ!Vkf1NL_(>%hi61WL>p>=Spb4;I!;LkJ^>ptys?s~2< zNd5Y%H(2w^PCmBsrN7T7KG$hurv87UflU7oYtHlK?@s2;+lJ2oIPNzwu0>74$cSf4 zwA-lD!|i4R^=l^GjL9VM&IBhY;Lg!!kVi!R_&Rx)?nr`%7^{5aLW$R#8v((6#=`LYh2HO{V%xf-^0>XC=1f0{E@arr9kqN-=( zJa_3|tt-A$*Gx8(Mx09DuGd9p@YBxFYljQM`fg1l7A(_8hXFl^%WJkUr1|B3)C*Gh z)x;|5Gh%Ll=y0z|M5td{q()ExEs>7$j_Tr zI2)0yao+X3Ik48E)-UT5_&*bOl(gugLA%tW&b6afSwFp|&TEc0^FwPnIHX`OdEYr; zbAsG$uhR$mMi^5*`}rPoAKU0?!Cl!6O3^xnT8o!7z{8)5YJN>u=XrmA0`CgjPM&xC z>--qzcKtcmCi{oEHpbiaOc~XF{daQ_|LNc2=Q)ROQg1=8i;PX?CRVuU87mTPD}BEV z;->v1@KomfnW~KdO6m*PQoNZKL+gjnc!@^|M{{ zJ}aKY4Bn(*8Qz&&fp&k7{CBxwjnQwV#-B<5?EZsXNM;6;s|2*-72of__Ot_b9_Vc+ zaf+FRCHU-K`eY_u`bnfuhIyGI;svRV=y=kXx;`9k`rWB}DdBEc_X4bnd`r4ud_1bkwDmb|HTV|^b{kvw#{JrjQgr1OU zcF_L|&8W4tjS_#-eQOH0e-Nj_J1&@x1dlbCGe<&Vo63$orrn0vv4JwBb?h|XUdL|L zI#qU~;t*fWxVh5$JoRYn+Qx;y$M+ps(=%>(o-H8u|EfMHx?5)+h43&doq1|)6=(aJ zYoTM!^W0^bOWvk_b3eB4d;JZgiPQaxy0gs2;T+)u$}N9GuhMr!eV&{!1^C|m0uR3T zbJgK&c1JYd8LE@R{WE(9)n}td)I>6ByM`5wm*%M6GT~2oqr5Y)6y`04^Okfi`a9iG z`b53y-Hj43bc!D4Y;P7B`ESA5hvya>p3!7%Te?9esp6+$69`td=h`d5{^eu->}s~Mxny_>4NbX&SBp7 z%@#ct-Zwl9(_O%kA+9Gsa;OQvz>8V8L*5UeJNHmT7MT2^M30RAeA~Wh({mz%tKE*C zMY*>I+n4904qc7h2+h{P+rysuZS;>Y9}zS#qt3NLGi=T55i@*$gwt#X?cl{)hcXw- zuxfvY)FfJ>dua9qx$Z$< z2H55K_f$Uqh0m(jlIcJbx1@e_m`!2uW)7DBIg)w#Gv?6m9kZU7fP6ADeL39#90;D5 z=2Lo`pN(6y1E#oiw%bXMIak!IW!C7d_T7I99p(|i38ic(OX!Wv0KJ=_V1^4bP)Dt_ z6E$7ycFbH4q&LdK?-#pyd-Dc)$XSnG(y%ki79Ye?P-t+#{u>APwez0e5g%-l_S>Rc z+MQmMJk*%y0~fDH(-oSCohkO;9a_xn*Z95Go2GrBU=*7v_MF=FNk>!9yT8Jie(-IYXdDwvCOnxl5|-}?Mkz0mT$*V@LFUza?V*h%NPKYfIH zX0_hyQ!J&m3^LI(keNc24)l56;i|x16!mN*^$XY|&Y^gQT>0eEV`DHdihLEa1o4dS zYxmqvME}&rCkP@JEYRT#!p;g!Bd~vI$|uA-GqiV^Aws4^1=eq)PhhI8;BC!YxJFCt zL;DFGD|=+GyXL+|9)pqCdkjVsV*&odxI}dCeBv|~`MQ-7_S)uphqgdEg`4!$T_;O$cGoM3>7vxK;4A3a?x}x!^C(zo zI%XW%keHF#2oc{n*c%JJ69rD0{Wh_V$%;CBTQRNHIKGWL+j`yGByXHY@%wz=y9TT<}jr?j7+-Y6}PLj{eXu>V0CzG!Ef1 zLy=hejM3@3qvRx~S(iWl=~RE0lo(laj4t->H+|oq^LF9>{qWe%{lfa+!aD%Sf%6LA zjB;$~4^cK$A3rE6KdPc8`DfAWb6c5KM+e zA@czG1Um5F{XR)zmw5{GZ`8T3v_TnmMBMDZhXlxAw?k&9x@LGiUw`VV4Oi%@Kl|oa zA#-8LcI9m8nGMW*`RPXkUlSXn&z$rk3g;{q{mLq4VqOlj5a@q%6yNYfe-wEP9ND}9 zznng5$!RiYA2BmQ*CT^n+nk2fc7#Wle2q4q@k-qaTuSO#^!$hS)Vk1%WG)aKq7mx( zG9Qe)F0Y9d-EQW0ou2!qc?XV54~p)Op#K=gom&&WzLI`^IO!T6G<7ZN-1{zn&hc@j zCwPhr^i2EjhTDG;epASYMy}R0<}abjJ7F6w2;zM5Kl)c6_qAa7Nxi%g9pPtFtATUS zag4FG2osr2KV8KBm|71#q5fn=C6~_^){eQ+6ulsDAZ5nlPla0{xpEbQ5c|n~{+tcb zGQjqtQK3J-oF6o%1%KdK!YUqd=RfQHxre_XI56=;qZoh5*M3`X`XV?>#^HYGJ2PxB z)F9#$nfaO2b#EvXi^Q3zV9yU4kMZ~ zRn=swyu|f%PA+q#wNLtUHkt0fKiQXmxF3R(FbmW;o%gqU@-J{LneECk z7ic^2vC)64r(t8QZWCr}#J zDr|M9d=!+pBU zqWBkQXkOpusiHU|!IT%JE-?PaZwsjue?d8n_Aj5BGX=yjn84%4Wd|Q4mHTXZy 
z(#NCJU%0n_dj{bqG99r|hs*fq88mA@_Z^+TI_Itz=z16^L4`L~m=gBuN;C7qbw9;J zoW*}a=G0tx!ZHH`O^(y-?@fR{WL_|v%Y&2GA)2)Zs*axvLP5%DhM%h4-Hn=wwvXwD}=Fv`(+WoqI zx1k2ob2H<0a_K*+TTq~@=c86me0bG2DfWM$!sE0(J9C-1OOYb#^5m;r<{*X3`XfGa zS)-i2T2SM#;9oh@yft8cQbYiS$+0|Nhd!TA9HPtQ{VI@U&ddaFvlxy6X)&)ziBv5Tn0fWOpML2#(Hzs ziFZV69$e*3j@7iN zD_);d$*U{;|M$MkpBeqdd5$gTy{zlcNes!H#c@2k_YNd{qO~D=XVhLtHyd<>0K@d& zahI4WpV>q~#z5jIS((=3@Pem>88QpaHn5+3|;wdTmcCJ)^3guKPiTaETz0l86acHFu~h zTE+$%CC~u9qRdysD5uLXr-woA5Pfc8+JKYJrMf=fm}c~k<$Ry}OinsXzw7ke;R~%< z(Hho>yxd|5k%b&L13-UE<60&;6nsg3C$)7@_ZAqDOdX; zF_oTYF1;?=kELDa9B>v!G5@Od8qoV%G+{bXn_s~<6F7jL8}ejoP{iSfIlY%$ z)k_?W%acJVRUG|OH?QoFj%9Sbd`ms4xo?qg5nM>UY8m;p5eK+?SlMlwsnT@GO0|)~VA!4Of1&$=!@WB~2}j7_)*HiBgiBn_Bi*qt(0O&|RL{Gw{8z0uOc~EcomaHW9dsPT*V2C(b>S3W`UT~WMIEN%VO4b` ztw*QhT4uiJzlX9eCw_}{&mF`ki+&h5esMdL`~^F1BCCd0kJv@=%TBwAz791u(J&84 zD*5a_yau#{si$YVtr@i8(X+SsD>7Nb5B3M}C^R>?3+u0JM*gFDDWS93MWXRh^eGO} zq4a%otUZ4Wz$2JDT$DfpG&L%}22VlXhntlB;62f;CC-%laK=I8-2UjUo#%A_{-nu8+-MVAg6 z-BTemvFT?x>poUkklxpGej)XVb1WjSm71aC7$LkH;eN@_qRp)4)K&xA?_(ad&X@X} zr(q$VD@n&WbsX{yscojxX9QQDz7_Ux3w~v`*E6uS?ya?Y7}WNy9xQ0V8M&jg{p$p7 z_GEuSqrr^ck-Ccp!~q?VqQ0D{LSmSCj9($>V2B4 zyN-7h`fR)ghPz|_Z-aQHp;pY7gWzpKxzc?YRwlTQ|j{sYZX_b(tDQo(Ld*+X(M$seaEPwucZzv+y|rl<-}=0Y!Qd( zc%fiN&jj=aFuTWonWJYW=MG+8wp$KHfM9ZO^7zP}9m7y^HP0Vf+vW1W#HGSpc|nT! z3p}c<#RQt(QP18johVylUsGCQQ`>)SZx)lS zmFea6?i#JcZR+$wYVqgSn~xLbh5RC+y!DFL$4Q#M4pJHo*0`&8?KhS1^n04Ww9`Cw z`>NNUvnD-*C*LBupUi3_PB0a-Nj)G_Y(5-6w0}OJ*up1)qxp?5pxFpbf=7Qfzq@I9 z*@JqgpOx|E%V~IQANLFx&l#6DdK}A)YGSrGdbJqIbo0u!ll|{X-{fW{IwE(X&syc- zfAa1wb+*flxu3Zvr@E}31$5~(uj(1P26EC8`sq{GKX=IdxuMW*D07#Aer=w7(s$m% z*=F2sQ~SV2Orv^e+GX-gSv!AJWjJh?#S1EW1j?TwaXu=&6!SA`rE1>ql|GzNx8;P) zP#Vpp_tmhPpRa>Pw-M+2Z8uWn@B6gfUfRS-l%W~A-0*Kv0_kD6h)wp}!%L0|qI&VR zOV1m(NLB0EaHMTcrE9{RKKj-qe$#p;g5l}hKzd0JR63*6ec2FCTAuKO$wtJ_R4YrrQTZYH%bk-sW=5dJ@=Ur>j zJ)~>bz{iK!oWhCW687Dq)u+d!OFi+`GIp=yUz6wYdjISPXpGi#HAXg?|2LEwK_At$%WGTENny2qm0A6H+=r%%~*Oh=}Xg}TV`I<;i%)LAHVR!PpPn+ zN}uM!x23i%@qcxesu=n(A6d7~XWKdS(1Ad+<1IIC=b3+1G7PnMc4pX;8_m9?G5E&rBhAx?rb4!hF}LxVK}; zyFS3h^}H-aY>H!vsm=MbLim6?)$VPXsgk0Fs`_=lKVWX@@2`o*jvISl4496XpDy!EpR-KCF(52c zMYwx+^MB~FYjH(v>Pl zw6xq}UvdY2J6J+7`BVv`ipdd~YvdZ=&1Eg)j2G>fp3DZZ0`k58o4t2yR#n@&1pj|Z zMLuPQRRJmre)&>zzp8+U@KWIgxr2%dSW*AI+Q2#IT4(S5owHA6Wj)o3STRKq2IJaC z@2$6{8B`tcH>iKWrXlk)@n0aH5&7%*6-gf;>(XzUx1>LO=jHuiyuRv^_DdfiU_x0hcGzI>{iAj@l`07yz4u{MzPicBI(0-z%Zb{eC3%L z$K~A{@&pGRug|`n@X7w*EaCTuy^b|DHV&BgVO-!iLp)*NfrS8bUGUV4#M6kjrj6|Y zFrL7Mfo2lz)xZt{pS3Xu6!V^n49pRs?u%{mSsuX_`Y%lQLHm#0q zhM$2KRkSH?Mdk@@i@5|`1Na?gd*Eij{OcXM$QSkvele(fe#IIwry~Dfg%;4KeG>sQ z0m1$On+DD=tliiWfxj00hh+Pa96i`%*n4ZRIG`80X(x#LVolUJ!dcAN$Q4EZyKLsj zb%btLY(s<9My%?~uZ(|vO3>0@v3TgTD}m<*JK zXkTmK#fQc#L`zQ{Lj`?&gdIuvc-oMDo&MuYLDmSPj zJoeDu7+Zv|i|C;KrVshqBM*Kx=-Yf^yuWf7p|el$FTmVzW#lDpB1lI^L#pG=F@{Z#zsvYpuPClwo@0N=R1#xkxgT)cpE-5aoko=_&G2otwul$n>_MH=k{y7EEmFP7P3=`qShNiBxlAL&_Vqp^8c8`QWIJX1r z-QYWQkUr7Ea2CPXnE>A5v>9Jbi!Szv$8`1Gqey_`ap2H2Nx`|I6k|sfj4;a4Y3XH;S&{E3cO+e>??I+-ytGaG1&V9=TJ%N+Rlhx0mOQcqiGA;1$ey)+)kuC2%^_1aLzvbr73ubrBx1X zi_I8ZCp!KQe5E`PlFPkA#x^$w-X7dSq#r21@mc@USoxmA|4`4r;`~26N=O-EMpoAVkMCWa6k648tRfGsvOZ%}61F+^ z5~N>)V=vYhCiuh2c@tO;jBAL{oWYdzZ~vBOI*xQq9k%EbI_)9m@KAl*XW(jm2>rmV zAqB`Y{d~U-3~5vo_CUss8E6lCh5~=B4aMPT)nP0-MPMCyBybF`6zCQy4n1%f5PtwN zdfjD&O?3;sWMaSk=+X@H^)QzshW8t1%}|HmcFuQ>*>AQuiB}Mt4OohID2O~iPnQrL zYUgSc!@+_oA($q=m}#4Aq3w$r5;oP^*-4@lGDki5;)E-k9ABcZN!Kd8n zhh+rZTuJiU(evAy@N&cX3ER($p$~YvK;}o5F02MWCqC1FL$^jiav2bfMQ%0XufRBg z&l4O51K-LLsS|W;Ab-3j`t&3>mgN2X!~&5&M0^4fIQBmC$e;7F81Tm~NZhZ*SV8^6 
zh$QhoWD|k*ap^JtkfY@jzf6A%{~Pj^z`aDc2C%mp>{Gr>!5M=*9g?iACH6rhI@j3G z>7;$I1BG4cko>W)xYsaWW88Z`ehnM`s9)cI%GHSr?2o_v855Djup;mg4rMzIF*j%j zoa7dE4Dp-XV-9|I!go$|q2V{02Jn+&>|=n!mxkK;%C{swBXEs@&+vaM|Jo{`L5;d0 zI7ku`|84HQ0+ZJw=G4mIA{gZ-jRb(e`x+ld%!Y-I7jTZ?P%$?c{;iIYPc5F9W1?8Y zbPPNgQ><4o!QdBxEoo>e(Nrh807Tru(O}zr&$Z~!AZI-jp6IG2?TL2%$~VGR@XOaF zBcAv1d8gh~chQf3E31Fk=J4l&=ha~dCRU&x?1&+@E@G9Wv(Ayj_Inu*h$W=~a{Gzz zz6Om2v4N3F2W@wia_is9roj(1T9EiO5w#g`r-W$YGq1rr@QGVt{+nX%`dW*@H4B~j ze|?Qk;Thl^0GAPo2@r1MR?7zrZwtN_YD#@H@W2rTKecdS@6>+_aaw|(PD${<5}`va zQWt1Mw;&`WIwWBP+l1IB9^vpNdu3>%;hN`VGy{(KZ(Q@i7qAS8*G`FZ_V)45esLy! z=N{w#&pbZlvJyQ5tbNnyPeSgNgkesd|F8u%9^Hv?=b-+R+zDTC6 z5zD|u4!UOS5ruyk9Bdz*C&D-Q_4lv-|Fd(S|3dddn}TZ>`F($QUWB#uf7HV(E~(5n=G&#U z+rE<%IHSN>Dsjgz{tSri6aGB`ybHr#^gEySqscX_v7deDKk&r-vP*_o0^)o%`~ukP zSA{Rm5T}2}1Qfqqc5&vDI*kP6j}l)1aEJPM-?_AH0J91Hmm2f1Cd#6FNAPH~mfK2M zf?xLicxo{R+_ksexC?v-&THZqL48h0oHJ<$e9)JU0*QGL&aKaw`rfns&Nnvf>EXls ztPNj!>!DZgEne!l%1Y9s*iJ4tfO`3{{3KkCLf(JEo~1oqyTMKzg_V6S2p`!WVjjfb z<{d%lj@(6UGm+p%CE7Wkn8(t-!AAe^Q=*;&_>qv~vV45!^L~5{zwjYndGHv&zxu>a zKIjjQ5Pr1wS9y46eF)(+DY0*_=7hr>%#`98si4?D=HLPE0FoQt0hmvWQK4F#6)D*7 z1EPN?K(Ia7bHf(_t*}#&F*fqz?M?af9>gDob!_{yj-lR(t@hCofo?JW|6{!AxBS>2 z+}2my{BXr3U)mJd8w_zL$GRX#I4BXX`@F;1ImUz@?`q+?Gd9;o)zX_Xq}4D4J)(mc zOD4gFm~NK4>$C!H1a<=U$^&0DFDqk!2%CR{0CPiKav00xJqR{NGQfAdr*i1;kXXsU z-}q%$5FeIuA};nJZubkP^p%re_t@iX>0VD6^=;*7GHNa4tCgF2PyX}*={E~L zj7As6GIACOANr05FPRU4GhqE16A*IfZDu{$Z-2JuC06RtS%MT9jJ(Bl8 z-jPRo3+(T~Cyu?z5U&MyFoinip_O}re|*8%+J$Jn^aD0xkT_=Gcfx)Pekb5fvEPJV zF5;u4{(fn7e~#<=ju8>)1Zk253Lp@J81JMw~c6c<--w{cs?F>ro~=PT$;bWWBncD?rWATAFn!nAR%b3R0j| zX-k`BDUtWFRq(LyLDjU(tHq;zf(P$(P}*||Z_R)DZkOSGG+AOE1)LLjzbB4Se~`NW zV>^O#wgEPIz@z?)_xaIRAb2sn8)9lh{yNba1;&8rNWy1KwA+7Uk8tG<7&G~wy<^mw zAd%khw(tTg&;@+4YcPM7a0HL{&|1P@8Z{Sd>2WkEXV}D{D}tqV2~al zOWIc^!g9B9G=FIOO5H47q?i2j_w>riB6O44`P481aE$bA|9E-3asQ4b=mGrcy;>6F z`rGgIn@#dNS3ddz=EYC${BQh_Wig3_%*U)2ccXm<#aMqYLBt+Zc<|E$2NJ3LxwgSu zp{l&1ip=pKtw1hpO`H{7Dg-AXNLQl=Hxg{FnXjiIgEaIK8xxz$_23-x?^-=;kRbwN z#C8%>Yb@i0i%s6GHU~KU-K@~5XB(ko@$hLQ7Sn>O?c*>1$^(ZThH!w0cf(pK!52d2 zTg>~I-{pVHFu%s|Nx_%%BI`^JTo2)@zQU7fv!i}V@L_+rA7O0cKQ<_EDT5yVu!aDa z4NeXl#t~oxh_?^-hTb7rAE8@;`Flmhjl^!J4>1#FsH& zeQbZuul>u{Tt)26J#ezHqmMm&IMS5p+_ffb2=Ex4fz?C~*3^0~Ub`_AuhnumhQA#Y zFYsrfN**onLa{tumy4IrpZ7!D1-y4AvjnX+_>8g6ir+YlU%XH0?;K7h196@JVi3fb z*1)az$rHyY+$=8=(W4?7z{9=(`@?~Yj_`k0e*E3w10vYx<*bPXu^8l&68CroAV?kh3ja062C)GtF36taj}FX7YKEoce; zg=omZZulvadHjfCW(fHvtxrAzAs`I8^MuciD;$LZT| zEdGhl3tU(Kv_C*#{Wo87@H@X3G$j7uJ66EY_XkcdC@KEAKM{U1VFZ3Mzn?DVF_kXCn&1g&n@_(;$iuL}ANA3Os{twcO!2b;qtzj?w<%59#;bX7Qp(&t1 zz?AHBFcW>|dlP;SY^xLCT3+2}Eg|}2#{IA#!t+1&{%<;019vSTS!9mCVt%sV z=v_UJb}i0_IUYD9bD8YvcPl6{z>echJ9uZ96*IGpKCO>}j>;7%} zcfgcD{t$MR0UQnP1&j+~SD5$VKfyMbCG|G&O3g^#9U=QA`<~zU)WDVei#P6nUZGyw5yu2pWYfSFjD^(%`wMTm2ncr{asysRkTmc+rAwc&Q6_$W7?OJoEJ#C>6`14=4Ecp$bxyP(NMG1WL&V{XyFp)M$x(eSZk8%UPDuReH{AUYrycT$pu>ME zc^AAszC7FzWNW~qcj0JFTU|mHgfVX-D(YAEH=TDl^N`5Q_pAW;d&p1 z^UD)_9;SLvIIYiTV6Z)qyiKXz#c_wJmWthTzs9x;wBC8(zellp4$19t;lf{jOcG5O$eK&E(Hm11g52ekC$MRq z>2NMM$%~U)J4!-u6de3=SKG)#YL&t%YTM8>1_y`-y_*EsEG+JCk?GK{A*cvvg_wdD z#1c+_`{v-J_H587>FVd&brhv~bkOurwq=WZ*4ry2vTTK^phJ(v7`os|Ty3}E*=Omi zA?C>cWx6flxBue#+@oEWj}uuZ0bM5;08)pH_!+v=L z%N@ZX!`%MHzWmZm8hjgr@9I|`-4DL;`<;nD>-X=!ePcskfNsUdM>33^A8zq6z6aEQ z*MXp5@We(-*a(a(BZCQZIOcBTqNI`rU3+W$rWI%lUOcoZ@_(ktCx`n2>tCwTXs@e< zIeELaTDt|uRqL5N!h+y%K>U!a%rv;}0PJ;2XhZd6Pe*v+T3@rcyYKUSHJMc8{ z`~uqqoAQSj@jLgPj46>|q_K{Cuzdv2POv0-i@kgfr11*#1ma8$uy(N7NI9Bq5P~NM zA>`(&psn~9Yw*bfy7NJx#N{Dtfs@|X;6>NKLF`zcKiJV9thc@Y}G|_d%+StL@OZ1xU9Q?50^>3I{L;Mpv 
zrqdPVF7?F5LLBzO7Dk#Yl`VqZYVz@D?Bz zM0m~7&q%J9>=`knUkUdr;X~^%-w?eeqKRr-+f7HOo3VrFI{Gz~*B#J*rXuee=g|{{ zV2*qn`qoBa)o)FAk$vBfQL{9*O1@{3%i3 z7Y*d1foWo+DV|GwP`reH*v;t4KA^=0{uea@KW_w|7RcK*l_m2D=116K{*Wg?;^>Ge z11t5#9ZL9DJ{mczQFrj6RfDai_+Sa9p>}xrHD{l+D#hT^PLX2+JtT1ce(q~bf&Gz- zCB#|>yu+Vu>MwjGV)4YM_KDS>0cQTJRW^RxIf3cGmGc*S04Q94o`sDhn!CKU;;_)C zaI;pBhw;WRqM;LwcHXEqDS;{ z`^kMJvFf2uf32H;wI2YW1>rgP<+mwa5=TCKzRxZ3S%FIyTwP!9Fzf?_3AU?38Uo_& z&=9abEjw7Xtw*hY=oY^vB%=1>)cJbc{+0dz!^NJ7FyZcV^cEMmz9_z!wE>r1N&jiv({u z4?Q37(GX3dAun<$*W3Q)#rVc!{Nly<;9)9*@&)L~<-UCmA2$CtgBh9XvmTZw-FdZ=d(~zV0VKf5`h9=J!ut$}g?aFD@V# zez@OxYg({>iGcwYnqneEo;e|dtqEQWzIBYOuVF_9ico^XBS!havw#t4fS~0Y>&+v_ z)d$~-VL_vshi1qfTPWlQVO;-v{58Za_RV+l&CkQ9%r`#|eAa*9V(GOE@A8R-z`Oc` zd-}yk^u5RW)py?mhl`Ci_$jaO8Y34G#ix_+6Tdc+G*^2X`!BRRlOi z@~cIxVL}E?rx==-nCmOrTVq}aKI|s>#LmXxb%4G8!mks48tg;BPlLGV?$fTM4}axN zOTeI6GH|-zc(Z@&J^wzJ5!xS|?SHC|`)l-E{_xjKc+2}c^}_z^{7Dw9Xj-0^+t^-@S0#D~{yykE6qw(kQmtc}4-RQgk^CA4r^9OYU-wxpe;=McaIiM@n?0^-7KYCg0 zy8;d**m%6XGg|n!T=3iBJd8r{!or#>JU9gdp@7B@nI8vCFqs>{1qNIq`iSi!rvSNs ziUE)L%7=yr>^25=C(*MYy42XKyx0_c(nRMl!g=%&m!JLo4_6c%)x>v2=3avR#C~JIOtxFk`WyJq%c2IZ$070FH@%=uxf!}J#+y3ZbAl~+CKKR$zP!QsO=L8rj z7ITvqQ2*5O%Pf$ihs@$*g%xP}|}U*HqW zoB06-l1y{VUO=!5dTQ$llZxOy1i$#ggpw`Ev7}<=8^4An_e~kLKKk;TlKg>xFQ55` z6Lsj%$fNbK2J>w-0+AT_T;L;eh9Qev&;EYy;%l{JxMZZ`=T9{Dd z4gUsZnHYTkZ z3#@ra?>y1qX%3(K16)CI1pCN;0tZPl5e!SCmpL!^my#qvy0EqFrWDDkicz5IX zV>xgAl0RPM*>X=g39smgPV$AOMDbzg+U=U0PsNJnrqMP^R-DI3SyHd9(@fqL&-F!{N7=1k_Y&)FU#m4hm>aQIHhRqU@AqwE zpBBgWb`qii#h;nn8HHmIIPv~1dc$A{qLMqxjZ^Wm*L zo0*AoZ|gY@_-u2Kp`90sl^;^4E!1^>zB){&-syPl%2&_@=B~SU^gJ&+_q5PislK@R z!Z@I++;)e3$y0N=hdK9Jh-8G8lXv~r2(4L8kGHtmJ$VPG((1Z1irpa9I7A)Ecv;Qt#ZtbA?>Bf; zIT3NBzxXGPj0@YsUMKPE&!8gX>T_=3aTxx21SdG$f|q098ZIEsjDe~Ixf}Rari7w- zO?an;`g1<;mV21g*jwW~f{bmA^Vknd4EEiC0zfBa8zY{7@cDg3?&r1Sc;Iijkz22gm^XbaG7aCq%Cq>+S>o{G|ynTO%MZC4~M3 z@F2)56Jrur4H2_}OBj1K9(D>ju83~&dIvs@&-vkg#udLO=NW>;fEEdGa3p_mnUj6K zGWZh+HUjg14vuAle}SDG5PlDo_x$y~So(ePKJc89N+s`;LZcbHw%{KTH1Le!7$9jC z>jC)2$oVrluh@8B99u6?8Y3u*+*)#sEeFasoG0;_mi*o$=QWrFm@N4;#u3^>icwx3 zoMM=jNqyiLJ*D{^56{EzK}Co5o09U`Z2riuQ;!^q#pq}R)fVG?eS?}k33I@e(cxriPVP@Z@`NI4&4~7DrJP!y%+`lK! 
z$5w}4LedDwL;LTC{==a?;IaMgFKP{)9lSsKAM{Jf{Rz((l+1u@ApTsuFWP59%Cl;L zYZI}5mmztlD3AH6e-TKI-}^U#K!K8im}=-(pZ0;s=3uYk`M>%HNx9M_*-y) z7nO{7d%MJ%g>hvM{7dBdQ*er*X@DOFO(Tx$Pkn7*$B^-XxIXSb)F&^ILJajU**MSB zpJSXKuKzmb6|DKK1Kyw0Kc2_?;h6dDnDoDY%jf>UmGi?q0A>ygO@;mdD+=Ykk^VD0 zUjUTXM(;-b?FcW2OxhPtvZ4R~j8|spAH(rb{$c#PWc+=eKh*CEb{=$8a2{O@_gl5N zAI1wXV$;vDl92Kb?ScNV8v4uE@z7r67`(rvfBhQYpYcAt&rp9q<%6yU&Xd&a&wQ|d z`&|Ed-uJNt9q)FzAkW`?`r|*||ND4oAJo@BlxLNF-Um*j@A(Tpu>qej91raelNh>Z z=nr@B_F!L#{s9S69Lw>iyhHtjq`r|1f#b+Vlo9m_TW1Cr8oB@B^L~F(A3Vg5$$lE+ z88pSi{0i#bCpkuao|Xm;931*b4nJIf8;q+F{u#ei}Udhx`6@#lZUxg#tOEm6)HG{Mr?AlwPKj{#nJGKhdM7fC+F zAAU)elr?8B*Kd-i^L;xz%y$U#;xrK1%U+~Ff_y;A46>!kZx+~Z;_tDB{_{tFf$X!+ z4eQfj`k@d*b~72xI7xPlXdW^d!7qtH!+#3}}IeQt@@czuO0DMl6q1et)Ub`k9t0-(9Xy=HSH z4`2&jqXa~tV=c^+YpUF|Bb~H=u4-v#FU6c8YB9J&1L7TT1%$N3(GU%#i8Y>k|@6Yrj zU9|oFrI%KD>@Qb+#8sp8!WkKNYf!%wH8d5(fB}&J%`KVl?n|^ZLcq^|C3J#Ur{$FJ zdT)?(Xv<(=Byn+5Up``Cx&%WnDywYQ(GnIy-L8w}JdQD|K(7I;^#B2_uA@| zh=gO!u@Xg)PT-YGazQDzL!P{d8qujuXedx2ww1yhMvh4Xl$o~E5<1lcq66uXSU(2^ z!_@9|S*}^vN@*y6=r^sZ(>O(_w`S7ZHB#*sr=4JG+LK8+*V0g@StIyw{g@Xo zcQ5J@R|Z1(&_{8_rkAl!wYN=wLw2l4;a^6LOsc(%D2jcPE$oD1O{%$!a%#4vve!tX zrWxdCxR}bOi#s)WQU`NUU)~J`0qz;~E~xdEV_xrs0;5#A78FK*W#eeX&lvR*!&6*O zx$Px8k07!5x+IH{G@ZOJ(t}f`?Ka{TY&U&gqS_kDC<%%9WSJGw3A z*Xb>bu8aA8dfi6(WA03ADoPh~eOgt~c`_&kWRojkk_Ol^K%Fo?>azjmu1)VU$CsgY zli2t@5b>Y;p}bIK#_z<>@1PPEXr!L&Yl3qI2x(&SJtC9jen!AIh?s=r9_^}J9s{`u zv2Sjdso#nc)IA9$8QK$Vhu`A9%T842XR8*&?<04Af<60^$8{Zcq0rk?^w9U61Z~F~ z<03>|gXt=qo(Evhf^MgK{Q-$btgqJ#vo#(JDS| zKXtma?r7(ZpXgW-;Eckxe7*}57C35eB?rS_)OpF{^HMf|b!?W2sA+YaLdo1YD&jd_WzWdX5=wGL%Sr%wt(nXQ#BLUNmKvM&4 z5j(Z;47gTGv|*{20L-8+pxnGX-Ow*|IL(Il6!TT~#6%IKWL!W|5S}I9KOp021;U@t zdkSd}Og*4w^iD9qHZyQge)^Y*{`IG^08tHpj4dchFOgq>dPX0Yz=#z)L7fFIpQDc8 zz$9(9Jfc64`i&J9@3zZHyV}083sI+qHj$<8GKgrGe|FD??6Xmp;XXfg1WU2^vcPjB z^6n__dKmv`#|HU%*kH?~9N^DMe78<;eq)-jR&pQYgR+EU8}Enbqf(a(WMGp%6XP>~ zm)S)LHN}-N^h4ZJ!aYgU}zJf6oC90V08oc*D>k8 zq)o{@hYW2HHV!alRYQA8=m!oNsX68_uh! zZ^VMh`7_R|pYsgP$Di|k-wDoe{*Jl-SNmgK38aVT<2w__d$glQ#w{UiCM^8Q*SG|t z*d03ol!u-;?uI!A1tqD|b*q;=`U~c90huIwaEf7mz%wEIfN|VE;U>{# z^e6Ny^e=ctMf`1+G{zKIoWW>E>gM49UQT#vkba+*LWj-Hz0sPx((B}23EmG81H7N0 z;r+ngg7?$#ey|P6`{g(CiX*HOP9=ppAToL4^QN&TLN^9JgF7r=FL{)Y4V z=RDJBB9NJ2+G+$2>!i>^z%(A#CV2QiYcJ~1Ts|>hKEMLm;JQ2hlliL~<}a*}|FoY8 zt7MJ3Cu_MTA?M#*uIv|b$!(YD#ne+bHdw$L@LHPRl z=ZH+UHYIxLcsC9GXqC1xgrL=b{{9%l5~(j--?vZs-J*Ok%8K<6ef6KM6DW^J+6oaZ z25mDN=1@K;z9| z0$CHmQ$l`olq{q#1J`EV!=Qd*ti&5^8t@zT&%Z&WAU|bJzaqAUg6}&K3Te&qAsToW zWEK%SX6fzHH-cBP#y{~lkd*>5D8oE?C2KSJy{b*5%qd|9YNvAJ#OC#lNU~+ z_*ZQ}<`a|yeIX=sJ8X)jOy+B1SNv7EiM@xpp44v~3V633e@VSKO@=Y>59J|q(T1R? z`9mwiP9l55X6cohF#BpxizEGJ0~`3~8rXDX?nd+x^^bIaHIxnG3w;XnCi-f_pE9f| zq7Hi*+%ZF2p^TU-8T3`ynt0FO&*6SDjVrVWlQ}-eb?HuN__GM$&e1E1 zKoh&ryu*2a;nd4VX-7}~({*GI@t@w0tP>7dk3Ezbkrpp`*svGljhsK5E!KK6 zze-lqi1qzX*YXhC_|vsEu5Ioq(5joycp-K3FW-msHLPnbnd6eTjCtpOKOZY(eDpA7OJavR$YZiG9(H2j z4>lymuyh=bWnw@6_4vWNpiaYKeL;W4n1wf9!gzrVWuVM!K@NYtAMA8{IXC)whe^yM z_2m*94t0+G1L=#{OQ7$))6aQQ_I?hgOY+;CjF+7zVGI%bhS=HQ!IyA;mDV_4|BLg) zjzKwp_%-Hra@?Y=bIi5;YM3{34`UtkbPYRt`}KU3Z=3e8`*A;sCjAk1F`?)W$Y$;v?hlUxh!+Q~bKc_cb2RjYdE#=|5eTwT4rv6;VHK?Pd zLu@z&HU;YI^BEO|<@NI!whu7DpU)UaxZWjgp_UF3$=l$j3y5r~lL1{&skzJ}j4l18q76MOjA^>{y9`nmr68Pm{; z`ni5G+}~|}%^m8{w-@-$lKdtb`ZbN~C2~En1#ms?pOS0w&bIXHT2lV)&$Zl8mY-|! z-u80z`dPJ+UQuYPffb>q$F*f#iO;p5+B{ZA@(X?f12UFx>m+TC^$h;=Zp< zhB9-%)uSK}_wK*yMI-amlKQTfWTTx+U~&(kzaifn z{ZVo|}xfZY$#n^_{Rs36oQLXU4aFFc6;67)%0;Bl$9_4)nhpAL@? 
z$$tl*JE2!Vq~Mbe|2anJ64uSHV-o6t-~HzpR^Jj*bKl3P)f^bRU&lm$YLSo%e;WB6;A5*Jz|QKmw5y zb?2VcJc@#~VIW^Bma4Tp*a$Hxpi(9AQTz_a1v!Q!GZJoF@;#cJ9Fr(HT!Mf9xrDU# z_veC~A9E+U9&w5ziR&VN9R<%x2$@4@^N*#Plk*A5=fZKYlsV8Olk+*Q$F5RA{&>re z5v&KlFrE(zT}W*_$@8JlgmP)*Jj#vdL0^bG7v-9NJ@-GkFZktT{EpOv>LDo|-{U_2 z?)AHv)F&7PTmDKy>%;p2MMy$YBo7bui1;?{2P8T9-Fb=ff_w^p^*xk-s6Q3i-Xva& zM#k^nBtQOtteuDU!#)w}r$f2G*G9^P_#o;X$0&cyXXGy?BDBX)&l%B28t#u2fO8A{ zfO75!azyTheX{@_R`6;X%anAadbh>kj+` zw%0gDeKMroNK_K>Y4RR9?n|!!)lWa;j3fO3{o(ZK$G8u^zmoBYznISR&-larhH`z% zgMQBd9PugV(4SELp*(0$7lLd6`^S(vBmJ9PPsY#C z4~PCs;_wJ!|C93Jdc4Pk)IaJK=g|%^`8&MVlw3dbo8h`oxd=<=*YAKkz^VAtPjDUj zalJH29wMHH=M4SmG>jjMwCAUsOOy}wh;l>Q9OJV^dtu2&Ifrou#0SPbhx?H0Epi>o zPrx@gM!)@kj0eKc_|rdLz@w0P1YPqBCx!JPhSmR1mK`2Z@ee)=y_zfrz%ybgg2WhE zH%K(LC0(6BEs!&X$scGJ;2A9v-$oa~)IxNY5p_pXVk!Y{#|GYn=ST^__KbE$q=)eQ zgTEQ?iN*jGFX7imKYoz+Ks}+pu*8u00^=ROU=jf1m~O#EC`P0P0D?jqT25+$S%R%^+>*F7;hjXB9wbgMdO;r(EaBCM zXcN7EoY>-paO0woz%OJ7*Jjd)VfI1kth!2Oa&LP~55zECvQ*_J1N z%l(No8qlaDuM&;7r};}PQ-kG%G)DsW#v}(Ad%!~i9k7TTcryiZu`Qq33UGiN>8U`% z0x8id9zWeZ4?ySG+ogK;XIF4CyjSgcd@O+$%dOehc}R~)U*|yu1^JJ_A3p;7>$-PN z^E;;wL9)r`U2^pGJh1iPVC(k5-fa(m{+V36JE^PdQCU2fV~B0pEBiFbzwWsL_swvx z9;^fX=b0L^XROb82obNAKj+84&Ido|fBl~4`Eq;wNn zpZD6=@1@RaAJfn;3S@H9P=I^`2H}QaZ9Pn#F$8;)qq1uc%OZiA(Ca+u3Xsu%s$i(< zkJIz`w7h=4pX%3e0{8^f;(^+ohRR}kjK6<_|4OX?{6=w}-(^~Fzu!Zeebs$$ZFA<= z`LBA)WPTNYo%f`#wgDrw_tiETQh>jI@9EOd`S(}bX2>r6t`o4#O{Kc~Eva{yu-dM^G;4!*@FYAS>+g{C!C3G&wFGMWht( zv$A`F2L{JUaoqECv|klShOAKM`N7Az<+(sv9-%*@1$8wJP@e}yroY|=_43^(v~KnN zE}1kzImo^H!+v!?`tq-GWB>sE{1$8ys{T4Diymco9ZR|4efb%pJ%6r$$$swNuK=w+ zPR>^#=9|Eu<$~Y6!by<@_w^f8CLTMv6)!ujh^})YWOfyPi=#T$BCu<1FNVuFt>wt~RxQ&Y!_i$fJFD zQm!#FB2l-q7VXo9d;NWX-Vfj7yRHxYunG6c`|RuQaBr`nhFPNCnbUMPT#N6%?+?KI z_xU*~h_{e9lv+N%@V8ULT5Qe?Q-uX@gZwYB}WRg6FSANoRnZcTojkH4RTP{sH8 zo$`I&$-c*w1)-L&^Y7*Nn0s5&uk-fz^TEsgbN;6vt-v(;b)MF~>u+rToZkuGbJyzE zm>nZ)^!q$0+;(rUTjbyU06B=?_dz}lyp)q}9DJ2wm4)B;m>^g1>mI?x`94p3-*p5A z%I~^9|J1>X{;7k1sgr&G{;Yh@J1aW)o=b85yAIFF&pXgReRS&NU+;kS`+h#aCSUgl zqao0nA>|N37Z_h!z20c!`IEDFQWfQABQ46eZFabqclz)|H0Dqe{b%m|9IYu z|9U<=4*x+;Qf+;?O#b`v&_r|Qe?8O4Cm=r8k4cD4KateJUI=o=fDi+OHQ*Wlb7K-2 zz%c-ehqy%YIQVC4W#nth4nXAz5OZSuj?H+-!1E2sGrLveDQ(4D?Z}pv%*^5fK0c(~ z6zq6W@`@OLtQCP3=|Tcjq=!6Qcab(7wY|-d=o!nGhPC2+qDTj873R7UKoI!EpD!hU zXQ|W`!|}%Mz5T4qmejgzCuJ8I^J;8ulg-^<_K(!ucemGhFDzaMIV(nVG&wHri|qJX z8&TRRP`A~yTWr6NTjJ4g{ z)jcWhl{|Tx#cR=JGIhVOes;^x{$x5;r^m(iIejgrl<9lLn^Ldu(b&6r3vcxXy+oEM zYL?5pc}bVEC7qP+<76Gvj5;5Kr?*+xPv;zFN12|@mHAVN-uuN}*lVTZ?r3WMyiY-- zB6LrGVYixf(HX2>FQbXaqBlQmefM@}>HAq#oA^j0yZxPLmwtDM#Ug1Zlkui3>%dan z+eGiHhjP?Uca|yYVyjnIv#KcgZ(EzPQhPGiV8$c2OJkds_on0Lv7Rng599<@DQ$1O zdUrc2)8ln&?_TCSWcXX*jJk`@UF8C?hMIbRw;La3KL2uaN2TML{JY`gm0y)in!8edqSv#BoxieNKzjGDRa0 z>HmtWxOuO-Rj$dG{q)$}Cj1D7jJ~G1gu)6|W+P{z-lo&pN=c3arKQv%5v%b8GWL^y zII%< zV?kM=Mr~eiIz4lj+p@kDUGCrZ%aeIHp5Ok$WUdMwFuG{2)6s>$->>sLnnUn?8_r8~ zMao!TGMb^AgP)}?;_7GnRVTsCaCCuxxvSPK1{z_~)>5uas?oU##z%2Jo?m@#wkKh0 zTSz;0r$xHcU4+?D4A-U~-p~c8*;(yv#%beZSDd}>UPJTf{Mu^SvZS1+WNr%t7b-uF z*!3%B=;er6FBY??(=SSpLp5iiKNn^ijSu5`J$^}=Dld9lqZ{pI+1oTo_w{RkMu)WJ zH?li^-vrt`_>#^urVtuaXhl8!ar9(%RJC}GIcgR?!{pMOM(fJR)Wviw-<`Lbf#%&j zKNoD9=bNp>tfKs|)4j!eLG6KjRg9I=nzp=KtaODNnWNDr8kMu#^KRY2lel3|m*aZ! 
z|7PWg7B!c&Y+DhG=-JEF1%58Zws-D<0{ajn@>e~w9f!J0`>DqM*RbKgt zriCra|LMmKE1x4D3j5Z-B|^D&1nBZ-NO5E<{Iqjjb5d(nrxqM{B{rcI?aw( z_Dr2^ZWM?WdYnt>yyXhUK6X2b-hVHb*Jl`?SL0nq-D10i1Em^|MYVyuH^z2do#HBY)Gyu&I0$ zw;~(A9|tW-r=vBL*C+Lx(;=A|U(B8MGM})=o_XyEE)K8x^dLL2!<=Ccc9vH z{^*qhQt|t{yzbBop*OIU&pTzhOB*mFEX#xzYYfT1>mHgefeBoVSJgUCfC?VfvmR$} z=LMCpcH8^ijH+ShS2)=U5PygLzHhIJXgpv*xB%r!v;q$jB3W+PzTujS+2L`ew;4Y> zjHkvlu}yT_s)IdBF4?JB&xCocH~RJH7|*7F)i%9sLp0#G+jv&%*W~C)r!A&SyxIFY zTcUnjYBzO**{GFnq>^?w-xTG=&g3|jSc>u?fVH23On_8&`_zKf+kfTQh93bSp`kxH zk)C5n>6v?Au2EHEV4EO;CJ~75KJFNV0k|hi2H43&zzcvpw>?Gn= zV0O-1fZGORS4jpPbDBB>z7KLt&5eYCyh+TME3d8E1{_1qkdYitCV!vZQ^dd=bN zFmWw#>T=gAO@@5K$X5j@_wp>Rlf>GHwpWXBB&=8K_|+VtZ&oDg0*1fYRDCH$9i5)A z9D`9BOD!flV}E^ou#b>a7is8BDTF?z#*TB{;Cuq*Jdl-hPc3S|&1ac^tjj4Tqihd8l;>0&$;;*0 zJdA>xmLSSHoy^$`gSLfVDZ%m4K&;wb1CqtauG~+Bpxm+Z^1V4>D1Ul92Dh81uNNby z3Qgb@yfAX_{o7c|_jPg>E~qZ%mOXVaRiYefKYy4ul0>x$>+c*|XM7P}(^2($_F;LG zUs+cBb~n{3VGQ(&NiDPo>q`^B7X~^0b)HA=aXy0$?P;rLv*mLI?V@=NjIr6g&T;sp zj?SosE!dGiEDIXG$y94Dd-P)A*H}HL44~ul`sJze#9hLRZAtBB{w@rDJF?Tv~S1Y`0@ucIWu57%fME1ZLa&21bS{@6H{CwpL;^ z&h^DeJ%yR&`I3rOK8S_VYOh`&@7YwaPJfThN?yOraQ42H-6|X(5_P-BKcq#tt6#et zYwP)Jw4!&f6RouoWrz86F-F&KKAlOY?7_Jc`$R+rIiFayUANcfF*5eivf!y}-nmb& zo&%0^cQo^!rSFQ_C*uvHFRLKv`6pHRXp1e(gXf+XuR7$k(Ph&*XWy&*7|u7!?}FG~1v~!9>&Q40qQ~e9q=rJTtyU^d{Lk#fp+A z<4BGGAPZNtL(YqCj@jAAVw&(vu76&M(EdG&^(rc;v?`tkdvHyEAN%k+-;1-(%%U9H z9n`3GGN?m2%|kk|r|p5;O{oa_zDx5BQ+pHrGG?@NFBEh1tO903QY$Zv@P9hHEIiaX zd{=qdhCdxgr!xjY;7$bNak<+uBRFPC=cD4^=Mj&@K!W1_7)n&3ay}?|e;Q2yO+JeC ze7ZAVnD=L9d)V7fISdce6D(rtXxrPh=S9AdAx^cLq9nljTA>8Z?p??=C&vgKpb{{ff2r%wMW!kTVu%{ zCxL3ViAimCXXBa~r=$y)+C>TUWEP+@=J&iIpQ6BYI}Xl2Zp=*YGk;yp%e|exCaHMQGAn!^eR|{L zhd9u|Z>sQ~teg4r(OZsC5nysLzg9g4sx4*W$J?Sz5&;kpRc^$^zF}ZnM0ck{m!W+W zk4N`%L-!;4#V{9>0e_daS@>40-xsGH7Y8m^QWoZJRHau_Qct=D9Hn=rs^PA`nsQuO8tyV9OLE)8$d0IsVrxSOjith3MC zcr}_-lW2~58=~aLV`7|>WVD`88%R!eelx-hSp+Y(_c1@9?SEb|pwZH;G+Dl`_H9XZ z)}f>88*M;}(o`zI!C<0tnVLKu;b6};;R*Wm!YWi~D!W3V z*CXKiJ?z10$KKz&Ib@}7(VCj(d$$)B)F#6q^6|ZD&N0FZl`@f~pr+WB?_^HHN05Tz zk|}Q7=gd5eF@J;jjE~xSccz)^YTg@^W}<}zK(oa!qW5+4yl(tAd!CIaeH+g@D8(rA z@(TC8*QKm?6tqZfU!Y$0N?uCK<@7O5{V3@XLtm-2Z#Qo^qHqhP7I7LNwhtfw+Y9|6oI!&7xVP`D2**;GEgFZJ z?Wg+X1+j?XhC^{cvE)bKH7QDWX_iyY$C zU2@poubQ{0wOc)z-X`*42O%$I@>D<$;SQ4XRzZu<-vl7wVMb`lL!CI?LAz&-Tjf( zRurxTKJpgx?VR!;vDhD{Lc1>yooC!;8tesk zV-{9S%$P{LeT3u=36FVuqC@wq!WOdXAb%Mb3klI*`}|K#SQpAy_!}w;n$4~f>CMIz zZ`%5SeKkU4ZyXNR$#!}kGx9n|ZHtX;l0R9XO0 z#1=&l;{q%2y}VT<`h*;FQb;AxCm7_hLOdO(C$ zH{k&CdiAD?@LupXgwK^THC~=6d+D3dRJU%2<*}L{Fl2(R*nfF`pVHpt zXiv{2XZKw)v(4TK`lE5;RBs{Nu1I6N#j!cbuBC?C%`KQNsu=}sV2T;LO*iv8xqh58 zaC;PERLvbT4Eg8nz#k2a=wFy}PJyKg!R5d*!rQtmHyxT*)f}!*UeiqsIPRIp`v&%x z=yTi-@AVDz^6-4tHni63$$xbt$~&*yS6dlFi9QS)!s)rtVhp7F_lLkj=LKP8x{@C| z3^B~a$?^6A*6tSC8Lkamb_BY|;{hkbM^RD}Oc!VL*>Trzgq-R0{j4A?H<7Q&fpOYv zu^yE-XXU(ao5kify1Xmz6xux!U_CgbRikJfG(l$cxba!3vK(!f?|;K7SA;UV7@^&+ zhL8DVGdhe{ljx?Rf5)>MRyReiE?6vk9?C}C^cmQiTQ1pyz-2Q6OPQNgj^Mm5sGW)9 z$5o+h(FPRv=lsZ2ZpHvH6)SZ!J>Rcys)5wCntPtN5^}*g%;M z86VVWZoQT@Qy)*q)mb|3(&uh&xh0L6Ri2x7m|O)t)p;!Q;(u9JC}oc+NC|6op;MlpB5T2;#K$9G|+GH z*X3+9O(1HhEl1HaFjcc_1mGs3p0eh41cyELFk7e;Msv2-11h}Zji)v1ref^n_!ZtM zq-NB1yf=15vVScjomKrUW-wD;qHroIUGjTzPMkD7FL zFwnBuGyM&H9JZOO+ci+v@bvVwIk|WwP>&Yv)~ch2gGT3`FOR~rtf*$1!G2`ZGz%*< zSEL`+V~l*_Ew2sy8F%ovnW74wG6p}9m?g#R)m2%YN`G8M&9zxgHy0Rk&o4_BqIQvV z&kt>zm&e|=p~GEJ0rb@GuLk2;=ej-}_1h+853z02uk}{BvGPn@-y?&Xu% zl&`2$AebREJ4t$=U3@E%lONnfc!~6K7FE_9hAEWc=$S8Ns=1BgP*Z$0W((Rr*z@&0 zKYyZt)~Z8eZ{m|E5BWp3En2UySfg;4wta&@W`ZM{o 
zM(%Xut8Nct_I}PEE#+iUG*{5e&BfBd;ePgU3sN#{E1@rf$-))r^ryoU;U3jq^(5P=L&37ZERHCiFwdR5AjM0;S+@e`|)@-vr zU`}8xn>S4lOUn{yZpCc5m9Jq=T^+44e5^H%mr~JqZbmTejO%VJxqE&Tm25Nzr+-s= zfqwX^IeKf}c)WH>yPGJQHPF`1&U`<1HfiXk<_J~6<;cdP= zg0$Xwxkkg|8742ZTGr%`Ep8VxtCI-eTQ`TPIkLwuiyedyQ zA2#;(q=SF*?Ro&`1giV$mO8fy1(^!ZyM(Xn3{q~_FfJG8J;VA?W4WVOHh*UfpY{59 ziaJDiHaokZ@4UR-hh7ZlkD{L}cTmbc0XcB(5Cx9!Lm8=zDHIWlIe$JvK~qhx#m4n# z_xR+Rm&Wx{?N+Aj3)WlntaA*P(~VV_j3UUvI6PpWVpl8ILrCOSqBSgv4oEj7q?OOV1kE>(Z%hthr`L^3~v@*nPWk6?GdaY9dsLY%UgQ5 zEqaKmOFL(qN&GkpcrS((O4Ux{uy^k4>$u;_)|KC?c_)KKe+|qCo`2UHx8DnWBHqHJ z+5<9JjPH@b+0SWNSWHLN)OZi#oezzfGP76rVrgb=KrN=-9q!n0V?B)Q$5Wclwc`%- zn7w^FiS`ZyG`AUNjhi^J&J;fxpTp}&a3=Vf54w0pZ?vSmlNr|-B!N0l2myO#vU^J` zJzgy_(@0Hq5h88+?0w`_C_wIH?m;Br|=+nQ2E3M8y6M5k+G344+fRe#@BTiv$Z_M&(DT0S%xyyxa z?HM+#JjH7{hRK_e$k$uL_E2j+gzff7A5~`ju1@g=-FM{*?>Ed=DE*2Tr0MifydY`y zHW*>|7zz1oOn;$uPIDMzVnM7`F==^@N|bOn{oSLNd_?Q`L77o{f5aC|6k20o_WipX zKc?v__gD#W8LsQr5~4<$UG;VxBpn99M$^M{dpka!ka6d&7=#0Bksse>nJwR;JwCTo zjJ{yIwnrLYoi>|J;kC}Y>I-KoubgKsJvRJxI#Sak)PJ<Iv=N#&4$lr(SEm)?x}F>-nf6C!N%ICa{lO+%sxNtJL9-a+}(Dp$%i*6 zCy>wuALb&Yr`<+*WpASGJ>Q70Rt{q+;>SFb%-~7R^;UcikROH@E}><+!rzxr_EHYgtE?xB zeGKjXm%~kgC~VmNphB!}5jzcU-Y#LlWsxd(TL;qW);2i^VG#$5i)4nTuDv!*=*E4QRhXIZ1+Y{FE%_wB4TJ%)}ttMBy5i2p4O#ne2}0Lfvhm zYt}W#^&DM%;jpF-Gw!ABn5znB?Pc?*M~AoXHQ|DxL3OkD(@so}C)v6Cu6}1{r}>G3 z3E0H;<(r;{Xjq(-$Lo^n$BA2?-3 zCg{giFpf|VPuAnvDOKC*(l5EkYy`1L&p!te^gdrNn|XTSir#0iaGx&}$m)%%>1=P( ztht`P=BT^Lc?xB)EXHF?z|g@|WKRjt(-zV5$N$Plc$L)HIl_%=X3gefj82X>Ie$i~ zR}g7zHM%mGWPQym4Z`+pVO&YMxf z-;XX7ub{DUVug{H>OCulkY(rfCo0Y_#x%)RNU z+IcW2>_7MmHy$&r(ENY@-#{?i`TTWs_#iecyw3^$jZ3_V`8JEKhV~Q$YzNx{ zNVp6R4TB>Cz_WZxK8PgZcz?AMPD)NoY?gRBm>=AuD|bDBIRg{>x*|`knA1j0MjQ`CpgX6;Bus~cVqT&0wSAPgcfZal3B>bp& zMLy_svAlwJ=7pk`n5I`!^8MU+M;sQo!BGMK+2J#ASinmqMB|Ue|Ec(uOiX~0Wf6PP zjq%6q%8T8Zi5iSgb;lm?QVDw%o4)hOv6CJf3cqZ zV$m7XGKfGVvCK92xPN1Br50b4(}Lys*GN9L`=jBpzAp$|d{Y`*XH&Z6?L=Z7XdSjp zYH+cML8j(BE2j&XYG?qbWDG|r9DJ*rn`T1lNwi~M?okjnWDUU*CrI2Ly#cV^@sA!? 
z6T}m07!1dl7mcT8ukP@>Kmt34yu7-_zafyUUSP?Zt64Z1-hT(a&g$`RJ5MOB~hVcOM9|Yk<)b_cL~># z{e_V(?g5QXTkN1e+)1hLV;?CPOW8@e*?ry@hVMJPw~jos7(p6;upW5tZgfkK7DUd8L-wW3OPMQFZ(MF%HO#YrY2SG~BbKwc7x zwr$FMN`J#W4iZL0f{AD0(&_i2z4B1FI?x=|$#Z6}NO^JF?$VPBeU2`tc@Fc2LV?*EWDsHh-I}d;Fe_O4f0Py@GX8q1+$OESH?m z80+KDhIQ+)@7vfhTdWJ&_pR?^y~uj^Sf_yN%nQy5&Oj36s{O(djPa%yMl5K{uTH z4eyh4zr&u#dl}C9&w02Gaj7BaoxFdW{OPSvJeZNcCC4W*0d**pS$<*$*-s7f4SXEy zMh*Mv#z40}hPxHpZSc3y+Iw}vUb%+B^M6$FJblFxOV~of{T%8AMvFM^sD)M*Gwd7a zQ8aUd@@24~LnUi=cutxl)Gs9idei!mjkCn*P=I6WDPAv@lTc!t#@aS%5Tw2;`Z1Wv zAYuvFhf;imyb_7icM_*NR*zT#(%De{eR|AP;f6SkI7s3&IUmFOB<~6PKmNY2A%FgT z&&nq*eq+guoHz3Phj`wSf18Ly!yfspTaNElncdp0d!2_W2dhegpgt^z4Kei>V}?Vt z@)WpA6G|<$<`YH|F?%IC1*5b8134r?8PyAHF;U$jmTa1f_z)6BscR<9S;Tn%y0@Qv zV`Ex(D;;z=er=n*>Ja)qjo2&-e1B3aV$T3^lLBrUC_!eQ^|lINjWXPYtZ*LdtWvTu z!@}7}u3JVUan{27)DUZ}p4<^+zYp&V=YgDs;r@c8-LO{QJww(A@zo16^}Bvt6Bz;s z#A+gnob6+?#-%n-7aR)=2Mrh=kph|}cOIV)IBI1yV&&gZJFzu3zk%l)i+`<+^MNH1 z5+JRtig|7oN}v>s^=wh=VE>`*JySv@f)EXl?=c#3#2`vw`8TYH6_vd~^rd0vtVpq- zl1uBH#objWaMGr$BHS1S|vtf46p6DM*yY*#`334qY9*WX(=A8Ps&F2ih;`QC`nk$z*kwnmABUiUVA09N!1Pe*EW{WoD8<_tJ#e4&K zpgy&89cN0w{zqGi(tps^6qCADV0R)UxSK_BnXs|uu&N2d_-?~fN2{R TcI=*paC zsop_?#3@cJv}EP<1~;VGT({L-;a8WigX#m;M0%zB*-?4$a%(8X-mVeySj1~v!1uB+%wWq{;B8Z5&x_UgK?ywkH+mY`E_etPq7o4*g|l4e7j)iDh1L0}Eav?ll+B4s2d zb~Iwi=L`<9#q8LB<|l}=pMQU2b&fk|IFnf6Pp;>3xXZ|X9CF!DT>5+te^}`04Ea(5 z8<1}&}&C#rVa>lP_A_W&E z4zMEJIa5e!3xI)cYD+1BW7mRBAyX^_i+dH2Rc?jGC_YpILjfr^sA zNyhOp`UQcG&bsEDskq%#^o6?B*pSg})kN4AWPcu}YZ%2M`%Y+C?1KI`)IRoT?n3B`7GHy^cN1!!@02e4G&zi5v39 zq0}^U_6}^g;WUyr+7P7_ZRCyr+@ECM4RP+1KmX*@;f5t^)1&tI)Fi_mMh=a&jfd|= zE`Oer^@S{f|IOiN8{|j|Y&h|7Wj={b4!X*C#@Ks?bc0h3T5_f$l_h$8;69Q)fRNwX z2bJVgbj4L7 zzpg*=Q+0;gj_l7|OY{YoQc$Bv1JsKG>rp$|E;-z#GWOGO*6=&v>>@>P#8gGRlYbNZ z&Zwis5@uRZf^{T%rFasjC6z2Sro0DzZv5JXE{pBeAW3P5GtX!Ex8WdGBEI4(zjJMc z8c#L3M&bFqQ4O9(JxOwv7;<+iv{OIo>&i_4j^xbg-dwW6@L?}Z)DYG z0#e5h@fyZXTo1`>^7Y_?;!bkyJWfP)PfH?8q5da%w=rHM27irImqjvxyc_Wt&x6C>{(p$2zgWgMPLuobd%iwr@8>*V zoyo_MZ;;%Umhe1=ybFIwjdQigSxG|9zV7$`Ys__hd$PGWGci$pW7f!4SvhQ*95A^0 zv$j>l-Bh8Wv9A$x@GQBcxMJ-nw(*hIF^Fq+df@iKd6%}%7WRpWsDFcnf@HtC2~Dh` z7lkFny0)w@OV(L-A42A+!P>>xKt~rm;6A)K>Pp)YpgoKjC14NXJdyj3=KA5@_H)!2 z0^2gc8^7^t2Cb!rhodXv6{mjU6;BJWKE9|wciJbH80zGq-X-fy*6mL`QzXFz?u0st z;CtWi{X9QByZqn2?|-*8A~B!TQh)Y8d5-u)^5*}L#|#(_^1h#EjC+Q>PCnezpPxy7 zhfki=|DIz`!BP6XOSpvKFo@qiFLdQumV8ejcW2{c&-xn(FXADY!n%>2!WNg<-wJBa z4d()zYiKtpV7bNOS&}s+^&B}H9SnW(n!`CAuo!YqnG*g|h=1QCp8Yc?vTvyNUHlHv zsv-9b$A%kiD@MqX9K_y#$)7X|uZLfsyaiyQ?dR^o*I=>=AIJ0N?A?>-1F@sfLOw)( zo-Wylh1jRapU7(>ImgdC&pZ-y5sQG2k~4_<%C3g^`OTm3yQ5?XPdN7>_A2lyl;?;W zB)Lc4g*$OL^M9W^k?bRxz%1tq`v__l*hh9|l6^G$eU6`aNHC_^Z|>Bp6VyfoEBM4i zk~e&wb3g185@SDqBj&^d-tsOStW4MJ4UVcf>?f|B2mo%mq0M2l z=fUoo)gP={6C8FY5*+rw<$=TR`;#N%?)#j1!AvmvIMlYdUw<&A&z<{IOOxkLYC96o z|KPFz7AMHL|JFo3UW-NZcgUCUwL@+Bsmp(2!+)oK`o-{F)}ipMNi42#zHQ7c;vCxc zZ%n}cB5^skl0;;I^s-w<6jJ#0;p+tSo@F8Qg(kuI;NYW+a|X=1OGJ%#Ls){4+=VVB zg6TH4j8Dvtt*Ub*{sO1sa}CO;c&6AxCI;$|a~on1r>bOoQFOB&7VkKgqrk=Lw_RA!X5L!#dg~k1R>Id9KUc^?|_Yd?qWA0 z_}dx>xd4mZHH&(z!6{8<5yS1q0+#z z#NQ!@9daM!G*~Cp2Igr6O&S|T31TkxuIJ;T!ClNj9)U-0f!|NWI{jN5B{&LUj(;PL zf?U4uNmCDTG{rg&vG9Yz4d><0U6_&l^knZ_pE~3Dz4Hb zl;PnMv5ll6f10fGYNFeXNpgG+oqrQ@J~&>4gfF%rjX7d+CnmAPw-PSmm4i5ieQ09A z%};$-q|m>8NW4RXQ60-^6Jaltcu$I$0W&}hC1)TMk-PDNjZx~)+1BxZfWvsJ3C@iM z^kElFeA}_1tw+uW_D$jkB%dW`3U{TCJwWb24gD!Re^L|v))^w=2kr*hxPQNw3OJV% z@Gck`*b@3UDrCO#c7Ey&Yewz?^dIG#^5ehi4e~rE8(=*{&5-{6EQgpzVjl1@VC$;N)pZCY$;9D2{15nZy?TT zXNWBF>9)T>tANEvpo4a3#E0WiYtixTO&)`D1_esb++#?0dF5<&9_Bbh<0-E9vZn*zc`EWCi?{J 
z191AYiBBc3|A4dno`3Z}IQ{3n4)-F-jsM5{?=MVn$j5=flRTb$ZdfbUA@%m}{rG#9 z-!N-hQw+|pYFc9O+&pM^#e(YI>M@o}U1I^>wuqA&K=luPS7>Sbva-aId{VmmvLtRh-A}=e!a= z2zoZa00~Fo4~978dp{Ur{_}6L-alB^KXS`&?-jB1w-*K6=4YM%OMUqh7yiP@UGf|d zn?E@ykJlf#Az%K$KQYNaxd#8xoAtJ1N{%}+$iER*Q5UAknuFCd>L>!jz=AIc_8j_( z1m8*$kAZEXSA+4Ff}n<;Wyyv3C8Q;>`P z)-?!BFmDD&0r%Q(9$0UM;he!_6|o!nLbfsx?xhV`vBjIJa4#1h8zTULzuAMKP5+=g?tSEJz&fQ za`QM=c{<{3*%WMgrNU8ssC5Pmm|!$Ny*FSILy=$87B>6qHWb-{iG1*D-^7DX>i3EqFF-f`{M%eDXE8O#zP_k$ZS+ z6TVf2;Robv?GSec+yZg@=PpIO!Rlg-zS!6&m;Tz z8;9wJC8S}O#s)+>mJ;Ma!X%|jL{d8bbf-bHq$1K10uoE3EG#I}%92a7HY$)MD{krk~q4iVBR%McP=G>zv~JQPLWY!9q&7mPzkA==hqv4 z_V8I-lEJFUgZgL84=C+)Fa0H+tyuTVD8>`G&fK6>;r}&?7VdML=;1Zpo8dkj zj5!K>*Y*m1Qmh$$=dO(K#p?D#F4}Ije_CPv7TD8KTZXQwj_biU&Iz9xPGe!-ub{VI06hiGF?Z5x5>0lsWe%N#5LpZ25Q6DuaLM}H^8 zg%uhqpMA)f6%0+jD28IgZvLobGrHPU71`n?Fr9`lS=-KTqpSMjB20jNddu)Bwc@_d zlbGrU43H>lD2PwGF>DZjEO-bwPDo?FJF5P>BMkW57T`uqL|pL#0TV5SIF@!ajXv^|ifdcI+M|eAa{C zoBC?yngV-azryRgToxwacR4;VCe9YoJ#H86*f_@v@bbC(yW{l`qbaXgoQS4$w|u4i z6aQ!bof>N9@%NYx(VF9|ft<7E1-uhcNccfQq*3jc z2Bq^C>!V=iv0a{1Yqi7eqW@ zL)!I6V2G1R83s4|B1ru7y%v;0;n09FrHS$$qrn@_U^4>>^P9qNLPSW(pT-=QzHizF z8s!E(7#$JGVYQj~IKOxOFzmJqMHvc_C(@?vt6k3x{&c;M>%#T+SWJhsjY(}c`uJVW zBp-gC`INm>Q1`$OGoK*u7;E=M!!{GBiaX`Kvb->+yHZ> zCVg#RE@0h0Sv_+>xvpDk;n8%zDO=j)xZ471FaWt zbub{hC2AY0hgxq(DT9*S4A3{+AkSFN*A(raC}EuH`~K3~>6(bF5{4s^iMO-B)QdZ#zUIv9O$Od*p`Vrek>gk+(Hexa>S!3%VfnsP?3tX2wEq`s!ssj@m*C=dmfV{8=Gu~lyiw`7Hf}8u?BHP>%QwYHI%AY=JXTUf= zmTGU;SZW=AR<$Q^ybO0@WTXD&VBMnyWc}v)iToiY7=;VIg{cG%wZFJR(hlgEFParc z&sML6kbyHDbqoXP)Tl`cVzLfGf_80rZ2l&Fj8(TbEzr!p2> zun6iQCP5r?b#b;S@M&wx$wBv&OZ)@3gQK}%V*teAwvJYY$*~rfz-`u|^W(WPM;=M) z@4_noruV+^_nE$5!r7$|U&5^-O~R z->ntGss~i#0`g$rGy`8)MKJO%`4%L)m!Q+7(qB`pp^#YusyhmH$RSJw%%mYEYq#5$9R?aWi#n7eipk3(^-iPW#QjY{1~st ztdJY(@ZeBCw4gocXzBeClRx8nM+IVN$w=|&s-Qm`HR&9`WH^4w+e=ON%T99@8)0|= zgqGOZT*ZC|Iz$6@k=?fl5hjsMtVN=MMj(8d`!wI;d!OLOyCR);H=k|T`!GSD_<@>K zn8`1cpMCZBrT#@>n3SpfnCnsE4b*3lvXy|zYsxkyc<6hci7RkG;S>{mT0czxklqC8 z;6u$QRlPQ)JlL%CYG>dZ@@nzf>O!`ZkCUQHF={UdNCq?QP^(?JM~_dmN={(Ne3x_Y zgjqzU@`~Zo)Nn6;bAl<3H|3J^M4Q_CJQqsh@+V<3o51`tYq5XA&nHbYuR!n!fpF}c z3~vK=Q3LskHqE~;@`F`OO<$}?+UNt1%gcH@6;@yKK`>my&%ci!TwR>mW4d2sLjB(B z@k$jip+6RLCR(k`8YlIM|C&i^q-v?GHak)uV7aJ-6O6ib7u(yE@$HlchK_RJi_Js> zFU6gBjHS&Z=s5x|bGgEr;(VBeOOY9do?4PnyuD4%7KL_2s-(9N7Q%Q*Fkw-An51kX zCmt1fGu+Vfi1jxMYO9jxW5Z|y?f1ItK0t_dddnR%W zeuE}W&kd*)ez|RoSziXv1_6lLa=+|a@m%U$dOQo|(I?6x_V6X610kxDdsPk%%rBe_ zQ<(aeE5K_J@bCQ_mQvaG$s=_WOy#<ng~#%|*JWuH~U4j+h{T|v~@ z>AJ^oOxTaSpF+(KZqdR-w+kh8Z~DCr-aa+3$sb}S8ol=xM5IImD=1E0O!h}0H1+|w zWD=ZxS-_k0O}VbXZy?T7J7om~HT3it9lV(tXRRZ9r8=7J!ELWT2fKXZbbk8wpq5c= zV#f#81E(lDB}j#jP^U(owIAcxp!Q5?vmzAh&FPe`8UFmPV6aaf!|bZ5%zi94$ae#j zK-iGXJ|wT~goRldF`EZYX*~eADnTAe;j}pQ@6A~!Qppl%v->+{U5l$+1SW&~J68I& zv`C@gm=Ag3b6=8QYwbh+d!97g*Og+4T3+=~HZc6cct-*586&x{Kv<9K^wgzgVU+NA z;{JHf`_?@#mZhR~$+ae>Hj5=hNdSd{QXyaQr6oP8`rihI{l>PesSCuAaCW=)97QRJ zdi%)j#BXQ#zrf7#W1~nvJD-`>mYARER-<(S$1hwMY|`@;D+%zFMxHNkVvpi`DABXv z-DHgm_ME90o<*l&n%L>JCSrr)9hPyy@P_5Xh@d+&C7UG16mT z7M@bChyuh|b-aJPxD7mF5=_^(5!sIZC>O6T#0{szGI!a-80j?yH#^M?-iGA#PV_;* zeLg&-ALgVTO47ChDid?4G6v#ByVcW%?YX^zHjJ~|1cPXoIHNUvT45(-j}My661}P5Ui{D z1O5VGyYi<0`6=1+t!osJ5!eLf6C$=sirhQ64(ywgtr3Zx5`A;`jW3Z%VT%QWh9Ept;0sl7V}|tDD2dS~`f3^?A{lM}r<}+z_Geo4+#Le0?HrO1eNNMVV5__|nW> z>4k0U+mIh?+Nm`1RHS$j{b+u2GVbPPR-|KONF2QyKlrSrxR-Mw_`B2%mhpXB&AJG% zlM0_lcq8K`Lw19G?IRNbzBPin^yC*zQC2J^phTTPMys&e?3OgM-D1a}$frb2|6a3C z#Po<|m5Rd$TjW!lgw~OQF`=a@IJe{DqPK(4S6OaO&wfxCxYE28E8-81t#nLSDoFHi zU|@lu@2HQ)o$5&Ky?rt)5EFE09X_yU$r~;~59Kw<8>Kfhr*xZV4W4(?h<6Z18`M?+ 
zHWJ>j53R+59>EoEP0Uh)c)Qt@X%)JI$C9of+UQjdrRL-x@v6u2ewl>rS6rUBuR+nF z>%YhsF9Qi~xAoPT`D?sBt=BrJOKTk<6vY?!^HTFX1TG1AuO5!H4>caTpsJDGzh;fh zulr8uSetZ2Xxe&&f(XV;RXxJ`W#&%-@aEneR#~S6rW@p^eVG_`r=c@`iaAtCVuHM; z5;-wzx0DI#ib15{G&gTBfcU!$WNgo4Uy+VYp#l`-ITd+8i? zW2kG>3i-Bp)%$XVc7mF2H3tajEb+FN8O_9F;tdb~+5f=CN&n`615ntrK0xK^-Jg@D z`J!GbwEXrAfM3Sf$GWm2OFHlTQogg8BBtCL8D(fR1n3QV4C{AW`WY;J>B*ow_jpo6 zX)TkOCSs9gM=?Z$(x-EzH^azf`ByY-DxXnG+)D1KLEXqVt(g(jzDTy_H(hOkA@>It z6gG`0Klce1tLMUo{Iz38B!N&+Pp?ScU0CX=IY0zuvIHCgBUuT(iCx?QaJy#q<&IJE z6UbuOq4MZ=fkDBGu8Lt=UQQFz9@Mw8mHBBtTaept``*YI;=tqbww%IRy$q_67!^xf zTCv_lHC9!*VW3yLj#q-fEHcNFUxxa8h^C7chxbzj5m15y-ph?P5u%z+PC z2o1u0Z&8*{ngkz#+TiQ=HI&y62g1MiZq@*^s?Nw)0QSNZW;AndN;o;i{Z|HMaX?BNp&)1odK1{`Q=F z(X~BWU4nG28)UAS?kA#2eOG$JGa#Cn+;11jk7p;}TJ@Jm=866HCMPsZsQKadp1`b{ zg(kX4G%IQ9+A%1JH<~8BS)=m=2RCcED^+dVml%~fN@ zVu*$q4v4D=^+Xb>=ftuKs?A6cS@f_VFYaFyFg}_0OS0P_Q+jS_&-7 zVjH4t8G+l|wrO|2UPpY?I!GzWR8~g}F6`(ZMu0j#B*WBC1>oq|XK^a04~<=x=Oten z@m=9jsB(EIT~5w_GAd@wa^)B5_O7euI>kAi%`Fxur=Mz))Ku@bW9~+)Z*T!~Zy$3$ z&n-C{b^PLO^93w06O<%9FH^#V#MRhQ9W0Q|c8S1SnN#`1^oXQ6qXS||*GaAc%?2$v zfKS#uj4GXWkIdJtYZqH@81?*U8&Z)NPekqJTkdr1?0m{}fpynhyA;ol=-<>$Hwr_s z4nG{*$auoCkd%HV zxJGIH9(nX3)QX~)?+jb?#y@A&O{Ffp>BF#E3N^C19i?8nY<+qR=MZwt{Gnnf9kls- zu!t#G@(rq*{(FUYl621S4CHra{%v%V~R9Y^MBblXpVG3U!aJ-OLVP{b=jmR zytOy00#9?g-`lLMdgkjecSn~*oS03W;4#jVh|BYko}*2J<`tzN9O@pm_ZpX5f<$S7B~bk=JADtJz?|Joq; zyVs7t`XLl}33JmVvi0eFy76p>6oX;=uH#U&-E-@goi>C;(&I5yj0f zif3cXW@VXABXUsFgb7l_&XIzMApb(pmiPK@LX6TKltM;_ zf+vXHmsW$@N9B>P<0+edeYNyq5;LNcm&pztr<)PnPAS)|WyhxBDNv*g05SoM$~rf- zJazZCA*W)BKIfqOTnBKMiAi{Dw%EBG#_u(88r-a_Q#fd1{uTxuL7y6(4arqUSeGJCHm0;RPQHBv&V)66 zSRNU7+7AEO_$O1nfJ2>iM|;ev_(D20{ImhY-YEZc13 z2zmVuH7FCs2Pn^%{(cj_X@|v?oIzwI2UkVu$UFYywMY{AV86tcR7O+Zl+ESA8Wg%@ zB(_84BiGar;=T#+1^>5o;u)oK=9rt-Hzk2pmoY=cxf@ar=!2+RH9$i2{yuqOH%^W0 z9ZC$hnr4!=^YG=8OZXYOPJehBi|B_Cn87X`R?G`N&6L6Z?nTz-BlC+=t!16K4hdPx z!UU-d8Op1);HM?0uvo5}bQ=W%OTv30s3@zFpXA5z==h-1k^= z?)-W5;vILUC_FKfKi3pYT)FKVxho-%*`dI5GU?1iwZ!JbuSq!lJp4g7dO&lULFjb= zb`HYlDoWshglZ#4wY&Qi?`(duwv_6M1)*~gj`d_YU7T91CSr|*D6Qa?E}zwoyUXyG zre~G!1jwtbqVrmIbEQ@1crciyqNX$D+9yDJK%>zuAn{JSic#pHJ&pP*_+wBa4ptJ* zAEMUo*Tuw3fl)963F{99N5eB_>0F1`_QW*H{!1FpBf8CDb)1oWpGD-@nt2`b)(#IW#p7iBdCv_RXas~pQ@i`vfKW+bjh;MFO!m-ZA>@>FZj4cqex3BC z(XEu9m#8e4$RAfpi8UBsIxL3=p{hvhN^=J8Pd&chQp-77TF$|0oF(caJN?amJYjyu z)K#+p8f2L`;(O}B^~#@{v4O%smzz^SNCQE3^kVR*MJ>j<#f7dcYi5LqaMm`0soiV= z6RW-V~xf9WU;f5#ya*jy#UbDdT|0jk(nx_e8-xD^Fu~Pe%KPdqOtO;V3NV; z2!*)rQh&bHnwN-wXQ^EFqzbA|Pw7ps)Euu*&PazgEY1HH(3Iwi(gFDblXFL!VUS?T zWMWBv)}i`j(9G`FAP{NHyn)d%_MiTHuWP<_VCcrm0LRZhuQf~emMH9vShW}px)*Hi zeWv5r=d07mPcdDN)IE@DHQyXe1i!BGTW8zYEzKzED~hPOXs)#8P}{U&P!dB{ z!A|bmvlaSe?|_j1@|}ygBA+9 z9qXZMGnD8xHEJbPOZztwh0BV=7|j__q#CyxxBQ-j0cilw?@{dlJE+*eQhJ-Gv)MW+ zlXRnY7Vb?KBq)~<(*u*S1;pI<%EGyiLI$r73f%E%6g;DLGp_33A(Vpt%DWe%x?MKT zEw(1NwGTO$KI_ZT8ON~hdA;_z{v9EH`V;j|1+jeYtWVRe1(v^7DHB~JFN_R(#N4b+ z1LNvzi89b$ezWx_dM0s;yL<$8!HJZZHBA@rqrY?H`hW3-m-VP9|+y%8f0Y_aRIIN6{S@W%oX;9|u!yxq?ZhM>pyc}~*0aULHr#Mjq~*!z<{%`dzpUd>e?N@YO$f=SQGAoaf=4^&XEq zOMWQm0EyupEeGx`=vdZ;zF^5v5b4Bz8H^M^>ykWD=pIjIa}nCrX4TXiABTq9-l0~| z0!pJ+D6@iD_B@`7?192f5b4n~TCXc4?tr`}8Urh3(t`=9))B$CZgu>M^nY>Zm*!G7 z6pxc;BLbT@$(;s(GCMTb15~#Jm3LWI0Wm3m=}0WJlfklfj3`*HhyPa4(BwKy;@!d} zxBeWcZ-^yo{k#rWr2|)O<#c~B4kWH%nu=a z^1+heZFh^8_RaH^lnsSw4a~iVpmL}-4aYzz^ z#th#ml~+mxidJ1jvC9lFyq+>_Lz>2x73DuC5!6Y6E<;7?Q@K7MfBh-UPg zw8mVedWl^8%~w-?QGo>p7_E%R8Mfadr86H5j^vDph>1vyBEd$T!syj?!p&MJAAR1R zBAQ7PsUYS&+ELT2LhX?Z_el1lGyoB@vi$j{)3WsV_)7u%0*c9pvtKp>)n*V`8_ z=~hp=5}Pr-Fww2da2S?-fP@H)d@YkLGiWVvslAnt?nV0d;|j&5jvkOgVZbzX=-@|6 
z?;upo=hf;ru$2kn7PRE z73xGX_6XZ0ccz0KOZJwL)FIHMZRmn<>|c>f#mN{S1BLs*%Ug-MR=cGD1EuGr%z6v# z@QQ%^qXi!1vCO_?KPzoLBOt_gIbq#4er}c70ZZ%45L*g=_-8I4Zm*W9$kq73<;{z- z41=E3K+e8-%@cQxiII}XiN?r*19jwQwBVND0e2_+yo7fA zZgBn4>tRmDxC3sb@~K-)T(<@MUfa8k)=Yxtt_J>g?EZK9EneFiaO2qBR1ki+GQP#- zem9BTQXwK*NzVFNtaQr7blyKIuo0^YS&^@$hIMPByUhC!zT8Dw^4QYPpb^#efg_(R zhZ$!c-6+_fK2PBqt@ifoEEY8~t_rF+0VDFLnqv$MR}XfwZd|iFQhu*JGChzlJa(7XiBg ziOO<_dA5@mC9vlyB~x}@{SB+w6J{O&e7^=?*FO0L{pgJ%|KwRW zXYk$|SazrSoh-qu-c^>nms_-&VDQz`htBa+Rl_4u_W z%U*s*<2whw_~Not!4a*fzxHpt5aUOOY+IN+xTa{-A<89bCbQAB?nlR&b`?l zmFY*xQBgkrR5aQDJ<)=v5e{+okTv|UjsL`ZHF}R zy!{Z}?8POcjf49oOmkkO=z)A=!CX+OQU;N#u|pvW(3dz*DKe{7Ux{_}p}Q&dx&}_H zR~ZTRpn(~vW%|dOHn)bYzZaL)EIQ%0zdZ_OdGn1IerFCP?lW&LxCORgA4M#IM(URj zuHNLayhh2+{5I}^qW{1WD3_R)n4fw&dF|UoaEy0^zen$hs>e?tbALH_tAy0-+*{yj zH{Rt9yyESMS{|9q={*&U^x+4Pw4>=hO1~3${wZK+m9GtUMhV8Yj5_Sg@4uB3IZ?_C z;~I2_UKLmuiKX1Mozw036ecj)`t$)R+YM|UVVA48we#O#PCxi$y~(n;6p9Mz4^(D4 z;5uRsX5vIw6bIj7$q+iW5f5U0SfaC(_#DFs1RsSk-)l@{vHohwd-6DAlyThm$XgS> z+|g|pItR=;_KX1I$<1x3+%bauaOCV!R4RE}uvaCp50N^I$Hu64GN_vKf&8;y1*|`8 zdUV0VM?n?pGg`Q5y=4h@MD=i_@{jT-dvA$D?m=Y-v2?Y0lyn`&x8`bNEy}ViDkT7Y zl%?l|sdaf`(epyp3cpg+grb+jG~2QbLtUI#*pNk=isL=85v(%HeV6%CW}$70lTXsK zlUuhJ)b2l;|6RHdzvZ z^)5bJ4%1q{6}dJF+}(7J;%DH_>j1vTs@1x`Z|(t`{WqN7+i|X9F0z3(o@|?r7CxcA zO0~^4c)LOEisf8B0Mm z0W&46j(ySCWLy1(wa3YerKp=0Z0LNpmSbO5oV3B&MKx{2?nc~-(Gfg0hXEUf(po*6 z=IMStxmUuXA;y^{XcE>68@o&}RcW&enUJyDJ5<2Rq!F+9@KvGmaz`^OyiR{l8`M+U z0GQii(|4XUjn;EIsyz;jXz~(f^YgV z{7W_r2Xxp}kdp@}K_rok$#wc5M!B5Go5}nJBhv8BYnVMpLlH7l9VbKe1a4qi z{XMkxO5~6Uke0jX?TIlrb?{zL z;F>*r%mHXoH^ilO8V?p#&$6tP1cYi zMTf~}RxA1NcKv!nKs4R9Lj0142nR?H_J6wJP=fxk$p=I+S9E0o@15-@^iXc^#>=#d zY1JjxsA?Hj4J9OkgWecayp{d7_kmD`lAlsS?}B@kNqh7S3A=QyTT0*=zsMKj>q1TICD+!PP=dm1$yD-+;X5y7IieoK7nE>vL}d&Z43^Pli+(796QtJ&t!PO{F% zX7WS(6%>Q-G50#Oxa?=r<^yg1Is)H#qPc^xO2LmnHzb(Q{r$mD_H%xgMKG~!9~o2_ zbSBK3KDV^H&4|g5Lc@HMF=%Ze;tc@*J2n~#449`n)>D?%MK%(E?X(G%Y;R3Qq~_;) zc=qpme+OF~Xfp*Q4}G{^mNqfmM}37s^P+jUsOGi-rSE2bqv@UC;{+w7(&7Tw+#o09h60-~6f?vOgo7cQUY41B<586FZg6=I7!=^*!M%pC*3 z?9*%zu0bUaJ~4D9I7sz%UsJZ5Fp5dp?0V0f(7I$Wa~D)2G5+x8qn8ytH)3|ryj5~p zX~8yOn0I`657%h~Cj+Tb-EH(M?!veuy4sBUzBTcT4`v?*%je-2fT8BNuln+fu4S74 z$vl+y+dS9$Ah0j@>jwIA%dV3THBJI%jLBRTCxWx5%xc7;DFD4+LbRO^?~sNg5FKAN zj8yo8%aW{Z4CRHl%$5AXPlgxph2kn}xtpr2vK;L9!p~B@!PAC8N6ABfMRZ+^u4OaR z*6W0Q-D8@K68Mu=LC~C)zp%b@5|DJc;vKQug}P0MlsXaEb11oNhK27U`~l-#dbdlE zT-w=&_@7>jgkGPQK;56-V~*ZooLyKfbao%o%UXmT1J>Mx>4RR{4ybHVsWew=VwB8W zEOBCx+U(hT(5^W6l{nO>)Q|R^jkI^?&%QI8Tw|#{f_F|IeD@|bnHd<}gTW4OC6}Dc z|NC=!byZHO-0N*3$7L`4FW_zQ?Kx%MmBAt&FfkV!{gNUm-i8(PcFqaOWX0|Ca4XRQ zL;K#n39z^Z*VF#b=#<6&8hkC!*X(|HG$f-V`s)+gm%~(GJNjCMmjO1Siu87T`+lcx zB%BSyMQej`LVj)D<{4bv2qe)X&Vv9CVB&8S+t)h3yoiZ{ z(f)#8XN)T3FE^W>`Fb@8Us;cCY|ti<=L$ zsKcSBCH!`*5FWlQt7TGbm+B>mDUQMNmT@b9eS4h+n9tPA9JR< z#ax?(BkAps4Zu>!`L5N(uE^x|JkYy|zJOA?i`cPV0_qqbFDQ{eRGl}==+vGLG-!*E zb4{0I(iOT!5cf3oB6KBnrpH9zyl1VXg?bEzt8TK|fyY8J)+f6B9w)(L*>cnQG^kxa z=Wym_xO^EaYg=1UX&%_Jwk1ZL|0nWmsrK#5FXpDdnE#m(=^y6*cqAGbUU@ziKay}z zWM>H+6|4mT5e2O{VBf7*fR!BeiWEOj{Gf6%s~#1wSE6p;v){O?XNM3WK8MS&VuS1S zK-wp)5u72zJWOj!7qrPT=W^1R+mKUHYrm{zVXAxAo9K}E=Rrf)BzL&K9;3`AMX2ql z33o9{GH#$V^`6E@Z85jm&HIgFhy2?63zR4zvM0cPjz9U&1d+GU>Vj`wVpwtqZ&%XT ztKtV71w@X!K(Qync9@TNaV2aRi~fzpW}OZu3RO#A98*&tyDa9uulk&}(e9u5q91Ci zn?^*MVpsacE&vYsm;JXAWq`08wG9dVnpPbTVLzt$X#1t5IwsHM{BF_ySdeR-aV?#J zGEgUuBnr%V!#FReq2XC2M+(SlV4N1}I|~VI2Au94hmc?+_9dxY&^ErH8jetYK?O^xes0pg zbvyk2Gp5WAuOuQPwn5JFn9(W&E;5mKGh(49>jqG4=D47{M4e1mUm`oPlry#U>c=1M zK`~xk>^#uVKS3peS;=dX`OV!KH|Mm_WHBsu0e8>S2+xbUr3b`8qYA2#DnPyLyd6or 
zglFm;Btr>&SB*6zLNP@v#^DnZ1iUhj~nSvv#Baz^SfL=F-0&SG?@pKndq>Ea!x* z{B;mVd6-Q;pHFPiOultdTL`xvS`$^zm^9*ij=6&FBFw?gOegK4&*ZvLR2T;JD4h#h zT_vuQ)oQIC!-R0CU+Ese<-`|guA?tHtRvyI$WMy==YAfSPcz!gGef$>%PM7qiAbFghwZ7@K6xWAqGs7TkNp2;wG9KqKfRjU*^Vu{rYKFPEZRa_x+GMP zm#%Z-K^HdLU3WUZ*OM3@8&33F`bP%anm)mdFLXrG*r@vLLfcQF=$4SolQ0%?S?i@K zo@m#42=y86-PN>P{!EU0O0HwhLh}9Gbo?(}Jljm9F(&f7KC^j7tr0^%2sUe!+~Q6# zy$RgCa|&`brD1JV4}kJv9$t7XY#$qT-S8jx?e_w1a}Elh|Mz+2vvEJiq$y5|>n@8` zv8om|^KQda4^G<5Gf!r(FATofdzB`v>WyifXTP46D6B=5XfH+n9=f%;aO*CUTgIU} zwu~fuBEZax13$qzV|oG(ca8?lmAFIWfDxcgK5*Cj6$ZMt;>~)hwkem>+gZ?iZVl8z z$lQqmwovE589id?7^yW8=OHF+!sgHy2j-(>A*K4Zn5W%9g!nx)h~(G2;!osTn4FU@ zrxx1`3NZW|<9VuQY@kv>ChaaQ+?|}fjBb!?5y|pT*!;{<9+!YrrFY4>&!rvu2)Nc? z?9}*&`ob$|x%#Z{COjX%DpyuFGTzaq`}4?)rHnGGqie@*6Gij2@I$+%iKU^_RjM(U zITgPJ>emlPe^W&S&oD<)2C;5u<)dpGM;tcuHD6rx)Pj`OWwT;qUC)@%t%$FxP0U~I>bMUeL@+wKLw69I_=(4oRgmv zieR+JnNM_<6xJ9N59Cj*;_gV4nm_unlrfsW^@;xY&TIIj=8K0tZo)2mb@%pXSSKUb zt=k=&=lLTkCzo%SXWaZ<_AY+bemIMbjW~qoNLt=^y=&4&EmS&sJLlWK{+n}0@IsT) z#C(_om5f__#Mz={q2<-PsMM*9QYNtmX(2kC#+pG{A{%*%o3&)8B%;qwg}A)*)6U{t zhQ|sKiB>=dkf3#VyM`IN@F$27lrl*=BG0brx_UOdI04qXct%V3%Ge{&#PduKO%75hSyB!|E+q)``3HJY~`H%nAmvg zHl~Ty1{%l`;9@?~_f7BX>i5vq3h3(PEyk<`8$DxZaw?87jkf zQP&$LkEw7=CvrDRPJ+G*jL4OK+Z2kT_weL+pv>)`ex(j3Wr_sH0ee%5)OYK&6Fpiq z8Z&;f3UrSeIE+&Q(cI-PIlKv{N)Ti#+$@SwyJP&UzjMEqCp42j4@v zlDkcQFT=j}V*N&IkR7n)$f#bQvL8%V@7Ffy)b&43TwIm78_nPTGQ_$jrfG78CXHZI zQT{PXnE~t>_|anG>KxlWmDK_5BSEwQd@E`!MUlcA0o|tpu4wbMEP}&g`7^8ODSjNI z7|BCHsu2ONU9EF0t@rFkIDq;RV0Ma>c1VH`wofygYHd;wikixSA!bYx_$$S3d(i>= zr@q?P!a}zuU=dw5!B4-mMk~WdgktSMxO?qq6HB)O(F5Wg=Je~I4}J^a1g5>a=cBm% zo7?q;Y;=K!w+1;IFB_q|g?zDh5yT7GZ`0J|^4vEfhv|5as2#D= z+Ze56{s<+5vURzZxJ1{o_Bux=sHyMyjdwSW%}`SPg0DTIXLY&<_!(yCA4e!olPMOb zZ5+85_(vVaZAIS3SovfWP%wZxA0VuG2HV1!H8H?mO)0yo^vy>z&r60WFJK`nd25iI zba^m=&RM&M+*4nYXA&(Ql_%mi)=SEePR02S8z#$V@W0-DF4aBH+$WgB(@YeJo8fqr z9(~`%%s(|6^nEplZjq1mEM$5)6(gzm_wQU< z-ABN27q&i?Ht2#X|51y>6FdBx>RiWL2E4iXi7lXcE-AQ!+%z* zRm`JRs)b_{k+bQ6&o%b9U$}eka;qL{B{~4=*>786DG_k_k8Fnv?o@g>1DWk_cCMDv z0)u~^UWDK0`O71J|Ez?}XtdK==VtdFiH!)Bg(6)w=}4E@kj7dyXy&1Tk6KQ5^K3}O z!obatJUb|QHjJl%R{0GfG=gt)Z6CQCNlWG_hGkQ75oqT?_m6U5;^cTUP&n`25zrSK zXGeEB+1TNB!gKk~2R?j+-m9Qab#RG7O4-V6VwFb2tl$tvrd~mm=A&=2Irn<5SWQkg zFFDplHM~=4j1d*#i|%Kj5eUDX*wP~&iV&N6L=K4)0oJ%NNdWP$etir z8+UIJbzm_4{0nEpko0klPEixs8gPkX?*kQ%5O3#-5&&NfxSG#x1FM_gFPkH2j`f1c zp%Y_mpDxB5qDvN>gNqT7ip%GNk4K1H7M7O9`G@pp#c%feOdEUlDOqo!QhlLeJj9~0 z;G|JRC}K{=US;5{VuJC2<+vv{4f$31!M2*Hy2ochp||S~sa(uaEk{;E6${4} zZc||QmOeOf;xLizDMQ44p2eq2P*1bTkRFn7;>MN6$iV5GZ+K3dGVkZRCFJqvT9mC+ z|4G_C5k8K%|7^vTwe3X|AUzCyq8?pc)ICUASzZdr=OB+C5{^zxS3LwBH=6GZd~Rp> z;mb@gk0sp}v5UT|!kFHPGjb#Dm+)_?$q>_G54+-0FOHdgolP81n#-=8%An;b0`mbT z$phvdJ%JlfYWW*?pS8QW4tte;$;t->TI)DqwwjF|&V5*Izg}+y_-JmjuxawIK)(zp znVIaVa)~_6T1{piw|}-L#bt$fdGo(N*1PxXP4I-q!Cul(#m*0^Q#N~@bKhsoJ0p@~?_1NDiIKTBLiEfvH zw{{!sj@$IcpTEeW0R`k3y&~5Bb1MA(Mq>fk52a`J0o^}Or7v3aBJzM|=twoRB_kao zl-0@e@ddmmR?fsB4rKYMLTV&h=W&77PeJ*e9Pe8X&~b{Nvx2-WpJHVD+V;#4y#v=O zJ(~>gpYeYyV~YO7nsR9Sj;|s{R(kMD5p78U&L(~mPR-MV2jVX|UH`Ebu0x-NZ(&y= znCgc-RvutIR7^1;&Pq}tSwCVivmJqjeNP!{wH`!;^RPoR|zpxR* z*a)o>!}+hpz=^KU?P zgfM#;+wgi3fu;d_y1Zp%-I<7ffsW!4R(Gmmm;Nbt!k0KV>7J+)J?phlXVC2U`&+!_ zw|=uDANIRG?GH%$Ec5=N zUTvA4VhYP29I8)Hho`|GpDQnP1zBH*PY>U(i33|es&VDhTVp*HD_}UUr&!rLh3nR z>_gi!*2-&Vg6+z-ve>hYjiWuP7_cGtLJ&L6ikA-%49trAP)b#$@`vN{vy%?#;{sI6*1UPta#pcfWx0H0Y?vxdJf&L{zfjNNxfX2u03RD zSR$$85cMt24;+NboZ#-m&D&S4;QayUV2~3lRmzLVw&0At@?z{y_mU3sf5A_b+48!C z!=ESlvDFs-R{zKrT+gTbsd1ZELUxkQ{{fYPRSmAKCKa86%r(dY+135*S>MJ#Aary~ zR1KYUp>QcM#fQ{d-^Km9WQ0JzLJQR=M|2}~C2PO#>X^D}M)vvQ(8|$k|NIL_5t`}h 
zSdGXb-?Oly#DnVxMC{2(=Npwu;j3JWYXNr}ASdm=ag-6%{6Fm09(`8%^Y)Itni6%G zJ}@X0i`(_75CU+&nx){z4g(CL?roe;B3U)4T2aZ7H8GUTC1SC$}REjSX>|CdIX z#W1#gr!=Az6t||3@vl9psrl6#qfuWL(Xkyyh+g}FJnD$hu7&$*K)#4?wnmK{T1Znz zh0DRvt;x)KHu5unL-tw;Oed1@5(*E}K7z?|0OP7Uu;06O4D@cfF-g{A)EjfCN2B(9 z#_Hpv@<-OyO`UF1Z?oV3jkTmDthoB^V{v>Im*kz;874D>(;I36ENGR9c1y%Q^~?kz z?6x9Ro{}xO86De_EzB;1z(!o0pV^_m{q5}lwZzA!uk>~!OV(~Cj>Aotu7UlXe)o?s zEifsyK;t_AE~>>`@+CO-qEryoomAk$SeI)&6H=7H|nt-E;6^``{e zlGkvg^rxWnI?|l)#-8Fof=S_QDftwfxPJuGfC110dHC`@&#;zg>9kxbo(Z9_Qlh7w zlD+4;>m8>gk4=3|jNQq_##qR!VSKU?-M;u=&xqM&Pi6peujrwL{nI>0iNuvqH`rhiTA1J|lE03g-SNTh= z08BRHE@uPXiyy)>GFT~ z1AHW$$Sr_&_%4S?w9R$rLTO(c|!c1Ih(lZhk_KvRXYJSWCM?* zt~@<>7lLec_RF|MKC%(?uTeQ$2!}2o{Y1Pv0Ro{~vhVak&H|gkCYQexplgR%{}F#q z_E^$^^sPn41!sE`hp>&eWFqmzsz4OnH*wa0ya4VdphM8h8Sf7-%-&V;h``p_nd8-m zW5e3xm-#rI=7zgP!MNc}fb2&=U}F^P&}X#7vsk%QVr9C@8h-wybL}^<3~^-Ki?h3A zru`oPP(ZK0d)u1ZOPOO5bHW-v_eIPvb4+OJKgZ{5#~MG(?{%Dcyto<0&>XkUaW;p1 z=?TXv4e^rm9Z&zo@%g)P9dNj(=hvtCSv#0(MZ_|gH*Jh{%+vB5NB{nQTz{X5=c&!{ z!)cCdM8w6&64*I@mk<+U5B~Ez;sp$|pWl`F_g{ZEe$Jof-w`v+=YM~9f6o8@9{imD z{T*?`oJSCi`MU!h(iig{W4?dinV>^}m*4ZSKFr=LKj+2Y^RVvB=bd?+hkO4y5BCO% zZ5DeYoR{Lf8H4xvvZvPz1MlK#x8I^))s2W=4;3K zm^*PUk9-V1`}esD`hTV+{(bIz&8NT46~CW5{n3sJ`l-b(f49qRzHR|A-nU)&{m=Qo z+5}4mKKrk8^ZB|@f1QiHGp^&WbMc%p`+WDE81pg%FQM!p&AFL*&GKCZWB-PA&M4hI z{M&!Wjt;@jxvFW-c>p_9REQ;huX!}`E^NZLALtaog9F#ShkqScF^7D>L;QZ-aX0XH zeqO7XVUR?f6M^t+n4WllH_zrjY0Qk(wHFe{=#MsB_1NHsITli}QVu+%0Rn+DBIm5Q z9tAG;NJ0dxKO*#m8P!(3m) z$bpK^hx37x5`Vu&68{tbxB%OOIU8I8rU(29E;0fR<%%TxZO*sqaAs&wwUn9Va^@2$ z;ALULo8QNy5#Wt7^EkRpUeCu-znhQq-K>A~3b-D4Ngr?ODQ!o4EV`IYFI zZ}RWg-&FP=$Fn&V9{l|p5!Va_`TZKL4d)wt`0v+{tA79nbxzLy{TjGg zc8i#J_S*%@DYdZMl1nxmpr-;I1$&L+h8A;ALZ;p2&zpGoQJ0MdG1mIj*2JjF1 zetQErc}{P$w)Sbu)+YW`klJ|FpF z)FB}M4u56I0jO)F%0r9m*nK%4zs>w+yvB3h0-BH4`21+T4*Uc)rJcVQ?}I)27N5fr zvu;U@3>Pb3@V=k#hc3bQK9R-7_aKsh(dLY^K|i>%A~C-vSb#@$>z_UsBUD6D0QZl6 zb{?Q}i`U7lSHojGe&2`dXF0e_8mJ@9?Fd(*VSj!-U(fvg;Jak={UcgnaXvmj%;x)W z=g&cDAe-A6!{ArD?Sr{}`uXAFeV>{C_5bYoexDoSVE5N`0ircumt@TMhu46z>PO(~&6kLNgZn7)x%2toe);P;&*N)5_XG5Y%ltmH6Y5rTKf*Y~&)@y;JN~}c z^>aPs{2Il)?{$9r6Ta_zJ$RnzkH+^p2Y(LA8}oDhr|&iX>2rVj2cV?5|JnZl$G_*# z^_*`%{Ptgzz`o<|-(Mfj$9+(9`=ecP9M2(}e}3m=C}8>TeYc zmjJ7I_d8|GlQ@QEbCFKpl2Y-2s1(>%slvzEMA&$34ac@DaeF zU4Zui4*~6gEYB8k);wRsCLn<9(SHmgz&x2@p2C-6el+Ln$CukPFVV#ghLzlTUOLYg z<6B%b>lfjok}qF(z~K7oumN*4Yejv1G+PusXJ5S)2S0h}$O21{!7Cqr62M3L;0Vr* z90FSb+#a--)OOaCO2M_6FXo2KBYdOhi*h*?o|E&%X%_0u;DYOHLdG@NY=3Y!Vn$7vDNLtR@{U04v&z+xuiE-sH%;rmEg)aMslCX z`t_NFxaNGRy~wv4C#&=4NFgObNd-S;uf^+qc*gnu_>MDs6Q1$;lt%=XM?~`c(|*LX z4JXqMt}WoY)+4S@EdeF|^?&)0S*h0~I8qOe+I5kCnC9!a)VDWx|G7TH^6>dd7gz@( zIJ^7gukSkykz5J^g=?ngYxdr;??HPZ{Q2$^)Kqalh86z&{Uq)afBi6o1AHQWe}6!J z+sbuED1LsA+b0E@yua}$`?(H#{O|vtYxAn3?tl4j-K$T#>5=}o|9}3!|NpMdz5D-O zqf7d~{rauWtLZlHP?Y8Ts)2>X>*&?81E_c*}6N_7|F@9aF&{^Ybq1YGj;t%4+n1 zFM4grSnjN@;&O2CMv-IomzXcy^IG>`X(aULZGXNBi8C{CM}G~*NK$hr*PojhEhd{z z53l+S99*-Q{lwP3m0HjncZ!h>#`NJr@wncpPAP!9z}MI%c)hWzn9EsrI0eP66hE%aC79JbwwYYp>opcdg` zb14$b6ItzO`hWWbNR$03D~U-`tn}Ve8*R~4Fxb78tL1xltTv(Q%Lm%8DTiIf+K2GT zmEP`FFRHG&9gX6A^4U)ve2E-4yJ2E2marY!otgV?zuVPGi6S>aa3#dl+7Eb$>*w<& z=uqWtJNUsig22&D>V3|e9XnaeG~PTPq|s5t^9r+V-hbt(WhfMj>aeZV*1763ScCqj z@6JQTYO;=NO?2bae9HT@=~r8Be>!d_6hXLt6u7!@`>E^7dQC%BELN#|)ti@WiZ`GF zf1Jg2ST{EwrG$hO3fLQr+v3W-*6!|jKW*yk4MfVc)vvcBxr;Vi>N2JmQQPW`($R~> zwFpIxpnq&3Rgv@A9?QUH{AK6)*kv#GUUv7}uA91$V>fwgRO2(hUO(?8v#&0@gVeF& zop@_%Ov`jfYNw_BC?-eB$6#2{HJP$zlAoa!j-_b9HC(q%u-Ue6TcMSc)IJYOFJCPD z>)w_(!v4bR_roO%g+=jtF(2|X4cl3q>i7gf5r4pgpotZ?Jagw}C%(PCXGhNNiE?4H 
z+kC<>2$I(>Y=^b9c{%*@#vi@wn>=>id9|4)=CD5I%j$p;;(+J=fw$o%HuMj`TdU*o ze%-v&i0+Pc>ZDGat=u>H?%&(x`%b@_VR`N8+u+g5W64zALi*%g{5dTb@n)IzMQA{e zyL`kZC9K&uUrgImf350`u)YtgX9|z$g?RKndDa}MQ?_wQl-s!d=7DvWdw(vW5(2rm z8U|Z=n>j1&j94Wj%7&Ol@je9_?K}!B)w$hkbeY$wl^DtV~WSd+R=FyZPfj; zh=bi$xPu1Im9_I=Th+Q)X;-(C$5uO)g;KxoM0XD9@D|PKti5Sbw$e zs$J2k7F9>ddY9)f{xq#N84V-sBy^DWYqD){AH#*jYZCcdfRY9dw_E?UxDAi!vwB&=`+t1#k-a!s!N&w#6IFR&9*s+|ipADIDR2$%*Pe}E6keHw& z6aF`GO2GZou8qy)9yW#T=Bv^tH}_qi>ei;PwgcRX^CO67m+cr!lM`$_;(tY?8}gIi zP#W&jy+c8}Lj&5MmCl+@F=`On3z0r+E3gkdac8myk+;1o*T)-%{n6KS15m|{=NoIA ziV1b89Ubqjz|XmCK5=Kf<2Cbg8p4=*xB=!ym$?2Ai~dt??!pr;A+ZVs{&0Va349~2 zs(jjVFv7cHvBWq<#87J--!BQ2$E6Nvo{ur#NTENZn&-fYjyPy$j)p<^|V z>;V`pIF{*s@I)Fuz6$$F?zz`$%Rrgk<_~c*8Aj3+8%H@sWxa{bvp=QcsUqPctUzU{ zC{N%A*)mKgvWF$w&WpF?C$FGN(A;w*(aBp8OMEQv^cT;(5lfjH{(snkU({vgRr1{X{9p+M}ZlzgBX zk%tmTMHgn@JRHVtR`<}2nD;LHWc;10)ViN{+qF$T97riY)b;MU0os^g79a7mb<m9P=GTUDtp_};yGhwz~=;<;HU zRK;dqOo@toJ(glftoK*#q}GXoQg)TZ$I(!wuKcVI4OTcuV|@KWRLzUMhOn1e=QUSO z;>+G`>9Y)QFqkkqg52*e)oyk2=uZi0+JJ#EjFBG5d2Fw>D}OxP2BWcLY9&@XW<~7m zx4U^pqxZ`D#7e5?;U*rSsq#1)l-)QF=3yT04>;Fcon2`ht?2?g&;SKP|k-iF(-I;hlE%>2wkvS0G8gIFG)b7{Y#N+^qs`Au?^ zXP7GG-PuPmYJViqOhWEkqwkU_cPHg=@}MAd^NY${UvA~N>qtSOshsNlWoCcO;*mWM zWk$CdGX^cgP|h+{>R0?qx3Rw!P0N31A#J9VOx+d#cIi(FQQmLy5#Y7WqX^c!p7IV$ zrPu{h0Ja~XMH6hDmc_XUhLBD*#fZ{EwtT@dqd0m=8-Gn@nXUg45*M2H-dOv5Dx>x( z9CqaWupXDvlh|jU4K|qDHWb6hd93SufHoXHN?n1hQFO%q8C7C&8lS-ycIkRaS$ys@ zlYMz!51+ET4U+-{Z1pydlqM-=c->^BAUmKJNlnuw&I|4*WLaG=ZL|&_(C4_V)Hy8; zKZ1!;n12s^K%^Y6PgyZOPo=+-F00FMs@7I^7JA3QRg_9DlDuCjYc;qE55+7h>56Ry zTN(+q=V5jvXT3#dH%;pd(-eXgr+0Wi&UGNOuU?BM7n>KA=61$att)z`o^b!PzuJ#C zpI*gx0So6lSK$D@KaZ(-AKt`%Bd+#MxElI9SAPLtjt4L?+>5XXUj)7OYIWz@XE8N z8pqLP{oFkR+5KRbr0=sDaq7eGyDq%nFCr6;LNTRqI0zwhA_>dmN@m*U`SM=eOEEx_ zsegoURL?-n!LcCjA-$ff?X1zWzG!b*Uv+H0b|3uuvz+LBn?rD~ zCsZIm)jr%OBo?~->FV~dz2A-EmOq31sxN{EX*NzqC~cX)ys;nJ{82s1Bqywwlnl4) zD4nK-oj$GljqpFS9-OeHsj$omKW#Gf-+!aBexEex8T-zwb&Aqyu#^~}jv1tGSOvit z!=>o*M_Hnx9;$;@rOy?4Up?B<@MwVd{brnG%PA@gB==4!y{1~(dFei>^tO7JQ}SG; z5>ekTaQ2E9^Rtx{hw%F5XS$m``ij zOdKRgZo5de<@0_+ug%*r?VLkC*<@En5Q{lH^hMu$9LN0QaBpz}wn8h`TLB_iae7OV6Et+mQ!oAe2(BHg`OVPSa<$_d%S zpeiF?JPIH>;ewcsi9osmmEU*AKyNOtzEsXmF24PCd}nY$J=xjHN!rDI;-L;0YR$!Z zXn7iR^rlH5JFq*v9;X^H_3k1c>2+j1Ka5?w>+JxB&Lg~dq4n$rYb`wl5r2yz->qTn zs%fYvTj<93%q;|KB!hPb>F=bXR!w+}IbD86Yww(3Q&BqCDqrO!Ny5Rg0_5+s-LVNl ziEoXmKc)6E69H)~_^Ru9yFbOvXx&EB;3(lCjH{Yh5}MFRO`oeNd?w4DO1DpDQlfIK zUxKP-`G}y2=(oa&7a}aaVt;;@WymioOyiL+-F`HC`69(CH|C2iqV^x8yNvRA}5=QB^k%F`0{pas{rtYYS%FyY{l!4w+<-%NrWt?(xet>X$o1ErtaM=ezijt5I!DCr(wiFjJAttF{}B znC7A=t+a4?xHlzxEq@Mmnugaa+noAKaI($i(~!f(zT@iy?TVlUXjD!8!8ctrj5JDIS4`GW|Uy|dFIy44?X372NlPXabao#Gl zWAgoU=sbPDGLehq6CI}G#p&awQ0zjWtkp9d_daZ^F9%o&GCJ|8I?Lna zZVWaT{|Y9>aK5K|$lX=Z>Y=TK=j%#68|<~$hgiJE+iTzo3F5zsHmKSTK&6#V%*N~q z?O<-svj4g5D}P;AeJ|{C|90%wBls$h^+jdZBlt0T;Jpm1r`R-&;?S3U)_KaxHeXNx zfH_+VPcH3UXt5zLGH64l#o_*5LjR3va~QNs`&7ifJZBcYv*Xg9ivg~B?9Gc98@P~9N3_F3%58q5M{7Y{Q0IXx zMHhRL)Xh%8Qmf1t2H1{*l-PS8&@>^RSLe1UWv!kztGBev((2_B%@R6`DI|kGOs->Y z4$e&_)PJY$RCb|DZR2UNn5304gsU)i4_9GvqL<95jIC7NOJ(!+^r%XmdobReqOiiF zc8tti^KFn-%H75%j?$6wmM7=THO*QK*Y*_~Yi4zXVivf63=EMj4>h8ZNReMsuyL(J zPpl`N589^L+*9trZ3v@%g+_FgNjn|<&XM^2XMY`v%LCJgd+~ITE{`_9dHe;+3Phj; zyT{3in%nlCCZA_^J%Srlgy9a|gw0M_uA7J4csS)q8TEoU5@F=*v}L ziGNF-%8xNPY|3lDd4Mz-$ESs?Ji1AecgICfFpx!+A{dlcd@BccO1R&J&s8EIC6R*M zdR3WnD_=lqC*GI!cyCjMI3I5AuH%&U0`qKgamkr!RD0<*Z8=FOiHsYa zfrii}U9MCGfm?1E8(}3-s4PR(-Rw4pZGUP$Z{gwWN#p%_#&r~no$4*)rJ@5ErF+S4 z>z(PNak{2=zvzl*ubgRYaCaYlXOleTnU!qaeMzE&zDmMaw{rewYDsal41&3B%+RWVJC_}0J(i!tok^dKN;wj>+TN)B1vLv}^JtDQCbEk) zu2z+qad;#S 
z_ZQ-^$ylo;K(&d>b&>FjysZILi^}w6Vb8m=YJ$0M#s?F zisWgn3p~2Rl%rvl>H*w0gLRd>TUD-uIzG53#47{Q_T_U0E#vW)AC%W3vpxzEWxLz8 zHH;uQ-&VA-Y%RCFGnw6OI;ZkNN-o9CB_IiRTGOYv+#H0hutl0j(-y1xE%kkCL*7h! z_n)$;-0IS9ZYJGdwhsqJQGZs*!?n8pbac8~(F{o(t%-)$u!b6ra&jOv5S-I(ZGVi1 z=588eTjTLu*Rj=bkL^v`jr}cEd`DhbmILn6^Xhv0R=eGw4%cpM@4h_03Fj4gui|s{ zt&mM_Fp;}Php}M5OKpxtuJkcjzSng=n3)={k7~4{sEX!c%KXSbn}3D^bg^6T^`aAQ zpSyG7i~SjeJKbd`Ol4I$q)XMxd$CzHDO7*@s};NB$qqkn^-&!TXZ~J=Fa6m)*2e1@ zAB3uA7w_#(woj*zuwD^VJ00FTZ2?9xT{q?0dc2>z;+kKnX6!P}-tZu=ichZ{#vzUT z@Tr~hXIuo4xMkci?|meIkUw9KLy-13dFpB$8b#~%Hsv4%D z_tpt>|LTdEtqb3{}#*?| zGM*lM%1T)($EzLPj?EkX$BLqsjU%33wftdQAVUdc-7>$XtGj*mg2NG={wz0<31~9C zX2~~w%yX(8%*|L`__3nwcccVl{@#`zwJDHLJtf_1aeq;ZowU4BN%J9(8XpmHAwLgZ zNk~Zvb(bE5loQ**8#2uaCQBAv_pIXJ#gAtQiBnrC-q+I5>-Ko1wOtw#-exm#@AyKV zTQOrm6LDUT<9ZA$&O0R_h}XL}qpQbjY!}R3?HTSq-RY+IH2Zg!pRct~U_evVa=E6L zr>!L+OMesX@?CScD2usz(X#IgZ@+7|LuWBzZUn>Tc`=J&Y!~N!*O?GXk7al1cSZf& zofKEu?X6(6r+7!M7^)09V|^{&#&*A1w^6$`hW?RK3#!C^XMru5o*&=WBwd~C_ob$2 zMv?(Rr=f_la<^Xc+TM)C6NNlu`6m2k`M6z=o`10Db@$m-krpfba%l-}qE1V7oDzeQ zK8IXLFZMHNHL_jjXHXf6gBj587fC_4Uinjue+3!Zva44S6s;&Z{1XVw!5? zsZIQuc9lOgX}q6nPg;QVh?kI&t-MaYDoVE^8<2@SZ$7(wlx%}VpWCTk>#xyqNf=l| zw6ZJ4I=gC{MbD{x0gw4Weq+*|m_q9h&421)+wWK3(Xp}*Hd756=aU##6=S{9s^2PC zx5X}ep)#l%E*|5GnB<3gEV=$_Q#9Np?^8T(A8=$EnCbY8S#0w?d4Jk` zRxG*UJRTOFi&i}L-IeEG&#W9)D~dmzY4DGmWo5O)nYHWpbzDf#`hlEvd0_Uf@!3*o zdSx!IwcoPTaY5dZ&&ZqFH|c1dhn=}Xbn4!iH+=&tv9?Ej=OmukQaS7IMcR8qDeJXl z!bNjDEByZ1j}GyQK5J%EClxc^ILgp{%BTy^WM{osuEHWp;v4 zFUZIJAb(nAzhRhFaCn%3nGJS*mdFWoedOTe7tfhY>H#0no=N9cTflx!MR&YJXU}?h z1camC>~k-NyE-&4i|Qjp+kfq&CJ?_=E8#4O&gzaG(B~MOz}?Qy@;UC+b@E!>R`;Y| zeHQM^jT!7ju2OcoE~b!TjQFC%#klt=9#h29wn*}Qdtx1YtX?)Iys3CF9G%?y%Vv#O zng$eRoyq6+#rtM@#Ro%}^(bznm0BHlFarU(1BusjbQ#ufb4^~tJbyX$FOalEDAqd+ zv3ybS@OZh`w><@+b;w@yz>^+{Vi#i^@^H7_OUIy9rxzCk!3$fijmG%3)d2SZe>qU@gXhe$k3QoEK0Ps8B z-LE7!i`XSGZM3IMd>*hh3SD~l@cazqfz9D+jzf8 z4+mMCoZVuvrcPdbgX6FJ3YM=urlu43jmlyA?jPTt)nnf|#* z)9n7HUDtiA?f1?w`@OI;H}7+KSfHeGSYWjX@6HA|PZ zcil|y{(pL{!sACy4pUKx>UBg0ctgH!>dGD5#oi@iC?yrA!}2!vGnhfv$-cO+^--$U zr^|)nqh`t6C+z~(6bH30Ut2B4y{KU(EbL^b9$uzlR74dN3j;Eo6kJv&l$JSWoNLa2 z!7$JKeDh?NCm}>A5Lf96yGO_pLo6BrGqkRAd4J^{DPotL!-aa&-04A|=<86X{mGSv zLwMKoj|cd{TRTU7{EP~7LeeU%x@GZNd;6vM{2ZK!=w17x8>i+#-zt*6pSPQ(j;#v( zc7HtEjK&ffwelKMZw{B`lg8s=?u$%*VAfjqkNiaLZ;#}1jJk_=r}JkG(1@}!D*ak! zNWpsk(IjZ9p>+KvIfv(Nb4FP2MW0@C;kH0a?+4H&pB=4L0jyIdDLyLMc>gr8K0gNj zXS-dKQhp^5IlXJnC(T-|4=H)^FV;;W4}XJR+@1XCOJX^`ihdt_!lsB+oiw7ybr}HJ zL2c>n>ity9C&!ELwmxR>Qmy@4*WW1u@cy%2$6G;pj}de`ZMK}+vRz&eyGSZw6g@nP zk`GfkC*(9P+gNX)F#Dj46c{UJjq*w{v{k)~Zwzy!`(Wz>AyL1ZF^SsXq`Rjuynma_ zA#@sT8Pe#xxv%s#dBo<)-y-Q)aU8O{Qr8HpMAT` zB=aTiM19|Y^S8u)!|89-v*5>Ay;Cr6tP&b^*!0A7JQa6gPj7X?_d0ofc`L{1W9zQF zne<@%7{Uiz?XlCC_+lORyW7&epMREgU95Lg;|?fCn&(hd!>|mA#c@TSjvT%G#4fXV zQcUuQfvk8~b?fLp<;MdbCp}?FfqTxbU2oFbMAr%YhwN}xTfCTJnWFx7jVoo*pUsq~ zZ^lkPzd-7+g7vfY;WJ$3C&Af&^7WGU?-A8*kLQrh4M$YZ^^(~I>G^$H`+s7jddddz za4l)>n^|jT?C(Z@dY5HdSZ>LiW%0Qjr8jyK7o+-%}~6%*Ghb8kQ! zwU^U4~Ei+IrPi5(%R zgh=?_QJ>Q4vaePfLw{YPuN!Aym6tu$T&_py(wtguTdY}C>N(ojRQm^j9OTa4W@m^| zgKD<(-x5^mcpCdJ)l}Cmq^Cd%56&y)S{nXw-E5So)7c%lRB9xRKc{mKV=R?WwQ99D z{KX|)zSrgZ@V0McRXz^^{j7x)pL!fuJE`@9e!Jty=GB;#x_{_Px5q4($cenXTzlz2 zMC;)(o^>*W;N3@rv`lJb*Vg^nSO5YD2uo_^zJ%2T*1MOm&ut$O|Ub1NXWKV3X z_j@Dn-w)I!9^f)8pX*C`_X614-Ij)RWBLnW*m{I&KDAgJFi|5tsSmGVxybh@0VEIs z^Z-l@Heu&g(RP>b+i3Gpa~*!M&tR!OZh7(A70KfTc$Oo$P^b)imD&gzXa>ewldb3? 
zJy9|5Jbx?Oc-85==e2`kzV_E+^VmEt_gmMjwcNkIQy&3G_V}1RMN|D_aSR}3-SZV= z*wP`;BVT?zQ%Xw(auM)PWw(+JmP0AI>%O$j=%2_kiDPo0+=KuYq!sDpvDP0imd>5K zNoCW$e)MYUdAp=|NM`@I_0T^$J1J%Gy2LC`CVwIzdTH#hZ*!I1m)^MEjQ|`ViGR4$ zOQOwgeP9RRicdjn_=%IAi6gmuysRg)0NxinZ|nNlJ_0*gGyWA|_OtO4LpzP+p1Jic zeN|V_WAu5U9NsL-W7?>%#}x0^7t(p=!QnmDCnj-Rt$vQ)b-jU9W3_sB7W@TNbzw%I z&wpk;*-3a?Y`pUckCaIOIvSBDqT23+q|UoCHJ@+yqO{aLsHc0|&nD~(c`-%hmE02t zbA{^BM%nsfnE1_tx#k2XP-6IWFT#U0FCKT^U$ZefmG+RQ>TWZ9ma%_)pwMF=FI!0&>g=DBEnA!DDajwpcio zZSIyY&9FuEc*EVWP3NQt(WA>~LM!X-qAVh?#;Ljcwsd@4E_4#su7(n9@$`E45$Ry(3oG5Q6I z-DZ7Hq4HajOxE$0D?j3lUU7E%K9fr%vxr)GtGrzdXFpgGa#VKdxLqMt(wWz86;a`4 zaYArH3#}nJn|A-mb2~dNA?M@XIe%ihr(){)sV{oKxUHy|No=%_esn%oW0c;20_oj$ z6FkgUC!eIgJ#w8Kw+mU4m|MX+xq+l zk@m5R*KebpHo-@<7n|g*-Wy-|aMBd@X?b4~pX7A3yBbz(<&%N=m7P@gWqDfh62_9KSV>P|>P5n_5og0@Y65vwz^{~ zr~Vk8Q0I#dSSfvsqpsAI0bgPVD_Fb zp6@00&;k{o_O0LsyMOnxts%T$>(4wDJ}&K_j3#!smrc`DDgyk}MMdP6JBccjOMR%P zTR~pKa3oxSXe6S^h)?_p+j?OH7u8(0mwMYN6z!Ta(l$x+#MU;yhQuoLuZNK`n{&>j zNxD~WtF_i2FPuulFj(Q8snM*&V!;9m2*+@um34X&&pwrk&3~{I{mq_QruTr@?yBh~ zqt>r3t(I{Byj?<9r(%>y*2bgx#M5;=06ss_R*Z#HIPFWKMwb={hNy(`7C^^Vyd;Yk zpa3L;-d7jGVN|!y$zxD{%&toCdR7}|!s??e4yAloR7QIO@<9aKg>~BwqQAG9K=B#r zK^`2V>$WHLx__iU*jp@)>cLCnLQnw(DNRo=p6~wtUhejTl+Iq21Yn(teQ^xark7a| zT^D13E!*g*kF+)5T9uq`j#B5Jv!;GjXYphY+R~KIK0-yebm5~1rbRL4(~2H$%Tpf{ zsbl(AvUns4V_hdy4>#<3wdMwW5!!mQ5d&v)z>M&+Pk*IV!b3QvJ~GVx1|S4n@f=M0 zxC)~O^%3a}pi#g*mQB$MyVF82nBe0tn!Gq<VZtoPeF!Kp^tAJ$>lc8Z;kE z5|z6cmfk)~Hu0fs_m_=Ezv=KGzKADVKB~15@PC0_JOD^gR9#uQjrxFo&+;|)Xa6`h zwELDsl*~r~YTDfhw~sz|X-X(*VwWum5(UMV*E0|QLtF0%QtPkJAxiHt0dzC+c?+y| z_Cxb1O;Ga>laMHsj+eN8yzZNHPl*2OdPMwj9CkaX)K;e{mUdAb3!+`W^6LCDZ_Dj^ zWPcuurL`0K158@ptNZ@2Ti@xxVfB&k?VTLz32OH`tK-XVi zPWYzZ1&2CX@6h++p$mev7)|?maQbw3Ornt9WT$A2jIR4*zLXft0^o$<_Z)@ifzUV>s+xTp3Xidcqw4ijqTiy1kCGdZqL(0M1RXq zoAJcUCZ;R5OlebNMI8uY^7P~eR01&cRex?DN%Gm`nsYoy z8zxK$+dhU3GF|}Frw}(1plzji&Qz?0=9>e&BzOB z)Z=xY*9ippqDHUM{@K?LdUcVD+vo;K7eCI2;)X;^tLi)PLvHU|5&N@L=yz988<7V_ z2KEEN?{n%9iT}*@&UWpOlX|;tw&VMe#2X|<4*t7jZzD(uJwZOMmAcYx-&yfb?Ai?E z!%mPiA1vsZ7&}=I@;1Dhs(+SmSdTn$HLv%8mQq#lcEDEy6w|hOf3hARl7D~)S20Qq zU2`!;2SwhcQ+5#?;=Y~ki(T1XoXm9`vEGJ4o_8F(gX59361PxaWuq#%^G|I3 z<0P?xmrQ^&XCKbi8-D~XyHmM$VC8@E-#P8V3c^L;1 z6caGQGm0nISV<{YK(h$HOhI*h+zV?(RPjo!z}W#>m#6ZExw| zl1e#p-{W(;3!(OXNW|-qfK@%Php0a_({r&3E2S>PkPl>9%~$4yx@WP#57tKs)u#@ zk)_3FRqjq_Hy{(n{b+A7K}x9G0D~VWz#oL^k~ocxHoarAKlA5%R{~xE+f?XS)<)&t z*}c>n8C^t3%zuy(QfC@tq`e}f+^xyiny;_#qF)^s+)`Fqk&0}e*^*$9pJ+sy?z=pD zYar&*hrbeGrKHHkiX79$dKbJSdwQMwC32DH)veH;L(Nc?<@J|0E@zQK5tpH&MaTKa z);NH-oHX|C1^R5aI}UwQ!YpWe1q}D(X5A*J9*=iN+<#DUVPJ@@u90`aNB~DS60gMh zJ~6ucq0g_qD~n+wv)tH~3ris<>s7lUG`u%t<*if_Pwbq)>}{kw7(eXl!P9bN3Scy1 zl$9W)QWL=010)D^UJ4QEl5 zN!wH4(0^YRyBae~`-s~jYhD^8+|(UhyPGC=E>hEmb~vj6LmRUihX_gJonvW|6scu<}k*} zOq`AW#0b;*%dxJFTg_abByv=pay3iPrt0Ii_J4ptW6HVG-EIi(-eD4_80Xxqd{0}e z2hEXo_nXk$2QrVmsPmTHp)W`(4z-uHFe$iuS>`5{HLmpIsXX?9$D0N$9FEbVV#T%Q zCEOEf$n)h)1xk0jlCdt16y(Ddq|smOroQuN>D)Go4>bDX)8!HLL|Ii)0(*{@5#A1G z9DiBB518miEs|Gd*eqj2OzR@Abl<3kBZzRdXy4oSf*g1ZZV+tf$1+%6o_s*5ieh zY$;>{bV^gv;we?wE1AA^Sg^49`{(p3Uc&ujEg+Pwtob0 zwcK%#TKm*#Mo)A=hg<%m(_jd5*heGJ4iuCmM|*YM4$WH|clip!VJ|4PS}S!x_`H*# zqXKWi$s{(hU5~H+p_W#n8sZ8*UDu=~KcrLX^4@?^+WF}>5$$b&HF@`24jgAgSUy;k zm6z7ES=w?cKDHs|KIl6d?BC)5Uw_*EfBWzM>pzqE|K#%9zq$Nym-x-)KSPA-$C%;Y zviOhgw1r3-+s^X^D?!i*^OxN%?6-XAg7d5mKz;=_zN#74$PAM#qoLqv#Igst1>xV< z@TeZUqsc{|7zkj;8Sg3i< zqHDBJd^X3V2_fu_Bd_RtZ}as*V0=n=%D>?lGz$37tR4m9fcZnM{RwEqNG1tr53t(z z%Aodw58T|e+g=(WNO71i;0eJIh=t5wDy+0U$hp|srnCJV|M<4pS3(3z@_M$3;#%q; z+ycet`H&3eH$<@NgMMw2A%CV1^XJot@14w-Dxn&G+${Ip6wd7@oW6GoxddojsTJ_? 
zGBgJcZ`QvcTp^Di2E-z6WmiD-U?x``N+2Zguy6gFIi2I&Ce6K(aONT#-dfn7pScTa z$Z;A^b@0CJrgmJr;^VZgiUs}YVSoRIH$*`{sj*4~q^}nYy?^KFWq*Uct>!;9<8hj# zXc%sOC4pO_y*zM7p7lF%pKZ2WzS!m}EiZ7vwJl;#YBpa!HZLkMq9}{*+xA{UHQ3eG zO0_wp&n_Ich$0>v#b1x3cLV+Pv|p*KhmkjCoQ=1S{oWhAP;(=H<8`swFKR;0HJvf# z_t`|RS@X%!p5Op;-+$_g{isZ`2Cs}OmRF*EWZA=cv({2It)?_W$qGV z3J4nxIR!(Ghx+oW&*Vc~Np4*}+)mHVYnH`6a7l1xeI_Ix$sHkHL5;wT<)6jbscuI* zG$hz75KAPk_X8QO$^b;21dzS^R*RBAdXI2J8kg7bO5g3qOE#O+eZz0!f@H%hvDxg* z2d{5)6RN}t)PExHr>ESdJ;1fTR#P|ry01YD4Fc;nLknvEhW@;eJ zR@ShB<-?0}BXj>MHKd?Zpf}Km&PjgXEaQ%fC4vrWBnv2i9V*rF1>5MhrXJx?>;0#T zwb0QeuQz(Qa=k~bK#1LV@v(3C=kmF8^Tev%IJm!}=YJ=BO3C7|-);1yj|E?f_FD82 z$vsFdF6_uN(Fjh{;C=QNe~YA^1!}r|Jl{-NlX(vas!)Vnx`mpas`Ib25^guTeh5F_ z#CpUVdj`wNqDmn>Pp2R}wDUq!Un9w6Cce=>Mg2@{;g38CwGv$pHw>%`U!Eoa42Gq% z0}bVNa(^Res&1|W%u~4YrEHWf_6wz}zD^4LQmeia+LL0V1KNu=fHk!X$vrXe5Bb_B z*5XTpe|M3zBU~IjuC!04(=!M0)}2#It%TtO2!ghx_w>~yF_pgCZR=A9X?R>(ikS8$ znenC%ZwjJJ&iX3#U#ylcL`^Dptq@!tcGLF#Zhw9G`1eZNq8Rg7EFR^4cyop>EgRL` zpH4%6ig{?280&I8u*xD+XmJM{Uyho&)Q^jIy{{CCxh;-20A%)kuD!w>%&^@a#LoR^ zAd*F%i74WQi;mVP>R5hGyVXQ)dgX2<^ge{4k!`A|7C>vH;QgQlL+85V!){$y&^j8< zM}I8ZwJUb%;o=@R)MAnfj%9_f%QU}Mn)q_vE%rGWXQqPL>q5zd@Ca_-Hf)e+b{%f3la8+V^5EJw0{d9pOe5hcI@ghMC)O3l1%UYGDtqc^GPhbOkp78+Cg(gj5U9fRHq9et)Ad*^Zu#=E z>y}UYVxPM4bGwKf02a;mfa0IKxl50e@pgz4N{FM>wx()wpKQZ;06{>$zqBMd z8vTE*6v?pw$Vtvur;jx)OuD#lrggiD)JzuWb-isq)Ov^T6rXK^_x(06`$6?L>&Ay% zgih->&0nH<`_+4~TJ;H6MjW(L4h@$=eBrZ&R~rN|WX0}Qe831fWABaedOTZfol4u~ zRa2R3wm5w@tJO$x(Wl<2FEe|1fiy%M--CY}z7DnkAhuq#qkR72INnqkqlfI9)r)iR zb7InbpSo@p?jv6*oeJCyZ%AE)wxHM{BRNV3#;;NaoO1F97(X^>T+gjdnV> zliIu3(lgS>Z+FQ8BG#9yhD{;D-s1pfcm=0K)=VG8+v#^yYzAAxh10OBD?2g8->cox z1wD7g)>=*E0XK5)qmGnS*_Tyrc4KD!L^wuCv9i<=-rmx=ud z(r~Hm!z<_&wZ4!1>Xt7qQIgHj9I$V+_m3q!5?&FVT22F zqxmNe${QBp=i(N8@fAeKKpN5L=7D^h!-ruJ-(8WclSS-ZNs!cm6A}V2J_6H9pT1?@ zya@4|o30nn)Z3J;iU2|OMfnmOnjoG8VyJqGpw5M0BL#=VUw7c~v(?{UA*MDkz!wPL(qmHnXlL=9Z2t>XDyC zkM-V1D2;p8yPK@c521uwIAzzIDT-)#bKP!+h7R~+OIwiTUcBI)?L#j7Nd{iHV5zY# zpT$tzj*1-8kC8usNPi5?W-pM6y3sGj<929o^>HIVyperY{bGM?hxR0{mWwKX9ZKgS z^zJ(W?{MQzxAS&1VcvBBa!6k@8fEow-?RzL+KVEy#Ghpgg zq#L;?k$=5=*^7VhD`WWBZNgCR!4sITD0JJ`Q?o}JEEY*x5BzAJj zmp1q{rt?iF-y&I5deJ3aH7y%cpLA;5y0t9rEd-H=iln&a9ugZ!2Q0UH=y09x65Dy? zu%0j^hD~&1?zX}#ipYJ+R211zj$_+BV<|avHQ)YAr#gRZIiatND3i5|^b%6pi)-6^ z{3x&5{zOT=e1+U8RcHXXeQa(XCt7v1!xzGuj+k|DGiJq_HJBVqP zKYM}<`(xJWhjROvK3qf7uN4xT2qcsPw9w#>i{#osDje~Ijt3$J;H3}u2(TZKPb*L_ z(IZqLx|~dtd$+}c>Nk+VMBK@A=6=1Vz~{4W{APasI(s2wXis@){XjlO_*Z?)4ocuG(6vKiJng9$l#Cg38Y? 
zEcE?!7kej%+{)c1RH%8hvih)Tf%w$A5`iqgI!IYZo-Hid?#0mvB1J}`YvGq^10dLW zyXSwYm=J@94^GcF6x>ga>svx?k3b&mNp?qn{9F~Wb+=a+rdx~$^>a`$;ZZfgzDv98 zR(4i4ek_{W1^^suRfDAV>R|RXP7D3M-4&PrgT43IQeNwp1s{lp1PIrthP?L%L=OZ= zLVy4PGEZNz1ZlqQ&aHc^Q)ARW&OTpe3L}3lGuJc`F_8_Odus4HwsDO%5?ej?n(~+~ zp1TA3ICyJzya!_ma(&nKZe0d45C+zBd_U45N=@UzPy(0ex(X8ZzBe>fu!_{>8}viT zVOgftJ)Z|qt4jRc>{Ly+{?kw+s{#uS`RD8vjX~>a>Cj_OXCb^;rWb-f#8aaPAvAwT zX3N9Q_B=+u4E4Jrq2sXfXk*`%S|^cKr+r&5Zqn6;D8wUg+8c<}tJ~-pCn10028Up( z?33gs4-EAzaDEA+Xua+w^>psn+T67Xs36)X?xOg-dxH44yS*auHdVPe=$6BH40J18 zg5gO#AF0}Yd5=YiK3Bu^sA+U8tVDmb^l{M{u+W)_ag#IsaJsw})=6KE^DZBa5^Zl9 ze$rWYgiqzj;mR8y7h@X5@zPzMMipSb_4+ug0nI@V1vxY$Wj34+rbd6b%24|q z4xN=Xc-+ccv3b3OAe$-6!`^v{Ie*;wr)9Oi1rCdxgi#5e=Rcu}a4X8;LltQBbT!XL zestu`Ff4(6JSC5_TY^Q5LSyWrEUq3+{F7UhoN?HTw|hCOc<&xTvH96~k& zw8HyoAQl6&yxZa@!?!^>8y)fWJG z=o&Hi+Sz$JTHmYrh4X-`A3gSC6X2H4i{|sLbvdJKZmH(R)In|!1>1jR)W)_{Q@p!t zMpDcvH9pNQ@p7|Oo;$@p2`?5r-P#(go|%fw1{6ou-ZZ~R;S5+pFA3yTSN|BvxXJ4=MD!?e>G^uiZtypD;4?Ynv6aRZovu?igOFD#N+Zb39LLOzJ#Gq@ z?(*GA+;k)P&~8@4$$fv<$`CP^^tmwM7g1xn?qoV{lyVtfp`vNFrGJN5$mC+?UOw1l z(x!kpUhh;tth?(zKT2mvTZv<473^>G(d+U6TNW?^DUtXowNGc`CgJ(;jyhD#31P}63_Y6h%+1bwU0FTB;u$8pCsFpq|7zg@piQE!t zJgW=ylRbC0&&NJ^=FUlgq zf*{#JF~hkG82^8TsuYBw(JR|DpTl4sS6!f;2J7VsnG9y{&4tWP)tqt8m$qJ2Pbg%m z@EQY=zVp}ZIT}#=v zbbGO0wP`X}-M;2NNN@l@Hb;moDs5*ysl>M?BD0_ER8W5lteAgF-c+ARBXN>JaAu`> zty|E|^)j&cRQwtV<1xB5KX#6l;lt;Hu$#=mMHr5zqcI@=nKngp^!2^nw^9TZw!^8Y zqQyBG9L~zQ9K>!n9L`Tq_oXy}GrbxUn@%5d&``v9(HJ zpi7-=E|h=vaccs=`Z1bsa6!a}wojVHh z>tk*<;X6URPoCKZAPe8e=Gq>BHb9eZ@hO_-U~rV@iN3q!8u!#EkQ-E?oA>k(iDiqZ zjxB#{gNjkME)sB?cTgQGm()5@t?~Xbbj9Lyq@!SD#z!XDz9{IY+34)NxbEV0_=srQ zHBgrw07>maycOhhYNm@D`(X^KDKslP5Z(u`+sc?eH$3DAV}B(N6>sXrmb#Vo769uz z^4LtFJhGvO86NSDKt&~b%paaBsnoSonp=Mrx4TbL^!gi_0E1k(z^3XgGyoiUB8rv(3rKA(L{i^3zP8?4J5^twReMJeb`gnP=r` zwKbY%x@}AGz)a_dWN3#q8Y9m}*7&~i_*8^kD*%t?Xgq1IojV>dJ7=IDJ9^{*G~a(- zXH(>RcT=gIfKzQgOEnovOcshy21JAc5Glvr%{7Bk?9iSleCx6CYkA%^TkKD6GJ>e@ z;;DpV^FGKAVh}#Y>oaRY`KJ*Uo-}Bt zo#vi6yNenBTJzC;nj*l|qy}`%slI<-%I@NMk6bCW!paBb>JE5J{CX(*Tq%-EG!&j2 zd4qm_jJHBR<*7v1lR{W7?{)w&)eTse8|iu};pPgPSKKHPRD-8T{8od#qP8G_d(tyj zEsK3TJV>EB{>cZVYAkpw;F+SDPx#muZCP^t@;WTn=8EIHhF%S<&~4Yn@Uee2!)ylu zmn|bZX0<4uHfqWbHX1h&;YC(J>%cz?)q{}@ghiA-4{|y6Uc2C`S8y899F9KTcTeXO zJ-y{H+?uP$II${bc7r}ur$#%c<~A#RotU?jIt%sqw7c<}Bl<)IV1+zSr{|)JmGMPD z;bzTn34hy`wC4B&6pXR?z@ZD!k*n!Y?Z*HC&^o|1ur@^wg$6M4_vCYE_f?zd+d%;(WUqdj#y zapfb238fZ}HtL3)f4^!v$IPNZH`p_;__If~dVymN% zJ}vGR^;HxLe?%V!(NSj5{-dPVhV~f76N_DRL*AlpqN6WY&8lngG&z^(MmpO*uG4J) z(heE7fWBh*JaSscMv{MdUAuxiJWLkZ>de`CH<&;)VQU9s!vjUG#@FJ=%-83^bWt6q z5%6sci4#r{Uq#XLw3_r{_pSzL=_ zuN1}o!K1e=5(om~c5%`j7mM3m>0A%7u-zG%Y$3i{cR?~w!=SL!U3lz_ zpc!y*H5~sX-I%D7M}83wY- za4ys#y269v*Nap~b-$Tr?bSuuxht2el;`KK)i@Sz zYjl?cN3+?Na-pM=ZC+-o7H7?!ZDiGUs3SDhn-_mACWFk{W{~809Nh)4m-x!V?D5zk zzh;&>-S9ee{_d@Ld>sZu+V)OnzMnYPiQvtjV`om^B`k+4S{$XO4RKAH3e-%7e8Mvt zhM8!OMq{9OAn$q9ADOq9x|i{5f~qw;U>qY8Iuj~u+?nb?12$mGE=Po#d++2i*LhtH znBafdMQ2bXuNHsF7j0&>gI%o~;klkVsy{i~>>T=)GR4w$L8sx_(eujpCwaGAM)Fy! z({txeGyjke`7Yga&}u~Myiud9r?f%~=Naz_2rxiTozj$%LBje_g}7S1*GEP3g~6okACIo}@j@JVKY%_mr z=L1W!Sc|1}SI3Cu_cD}JfnP0*aVU@ND>aGoNj}Tl`wnvDO`5E4o2?^m6kz|ucq<%o z@3NVwz`Xlr>(Jf!G;Tv=j4#YqzMg&b;dLPDHb7S? 
zf8bB4PzQX5soZq3&p`)H?{1>brRyqDs^x2YFIKbRYUt39$J3|74)vRp9JmMzP7mnj zPNPcQEVHhhg^;5=yxfBtrt^R39ILze9AWyFW3SBa>=^#=uv>B?r{R6=0m0e%3jBh} z%Aa^?r#x{cMf>yeJcNB-%h#17omUgD-6YR$X=h@bJkrZ*51lK$xVP!U?7buO#VD8_oZiavQ#XLS7I3X`# z{vgeoSJvvZQZ8e4IG`v<@#e8y+n0b>p?wCZ1(09SxV5K`>k1q&mh1L{PSQ`b{`Q9VZFg%N2!-YTf<<*6 z7%~K2L}LI&%2OZ*z5}LJ<8u>Z(V^`e6stNaYDYf(ki*+%9aiEjKpPyWDrSR8H;pvv ziZJ99hvzCdG(&T49kDI;z;<@Kl9_f$3KT+`6#mkGt(GgbolJkRO|mC+E;qyLY_fPI zFJW1|USq)OZsloE+_$G}|3DY*nJL{ge>2!72bgK)} zAZ?DyAOl46YaOm`Q~PGjt%0%99$mQw$`RDQ9Xklk?WTWNAHzq&KZf?An!O&-lheY- z^=XZ_yYr*G19=>ekh0+)=d-G5C*@&OL6|MObk)rn>lWpid}a|Y30wqfuNamPU%zY2 zs6*UzvbN9j$R1p%F?Bo4N5{vLiotQoh9Dn}oPBba>*g{Di&K7vP_y&O9>;@{LD8^O zM$dyN3?_dTLs`vmvpDmI)MkgEIaUYdAnOX?^WsIert~42?MBgE8dKpYSTy6;`gP`S z=PIrPMJYkvPsi(x$UK*PgX3O~Z!rYMPlM@pRn8{|>Tt-O1IPt%^ll{t!2_pt%+<5e z9(`r)ay~G38_1h?8#Fo(U$@5ulK!612B$@6U(tVV`a<9DS5)Y%IKJ2JSu0)Yu$oA8 zC_nte{;2pffa2}xavMvJ(V&vg^TZC(m?MBF^y;uXCGyPX%H3rOH8SG?Ttzc7%g{sv z0sk>;#QpLqfE=vTyVu2Ypg5gF|Dv(?&U7NkPy5!+o^-{gk99G6T@`V7I!wn9csHD7 z8vuV}r=P?T1kI}Xb{x(jVt6`7Cr~Q&DoO_D&8lw8*@2Ec2Ov}kqxcH&#N3JrMraQ? zE#zBD7qSa*z`VS3@lj&t=%ur7f=(WeO6+h0sV?8OHrCYR#*ZuWFxBMdj;{4?`WTO^ z$!KW>O(>M^bmr{`N-1UC!H@S21gJL@1H^v^ymZEu8R9g zQfal^?wm{?A6RU0oa+hEgADhsEBkac&oN3mebg_}V=&H-gZsK@$Hhq*PQ2NU89RRq z{iVjM7q`+Y>O7c1u#ZS-jK`O)Apym1`qsf*l(NZtVtBp{kaCtbKuuL0eO#BTY@;zy z)Ol>IWOuuVmy>x0kUZP8%fboM5v2F+=Uwz@#a+ktxk4?kDmwv$>0+AH8jo;KTg9Ut zc82?q3Q5bQ!Q~D~^64|=O%Ej0dVYW2loxBM-Ofe486S`NHuQog+uD<}I($C&8|Zqj zJgu8O7Q?Im??J)M-4Z7U#y2A=-t2_G`3^El2+CYx?m)Ba9R-3CY?l$M@avxeM zqtMB7u5anfx)iqPH8BP6{&I#O_VLafH_&RH%4YF6-S3ypUP9vg;sXtJsDOX$+Ktb9 z>N=-xSJXQ7?IGE`oTc?#`OOHmZN}d&=Ha|L-Il7z$QzM*9l9e(WH;q*ZA|s0i{f;B z)ZAMm#3#tNPUk1jp{+@6n*tOACl}2;hG?OH?t1p@r1~>7J1m$bPT0q2%2eaa`3Pts zG`aTF)^tMXIziOrwQu!R#GHS4Ww>tz^Er`}h5rzIsL6}7VJ?+QnorLAI8ISwcin}e zI$gD#cD`4jwaOl}h{;HVnAjc}l!ScT_aCduK~jN7PPZ)&tH-}&(N$G1wwxHpn|FyUfE#iDkoHD(4eAcMAjaPq73g?xB?mKl^WR$FuI21k0BLQw%_ZE*mjc11ry79w4>(C7$^(fr~p zWa9KBpQF(*Z_clmHnh;vPvvIUryZDlYIk$eH40+4_CY~E4ZeRhfMYsC{a74Lv{`dp z&&p!88ZlQ{J5sWk*FJX_yy`Y-_blaPlDcd!Q#z*5@_CsFD>jNz3_Owce#LoPQkyYr z0t(~R*<y72?$ZKkOFn^bV#mhp4Yc2u&SN81;W?f)00=B!P*d{8AFq3L_E5beFRt&!Z;bRb!2 zw6=o2QIL<8(`t4#t(laz2j*Jq>U>%(@8wuTqBWXE*PvRxyyF8&CAqO>XYj3_Zz{&ALUjzHzUN61sooV0saRAeWrcYkNu}NB4H! zarI(OSq4Ev^8R8x-b$vih&qQw`>&O-4WckuK`Oeoj%*}82ZDDIQJ}vfln?9`ZE1UU zyPHg=I#-^%6+gTjo|MhUMa?1c9I?TxL9U61Be@MzKdDprDis@Y(*=K)C!1LU-P$Pd z#4JZP78&yJp2KO&*J~K&=|k!iEfHqY^tk6{Bl)?rQ2kTrBIK7Kp^Y@isSV&{ zS;~>V?j?oqBvdYotE*28+di*Lh6x7E8Q74$RJ1V`b-3HC(XFkGV)=5c%Ec2roI=k} z$LbYMltuU0Gs%Fb=SOM!nn1_LSm(xT91V~%uK|Ch?F2KD>Ux^b(t5LACIVono}C>! 
z*3=Su9POAo&z^_SW$YVJo!m1|0v2il5))B z0u6uLbPIUcKAL2@Tm<>EJOhx&8-q|B1;KEj>~e@h?#?Y#uOsq5FC+d+Sl zGB+kt?X5O}8@P%8Sl+{I95hL=*jr5uij>DG;m!d-BAsd6`(a`MwK) z_1O=mG9*!gARpKsa#vszR%ma{!}nKr+ZrB*aslgrS=c!$TuaEJa~i#Wp=U>8<`i_U zyvVh7>vJ*9msqF>DT@mMf($TsbFz?i1ZiK}pg*)&ov!|Zk$FR0TQDx=eJwh-@& zVzXaPWYgM%Lb{0Kc6v2=P$fA8LZao49RunSSD9m!7@PXJs4l0-MBWM!Gw2i zF|93u4nPY1CJqaBeqRF7o>b!Lz=3r!s=H(+BW*2?_MJXmjHK+9*)1Hb8zBtxY% zUzKs|n1jH0mf&&fBYCj14K}> zu26PmG+0#EB_MTx&(GW4dKaHe35~Co&2p|fV2wU1V0ow}&0M&o(HGr5Rfh(n)~eLSX7&M3*D{RVCYOJc?v0*wA&}?F;o2mGOvcM zj*!1mr?=tact_J6?8j?RcUcT z`J~j#8erh@cz;Hs%B6o4=sIZg7E(RzD*?6oR2`5c(JYon_3jk&LUM@r{{>3}ay z?I;EFmgPTgPG;r$Ma%pxIh^=mu9Lk(c>J+@Vcx z;v4>0Rx9VWVC}7qf=k^&p`Lj{@||ttfi&A$02PK@dyZa*b+|VVO=E4Xiw}s!LJYP_ zAh7kcx}aC-Nt}P9+)}OG0Xo|;zVx_n9*qs-`8usCM)T0HF*1y1y>Ex{ZG3OgQSI^s zThLeT?GU<1fFE%eaUPj|1z|6FwZC7%k}s~H=Tt5#$r0jf$Ma@@Y!YYPjVjq`=ln4|fhap^CGgn8VZ}%b zABwD?!&3v?TtaQ{P)f+G@#b=+Y>zYP<@1>r;HDNvilW&PGOzl*@+L46yT$7YJi*M% z&#kpUf3<(49r|Ogfx1tp4ui`kbtli!dxXi@D}uJG})u^qQ;-YS5RN-88uo#!l8d$<_BrVj@;O>XM^+c^-8SM`fAu{ zaAYi22=`Vvsh`X~DbF&?3o}c&@3!c!$Er^0L#>FqE*2*hvZVC^-4v%gZ)!Id;4+lH zJfZ+%qY-GOO(=R~*oY&k!HSBQ86~tEgBpPUMksJfg~r^h^dS-UR6cxFPk}>+)OSPf zo?d?lr#NMO@^9qwM>YiRy+(892QBcd$*`kT7_qL+35B?<(Un>^#YG)9TwF`QV2!yF zMCUZQaSWrNiP)8wOZCDYC;XDtz)=Q1$JGn=e&;*1-E;x+A26owwwz#?7PwTjuq~~D zJGlx~3cSGrI;@p5@VG{_Qcw4!nYf|7aRz^UR!;kQW7uL72W!s0i4=p@x#)5YICYY( zRB6vFm(AuO0UK_3J4%|>v6y9N*@$B*JMHB}+(_ItkV|HB%mDU7WtCv@e;Z+ z=CkMQDL^TC$3O|6+OgMz3b)FoCg^3A^V_n}m2@mhhk-fUDQh%wnwPArl&m9}<7z4E zg^ZfKAgy(0g}$vLDAvni87*vDJcNJabD)G9bJV2!lD>feABH^%^HL{6L zALl%fl#5cB2j7>IwhqpsU@z73_6(D=6`6yA4;H@ezMwg+qY`Kh&@$Dwfd_v_=boG7 z&M5!9SxlCQyqn`Bqx1AN*Y}|h>0Z-}!gDCkXhD04W~d)bGwW{Fs@iYF5fS`$n2K5~ zgxTZboVfY?C=ch%GqN7dP{D{eH~j!9&T##D2Gdb?0;^AX(ClVhKhdvoya$^?&`UPF zOzbga2I3J{#4-Yy$-sipku-myC>wIO>z68`!7Cd3gQ>{9LUwPV1*2@B>g+4&dda1m z!te#NUVsCH(i0Se8&eQ*Cb%qz!^E7`VGLV;N>!b;K*-|KMCRORjpP$0r41Nrs4wJb zT7J4G8QRnom9TB?<`8%<5p7+W^H zXg~K)!F_YGcDLwxJ%#IQvD#iwh}}=E^0=c}JI|ij(P9E!L!(B%U^CkeamNNKIy;BE zFIS7l0b+gBq(Myak2+Bu*{Sj9|q+}kX5#1JSG|}3C3LGtj!9@qW zc=K%TJlehafWt2*em8odKlIQ+VcD0C*Bdkd?qR!Mp|?=8;l}$aI&S3+4cg-vZrXpxrQmpUrJLy6V}*ms+};dNaHtnImRh{{`r#eyg(K(AdZS!pND*rg zzdlaT2hUzVkw7|+yiGZpC-G#J&?4*a*J#Hx3T|juc#&5#gem?lKyG&|ineh)WmA`X zfQb(>_G^WX!7}S}KERfhJk3T96q97>l2hBWJnIxh9It;eqmE@2?j7cytEfPSF)u5w zx8qou=Hms;)_a4$YbTl{V<>2m~CMRd-$9Zxv z#}hQR%93!MGc}lfAiH(i_Rd+tXgp=sA>=3hxuDlvGNM-6I#XY-NKIyv$v{TcZsq7P zyFn71U#5SBJQbfYbY~5}6zz7KtYE^UWafj?$)o}U#c6{b#~PfD8wVY{7zYmcip?=y zBRL9DeOsv{(_UWrL#eLv=BNuhEqdZ$WJPhYw|N~2{BwHrJ9JGB(!rI_xX??a!u&n1&<82t$*4KZBc`ef=JIic)G1>_G{o)LMj1N3& zoC~OG$~>A@DXeCTGHnO9a=af;{Ea*pg)xvz%A~FIZohRNuieo;ZPxPOVnfv2^ zO3+Yy2qA4l9B*IYGlFUZdvcQ(7W?QU}PUDE$yNI)^G`dZ%qIxm*mvRsQx8QiwrGdt*P?TTQ1GK!! 
zuBi)k>#1?HTy!wfqKub89FiB;}fsd2NeYuh_Rzui*oq=Y6^ZNo;~!A zj!x2XABDxC9apz09JLihtqqw5KMK>~`)7YG2wb#XM2mznvk>l|O5jX+yub~2<8 zr2T>8QK-Y@EHv(Q-F8#KYpy(US%f$BoXT;$_6O77BrXfP`E zc*MoZ(ab53W76DJ7Cdk=3`$$qZmvhSv6Qum%}k@t13as{+`?rZm5FRglKRR=FXoNMG21p*6T%7&uU?)l6&A9xjZc!Y zU1=+e_aJWy^bA@Bw5m1{=#w>t=Wx_2v0&hp#I8}4xgA%9Pp43LUaDxMzk`2-z_Y9- z(!@T`fFWLV#ZsuO;K&x)_JaDJicZYe=W?G1{AE#$*AqQRn)MRx%^}x_enj1fVvD>JqxqnU1ejOUS>smONAWk`>n=VI|IMw39*<9^T zt)iX%sBVQ@jfCNnD)gDRd-|(rR*aG9TxFs(+;^K@9USJ0Dy$al<1l|7X;ez1ja#4( zG-R2Ym0gUFilJOZ3u=d;XIZXJlOJ;3H zelWTP>t*~n<%dlKj!8yO-R*B`6%?mA6abMb$)mLn4~sfzD7v#D|s z7DDGu=uWs!Q*I88$7Y9v%Q(FPqovTmhf0|RwSdy(fDQK8G|nNnVLV3bb#=e_ca%}> zF|g2^g&@b98ycZ(qV6z#D&}ndnl8#iOxGOj@+N)>dvyBYL*1km0i=?LI7T*a)hy3z z3oR|U#AUk1cZGkm4*Z2Q!SP4m>u&FT$$35O0eF=8+dawmKl`eBx> z28HM^B9&Dica#^~t9Q!|_9*6-EE`{%U`nqK#*~_&y@NXU79F7dx0LPn$L%6UcBj#e z%{+QM(Hry7>A|#JNMr9hhcFx&S;qpQ$t%L;{-8nuCj8kSs~lh zyu)Z}Kn$oU}>`u8Y84l8f!}wh^CCeQ2y{m3Mn8wwW~r!``?cD^4&Sv!f1abgkyTO3dUxAL%((Oy=*u1!CFtf zeW-RXq$95T=I(4vr48_{b^^Vt(|S^EmvCC-^bD1h6wFjbZxl&hbr2g3?*kXTf#xI8TF#wPAD?eG-f@l_6`bwO+R;4ggm|;k z&l!IM_1x78O=jiIb3KCgaUEYJ9@4XU$S}+6{tkQms) z6ng8=cy(Eguji4w9qrHx2gly!IckfJapse2*~C(}th9FJeG zw4~>Q!9c%nXJ{F<*&PnA*YQ4q2Jh{G=7P7Od`I*pzucR2Hlimz-Pw2l9`0Yx3%!4$ zxU$`verfK_bD-g8Na5KBbBV;Qx?4u~E*ptA(ORkdt#}J8#VFLdCaNtwu<2S@s=l|EA zx>6*NM_|F5CiH#A>nu{{)9NJ)uBNiN2{3KrL^UST%1{H=lo@V$$+ z%hh^Bl&_FF#*$`-yl}%nB zBpW%()-q_V=%T=i+dj2p0m%lHt6tTLe19+T#e3y;6b4$ zZ9~P1>s71pYutaRfdNa|{-b|dR*hJwm2r5B*@&G8va0Bt?Fmi0%~Xx3ebusC+)v68 z0WsXZK7zOxtGTfI9vyIyPc!o>kg|m5>KLk*P z6>cuD8QijZF3;S5zs7&w@Sp3V<4VG?t}3&*ZsbUexElZvMG^aDY_lTIrK-T@%!5q< zCm_!w01TR1EAl)5M0DhN61l{ZCeM@P?|X|!pZohfi%GG^kc|7#5)hg}p9!z&%q))j zIY|>m63h;NBhTA{xfYUnOL1BY!;PWRtzxi7~g7- zmSqAPu2m?97)pOe!#2rSKgmXPcSA@E*g~zfMGfLEd7qbYLW=j*QGo z{IV-O!2Oc?4gt`NOt9zj~CAFPPo>2Qxzf+Obmj#{yAQpNxIcqCT zCY>{O(~otnDoff#gToX(5Oq*k+l?G)T6}Y_MYz~#0)_L3;kK(=N zRuUJmaISyK6OTQ7c1v00)<99wI~GmF@ETuYOBY@E(Ygh;BrRSd_VXoREYZiT*YHg; z7bSWk))%r48P|D?aYa@Y#tRkV(o53n)00l^ky~w(IH5V2|R(O0}i0p$^1TOJ~8v5*kDe>l+ z5m|q6XRH=6pbf49E{n;F9wcL+^?=qg#7freiyB~0xPX<3F}q|roXEFv96g}whjXk$ zgN^`v8?xSrt>CcsAT5FY4%lT0C$1Um8O~@7#v}Y&`_jESgn8&)QwVL4s zfh&06MU>1u*m-RTY;4$_i^C*SIFDPc!bK&z@lb#UkjNGymo1LI$Eu2-`mnhQ#wU>6 zxh&J{Ny$Q&XNx0x0P&q{>Ch8qgPN8MpLy^=#fT}g8)*oFl5DT*n&WG%6BqK2DfWLx zT3*;Ry`c|2^0QaPOy^4uYaRGGH;BM}a!C>c&)MKCt>X;z{8Zr?l?71N?dql7K<^0% zUd#)&6N~dp$UogJ*iMFDT-T%1ApJIAYPU%@jk_O z&xx9*s(P(877S*!PK;F(tElc_`6fzcGfi<+>NHI_-Zdoz&w{fQ2F<#($@e=4oQ>eE zet$n~1@SNX=k0AGS%-7}*Lw2MvH<%Q6sFca0@|3vdB`uAKgF#CjQea~)BJyoRn^aT zKQlIgPvJr#8;ucIamohyR&-5JP_d)Xt&ZJ8LpJNyY*7FG!spENTv-Z;jYYeO z_ZbS?k-X1Ev~wrx9cz}n?rUDgc<->U*g(hpeb4I1dY{iDj3L=i*e7_NN0LifdXY0X z);yd~d>FGUl>;2-q_D_-`Za$<<5Ej&FjrZL1sHmKQ-d4_w{S(?QyU`K_V6j7(@pcQ z%-q3Yl5mYfWi-cFp5cdBkh(qrFp+4fXsLO@?O0KUt>%@K*8YdrDm;fCNW@)r*#*T^ zf}CnsmRF$^Yp&7huhroPSglCb;>SLO5hz4&`_Td(hxPLDqlUysaeRM~(=azm!x&hD z1&5Bh#T5Xz1}ZqIj_$+CR7TI^!j1PzioP}M=loss=tRrxxvT7A(r`ZPr^W*c}>H9kop^1MxiQc1rjT-iFw_`)}^1xi-J8*3aBaC%PbZy}}_K}Us7Ac84 zHc&oe$vQ-$HbH+66}|v@hWE3LYWPjSV_O`K9x z5f;{e)A6V<;&WHnPyL*+?|J*&Hw#i?>`OJy_y|fMS?SC8JnDZ|iGl;4&5Z>vwF8bL z{!=K3PSbEW{YkZ2N6gBi{a;(NYgPzZ-UzR9jd=o<3S+cqiSLp@UHUU0hza`rKzvE5 z#ox$&dE5V}KV$kcfcTks4LP$@QFTr!G)GI0H8+K1k4Q+N|D5Zw)vv>M;Eu*WN}<<1iAs!<_^Vgu=j zC{{gaV5%oFGq96n{&xirC;UjpQHIhe#_@#ponbEz;m5((4r%qvKf}1beY~%I0gtJP zIdr^7uW=SaI>2mY^+3v0UaCXLW2g=~eJck1g@~)5QC5E`LIy6sdu-=vM%@g6q0RnCr(ejR|ef+4*t$}Ot&2@ z#5(0T7%_i^tGRKI!RS7gqE~B#pvcvexyW6IJ`F^Emlhlr9Oh8MnY%};ras2Qy4k60 zUif^kd=0|!jG7FccRbW@0LSgU$=*V;lNrvYkcvXa8I{T=nRj2CGS2vkgo_4c%MNFc zilbx|XP>OYId|Op`Rn`q`Tf3L&+GX<&*$@e-k*e6Wmq3PF}W^K+%(!H 
z-lWOs+RX$#OUK8YD=ND@-|I&Wjb9+>{b;Z(Zk8ko!wj2EH9x=yzJwTzmMsF{QLR)W zED)#%ufiw)|td zSFp*cA%hiVT2^p@%|}SA)bbv{^gI{)ThRv>sgfR%-j(zNhqYeKzj#LUOpzWYcJ!=% zg;6uZfBbpDvmEarDrhjWHdpT%hu$Zh+>32G>ora(zOUIt zO6pW3gH5KeN8jz9-Sw258FhUdJ=UQ?py|9&nZMQ+QAZfcU2Gp66Zr82mVKS3K>0 zdQqa3OZ&?kVW4*Fn%h$k>N`A#20f*NK#qod^l<}^F426Z{~4tpkH&W!dJ@r1zEuD#{A##-UZj+QizPwM`{p8}N_5$pKjE=^K z@C0f7D_Nc|OT?O|JIxe#a{bV5f(zdM&Y72$x(}2N6)kQa7b#xF14riUZ|JUVWc!Fch@X7q}HL5yj%j0Z~LHw*^p1)cp2 zjcsp7$6z)-(LJJ_42Vdey0MzzuvvsVvU@VxDZLN5t#0h(n*JP6nw_ODa_VcRp)JZI zRr+IHA|Dueapsh)pvC)&rXfp{-r)r`|71k?ced68IMal}f&PwrQt$)_P8XHMxoOB; z&h>rZezKJH-EhKNwPi0b#{A}g>-6#o2Dt|+Ry0DvxSZJovJh08b*R>pb zf}Tw*5FTU)uH0GDR>rUS-DlvtK3oq|4rkaI)gMU9TiUrOBJ&i2p~@ur%P`wj4$aDo znsc%LcGMKkOBwbZWo;4D&lq3p zBL9RT4kQcV;p-#1i4tdoa4__i5D+ju``RKvc*YbE3RiepSq^H8(hM0} z$#}F#L#R|y1yc880?nH&;1T9fcH@yHfOL43Qg8l;JAi_aAQ! zNkjQ!79Wio4S)pyY-ht!NJzXy&qQ?8vUtH_Gj~byWgMY35JW*0 z+RFieb+)?9+B#%cghmWSi9r;4w-McihjTz*UTkMgmm23!NplemA?`RPR^XB@Vp3!A z!Jt53E!{fDs~=%{IQoU9_a8F7L_~ac{iQmF#61Fk?nqD?1~Z=aj_YW%kNcc6+b=*) zT1S4yEqt98M2p8g&WDUIsHo+B`R|foV>2-18F=;Pv1U6ZsHV<%=g{ipmL*Av!BoK& zMjLC+^`gdlzdA?;YD6UlbUv;nSb-_up70`G5-t%skP*BfwpN&fVM;TYA| z8Fr899^Xb6uWRL?sRe5nk&K8VNKmf6z4u}^$BCZ)?5K9t;#v!zf4y{ATC$swP~FhD zOVd9IavM*>Gf8K|nqE|ux z-{~61UkSEPben5%JodlK?-t2^C6$Xsi>>Km*-)n%e^cFUxLFD5MEyNHf;90??h)k( zH?~lK^2e8E40bB`LtYqJPdwQ-z*+~4(j+@Zgk6>`WI?rYY3KX-K=cg<{Q#zZ_ICC3 zzx~E*`d;Bi1*~)EhOiGNcFNq8+STOtK7*m+1^eS zWbXr7DXdgG{N6%S5uYycLr#AOI>X^AL|v{&yVvm%!|w6gp7?JE`a+NX*^ge|e<}ED z+N2a#6FQjv`KqS|Es}1`@w>ERuAYTuoXib6w^1FV>4)`B(ulJ)Brswd%6;n6<$O<{ zFE-E6M7*)E6YyMZQjXZ#{2!^xeQcS;T?RgyK*`??#l4HKPHv#;yC=0-LPE#h%>^NvQHRF z#fD#s$aNbOnB6D~Jg1q>bxs8T8M$4>ujX!KqQuxu)&Q$Mb-Acs&i<~d^^ju@OJwXN z7wtT@s2fmsF}C{MY=R2JzX;FakN$1mf73+L*oa1;pIvPd5Ip>4BJpzC;*-#cd2Pi# zV$1$9?YxlHjhV$r3bAt7@+hOvnY`=!1r>hCPj3!vD5f>q9@In`|ImNU%-tin%5-TT zI>Lj2XVKDw>2?xz#2g>i|DIjQ<-S3W>oA6H0hKne{Q9Ui20K-&z#ac_9MUw*O8VQf zJ`Y&hVUz?6giFt!Y=Gjw!}OAL#zdp*8OQkSc4;N{E`zXSv+os{h)Fc0tZFT!t+>Q* z^&0ixNi@7(-#R-bZX~j&woQbvzr=MvJtrMm@gc~!XC!g0uuMqk>ZVw?%=R}Y%kX-< zCgb*@UiFzDP(J?YYZ4t#7;TV2lmD;Y==9=SEJMJv+x^}6P%i|0|LQhDWm4cG7iprf z8^f$`KllMaS%a`$O$Mpd&-p!MKBo4BJ4F!gYHA3d#rw7PaZ-tg?3_962m7g1c`&G%?S zBm|fYy{%{&C|n-#*=(cgZ+~hrBzZaP;v_oMq4&De1IZ9UFUf_R^(5-rsRb;PdNn*3 zFXtaV)ZMTTt-q^(G8{MDgY_D$r$B=L3BI2ei3uc&1~&&py{GZ_>bg5in!eqGb+=tV z%?172seu1o2hXhjK_BLPHQ_j$;DnxA&;aA-s(kW8Fsr18TU2+eAUh3mYM-~yo<~-a z+7vB|JqhnKu{PpkmOYTFYGMC*GZkdJ%0xi?V?45`e3-chs5@N} z0$Nld9Pye_O=P`vK#ZBC=NpftCGp3tJxu+{GW&_25Hy=Z(N=r%X2D|7(ES)pXsH8q zFLb?{ds#RN!ojUFG#efybXvIQ&7Tj-yr#p>Brsh;X<$~m7H_m{t$oyc>EXXYM2el9 z$y-PlBPxA83PiabEcFGig$o5v74b`OWH2#4j`5IM;#4!o;TxNB7QOd$)Bm2SNQ~U{ zHzx)&6(4_(#|`+zgYAdgjHeE2H$HGUi5y66nqO*wXuu|9`N!(b&Jd zZ-4_Up0bP3woYrq1Oo6Gk7KF?(QPWdk^>`_Ay=%c`CD2GSfW9=ZqzpbiuheaQz;aEQQ7ys zgXB4+F!T3qa)v|E+ndLZ=ET-(py;5ZYk$@l!)lV^YqFp{E7Vl6OCiQ@fhgVyrKcxq zqOY9hbO%4abaT=xU<%+hzRQiMTqA)(ogK2A{0MbBy&bd4J-HeB}akGp$OG33KWV!lAt_nGrHljbu{#$x1#IR>p@$Bz=nrFv{ zH>|pSrl0meObgr8Io}I!e~7HAWU&d=IYzh4Sh$2_SE+A&{0%Q7+p zq8-EahWpMv%-d(K!M+F*?e0)d>hp^``O}I#hO)p;9m7nMoLAA@m*hEKF7^XWm5cH7 z&G+G5e7g^EYl_1^)9*<2Dug!OJ=}7(i%ZCPXJx=H#aU!u)-8=Wu$t$4+tMIS;s3}_ z;dfOZ1Yq(+S`6{iN`4Crizj9Op%Si9tC=jy#?m+wV8oH|Zt7Cyh=L+xS$YL)ANgqz~qv(gwHrFwPj+B=0)Cx zPXxR$054Db#C%TZkjF%zpJI!r6uzeZXtU{a=#3>F%B7zh8dsn)A_!)Un2Ru;E@X)@ z%68&e*vA~|QaEMe5A~!rT0;dfQL|($BJJ_GR}P9sjQX9gN6K17LjMJN4Cnv-X_^HH zM3)%x?Tr3<8#I1Fg^{Ri(#!gte~+3mw`n>kr}^OPaIHUg>EipPp?g-a zcia5XRGS&2qqy|66-USn`OgG&HN$rfSmQR1I18?k3bSTFo=o6zItHYs-$Jc#xO4mu z2~#ACFxIfv9`DkC4mEMwP?e)^Nr`(0KVF?g2`1;=87>>@>Gk(6q%&3D#25&y&(w4D 
zRQKlbugE3An{a+<>{%|Q)6e;`+>AEsuRU&$_(?y*I>#IRI?qsuq+lo%mi2xg*sEar zJv$41s)=a0)VtC?SV&m@M5z)1YGJg0(W@+#gstNOy$AIQ4wjNMZ|3NE{BGx z3%)AlL^!}P@ltiNs6e0l0p5*MI=kau=w@E~>!Fy3yslxN%rPSkB5FPfHAq(;#H%EA z6`F=i;S+-Tm6BZ4z!@i^Capjau2fx*M;l}6K|MrAPW z8)DTJichN@&n~3#7#1B5|9!jYE?OLRPX@ZP#spyYSB8j2oPFhLz zhrRfD`vdF)71N?aZzKW&ZI)KzvHjrpp8HC+a`g1K1oA(~S@dz8HFO_P5}17capYZK zZ|uW1ZGAs4p@cN^`0Gcv)^~TvaAS2Md&Ydd;~lE@$_VXwiqU=hzI+dJgmr@|)UxNxZ_Wjq=kB3m2H5=~-W8yq4IJD)FRen+D~NE_EKi z5$auR2MgTh>ZhAiz>EC>oC13q?RzzEO)iKuetWenTp~GQ!rk&C4!w99sY;6`R^DR) z;fbfvna{cXk^Nudwnxcj+B*-{X^1nkzMv?-_f@S6lOJwjlT`b$va=(C8RBz#P3ZX0 zs&j*s4A`B=n67%DB$$R&~@dq!cjjja*jBlrZ-KB;r?NQfZn#V z)P5P?8 zrGFX>E=S8YM=9jIjum&p=;lSyBCTDY@P$xFhZ8cNx#RAErQ#phg@ivU`KiD@=BwIR z?U?Hl2==Cw*+*ml#s>d^eJ@t}G$LfmD!wxp$L#xOZ4$at zKK1v^*V(wk;y}MA2=9lBu zDMHI#aztS+LSzs9xP;#Rwpd)(>}j;hw2ZMjqxz~QLj0G7Z{!cIJw0lftzD}DLZSke zsjTgP8&Dghjh5W7%$hU$o^?mZ9QV=?X}=aiS&dGIou(P>hVG*xqVjfPXZ6x+t-DTf zD(P=}TLm#)WdZHNncmzUwyC0JMiXj*(=WKDeiTpqN{I z$-R(Obv^j@Y~rBgW>uB~%mgAml3d{)>|iqs0nR_tp?t(|rmCfT?Nm;59T#pgBe2bi ziJ&$fuKytSAW^MUn7inKZSNX=By3Q%Fe`^>5xKF@{YhKTN-0dEigrieeb(u3bp-1* z_ebdt=VmqsSEE!M&vQAes_}a|&pcVI6PEL**!g$(2~NRk*R0Qc;f1;-yzs7GzAH3< zQTEci-kica@;)gqap%b3Go5Vd{k?;OFA48PI!q^gnr3?9vblTE;X3)}DiX2d2gL5B*$ru4OVlP>cj8I-7=ACf{ZwKYgX_>SdP$r2>}IF1M1O2WpYU*uU30htQ2V%h-jIWyh*H7ZTDITR^IQq?a z8wPVo7{@UNy4n5s<;~nf2nE4j<*=Y&!$G)*z6X^L=0QJ zgu=-+BJr3So1pn|kE0t`YJ@W5$!>bTbMBwoWWmt)J`e5k+mJ!}pb zB`@P1kP}M3g<+v&`3K~O!N3-*BmI5$E(S%pVb*R12BE7U&>24|YqIx12^4B3aN$h6 z_lN!AhKDATv01wS;5Gi`^2&#rQ$hW1C*K5b`d8)f5jywxVC~EIx^Gs0%K!L4!Y1)e zI{JPmW}dA`!7Db0`()W z&$RX;jX0u0;>I#89ir-y=3aZ`bs3&C^6&)U`*d{Sv@eAUw2~1MEO{Vuf!IapH2&|^ zzP0Pc{qu5@K{k7B_1ec!{;}%EG0e-nGel}XN?u9*c7_6$LD1^PrOqhfgcoD1g=a(k%9b8}U{c=ai+hlngPgziT$ z+AZ2IzpY~5>N}MfINre+zV@x3>dhb>mJIHHIG`r@-dhIKfv=N*`sB2!KSrNML?A}* z(6SB_EU4(iV6ejGVBzyWI;-?S+iW`OKf7M?ZRY_zHp+vSLM%Kl#N8)kk38FKPnns? z4H3wN*9=!VSeW65*Y4Nr`z*dR_fv84ES{^^5@a5%iZN6pyaJ4Lmy9voR(PX*o`6dIlDmVezFJ)-Ou@Q20r z)?Nah#3{^4yS200CzV78$m(ARdUm{XXrdoS?1;*yFMBuA;;oU@@KZF$xQAr6JV)%t zS$pn&4Z?XSQ}2UDL0x2?BSYhX>b<4`J=$@(?R0N3!afQYcCdPe>u2|`Dg6~~@XY)L z9MtmPbAjNyeQoRUBHDda=^o+j=U?K@+~xq7>htLNuOYX{^t8Er)1`C@d!!1ncakVJ3NW_99ClcUaF#tg2w72|~-|sb@{VEI98jNTf+yAB;)U8^p%e@8U zqZEk;Vj%p`j=cDeF~;F&4&2fTYG_8ZcYVJtQ|3Aspmh~#ATY|~CkvD&qA%E*=~&}| zN7bFzepS@h{s+#dA(@4p^nrP4vkp1+fMun_9FB@bogUdf2R%Go@3R@aNZ(1__7dTy znLo!w1-PnYC*iw|&#tHT)%R6Je}Il++#%`>jB2SDXmoEK7UviYr>xfJ`^B9jYKv(& zrZ}Qz5e+BqAq%~^%ynYV)9%U?&DA$DrNrdvdYhy(Zu&v*{p6#KDleC;X?*tO93=3o zX8L-A_3D&7p}Y;Aq9QCl!RZ<)Rh|Q>SXfs6+N<b_t8Q3wWN%xT!`{^3Ge(8H>`vfS=M#m*sjOP7UZPWl}UMY;Rlj1H)DKGNf$H&OXm>$s+u`&K!qvP58y?$ksZowQGfO%=QiHKJ4Jt)9# z!be#H?hIP0*(o1>-PW@@dhIKgs>i0RrXJLwXmG7xG|)Am$8M9+Dmg4ruDvV)!vs>h zpgm2_L8rm(G;-HV*d z@eTPeVI&kScQ-X_`@5-Zb&lRzcXlk04q#>`6%In|*+Nj<6 zdD=A-MH?VUg=bx`*C-|SnUs~8(rw1+aaKj=<0gfZ73Fr^0TUJO^?nIGQnID)z?uz? 
zUHjV{DM@vPZgU%!i<0|4P``Q8!uDIO`{=3Pl02BzBsJ4m!PQkc%K3U%K_yrC#;B~% z#>#O8Km4AVg{(hVdcdubILp!4B}&6>8ush+W(pwshE{)WZSF?v^$S^9Gz$Mh1N4MO zA?F-%f**!_aya$E(-NspYE6S13OEnopBr3ghwDd;b6nN7^m5I8^C|b4{ylDy4s!<2 z*5pp%r~4rmYqxWXL+BR8P{circ*Eq?c^{=iJ;a#g^$yiov-uu{!)K%0gZ0P!bJdba z@+JTqcoa9^g1OIil6X9nOZWwY661u{rY!&2i zQD92_sfyS1b0_xL!IV0iN$lVn~)VDTE}UWw;1IA~&Uu+6Yo?y!Wp^|2`>eYo9dOIOs`&4>i827e57u zOO0BZe)v;kDl0>5MFVobPpyr}f&S3hQ&jtcaS&m6S`KkYs{G5?r@KbV$&EEk^tGMg zWbRhD^9S7_%kg_o_-G59(yd@(3#55pw9Y@}QQHNEGnQan<+(PFgwsY9#gl9AkVwQ&}!4gs?jG;&lT zhQ}wvX@clBEeB#|EkaCt9Zbii2VavmaY+A&fql@a3{O64K$tym1wDCUt2$K}t`2FYUjnqWB zVMtP^_>DOR^k3q?2MA!r92#zDOhKmQ6W+a;CNjR2mqA|*PWeyYM0X7`v%Xn}7NHyP zj#4t_7Gs~|+y$;1248k#l#}*iO{ne{H9^SG2>!kLI|tQu_!g0WIRCL~R$(-a55m}k zOcT69>cbOw_7YLT>|W>nBqOl_8$u2k_VD;dH}iR^j>bv;TW(){>KgEN|7g3#N-`S0 z9$Kb3WDz)4O764V$kL!uikvB6eOPsFkTpho#V)ZbNB|Wgb5ia2Hi`YvSG~UzXK9V) zR>rBzKdA@06vi~hn;I)B;yoeT7WuUgeX72c0p4{4Equ!Dea(BlIWG)xd`wu_qd$u3 z7A4Z&*N>`u>*+BL6Bu#g%D~9kM0n%-rSwzA%8LKA`;}z;RUW2|jz~LQ?0mR=4S&~` zUCplY4jWLW6=$z>7m`q0cP~NggW_W$boR4nSj9NQPo4oCKJ6Q=&=ICt8_GfbE##}J zhyWa=UCb)3wtJ34RdSzhP!CL~-m{%}L+NL9z&FG}dY4uEPH^@a49U}O+z1h0;d@}0edV9%B1vI+ zJ{E`(V!Pt@(kfYNV~f_ma-qbnkC&-3kUj3QS3q*%<&HGV1h)~Liw1PO_v=D_YE11C zAu`^9yh}=PoHd#um&IzTdxAI@+}L0_zRNZ>K7&&tYRx(Fww|fE%(sj~!~{TBU0_QilxXTLDKM zh-jwXDf)$%zmF>6I1y0Q8fkSc9{pDK4c58K@C|j9C2srLG`2LK7y@3E+W&4>@tKeo zo=>;6sh>3OeUIGV3mn`I_FTNC?sw8DUF@*k(2F1^0DKOZFMQEF>(>z5fxt#+ZbU^l zF7v=H{#we4=o#G=QUTtzxQ=NN@ZKC(yhK-+7cAcTvjA zJho1ByE2%1<$8nSdtdi+vX~jkUEmvB=Rdo*XqOPKJp>K;OE!O?6d`zKA`0I1Jb@9l%YpDG@YR9~*}-bpoF1qgSJ{`q8C zS;uvc_P$9LbZci4+-4s*+q}*D_l=uI)kvgX{#&g)v)N(sL|AfhznNohhH3VZ!b61= z1Iu`Y<<|Ar`czmE=A@{t~)A73rmkkS6^7!e9ydIJK2&z94Q9 zzv8U7$klg(N5h;6m|y~1_07_|rp;n3k}onjn;aelxR*JMMTDBwVY}|7Pij!e`q#`tjYvx~xpy+(hk%cax^yE+ff1}^)ZoH4(dkhwfR2&2xV$M~9p z8W+GLwQZHC`{v}LxcY%e+sOph7V*bi%h7q^9waCocn^+I>XFjLlXX#vw5{ULhFV1%gf8JC@k=K z+}Y;rFc#FyfG<2|6^p5Pb2^A$_HEzbZI>&Vzx?h-S%Oc$X_Vw!iKCQ}=V}2-lItsS zPL}=L`^ek2D<%lg!j_f+orWolG zq;SPOSTiXP%l%8GW`j>6rym9AXMH3>Nijb(!^S@`SLoMv{9w?(IT0_LFaIC2k_o-i z+p~mVrbk$1V+9POjW7Y%w^-=}%PA%EM(rW>0<)=!WD8Efa`KgEVPsRp;gqi_HLXz#oU&$^9}WL-)ZvmUTUhb-wF8aG8J$}P`M*VY#nb$=fzoDUlrkL+%7Yp70(o;%HC3boVfA(i}L_UwJr!%O6mshEM+AWKqw}x1e6G#B`j8d9LwLN%3#*m39ICBm|pYs~_RI@|s zS^qCb-!wxy?y*xw62l;Uj&DCkv(rC!C)+>h;M&E&_VhVRIql7LgU7Y3U!}WxDF)9H zTn^*7@Kz2P<9V`YBJ=S`0WXx@jphrsAZJ&FDb{Lkq)l$l1<1_RbM> zU0SR{83xhu(bYUsxMHKTJY50(AsKS%1Cx!dQkt5a=BY>Mh4`USf^6^x4-WDd?sO<7 zbPA25YTT`>VMAucEfI+zJhu|S;%-ghtH+Fh<&haAF{t_&PSGt{SY0gYmuNJ9rc}}6 z7?7uhAM0AmX^4B1@22V(Tn8&6qoviB*#BAu4%+aE) z6TaFLe5nl$Q}%p%RU)z0>GpMn^00Mg$(Bv5lVY@l#nJ(AR&?lF1oQxWZ?URQU%+s& zZ~jjfgOvsDzGXI#Q2J_@r8vwTnRQP|N%JAbz!LckiW$I<!2Vcx}qJiw~b51jwG{P)hx*ir)xxGz${PjY7 zi@B{_m*;>H%_1!z$=|_zVTu$?9E!^YD zUc0I-{KSUdgdwi`(=BcuGuxN{9*$q&$z&X`%(zJjD_ZSd)d8&-hwi?6Ej8CRB1VIwCS-XwjZrKEu^G&Z+ZUde{c37si0XH8{aVyd)SfqI5BW|pNJ3Vms{$; zo)<`3G%ybRHdy&9hSN!o>kaYBWsRA3DgAy(xFw@t@(q9gAMYLfFHGW81hkmAU_%ak(_FlMVS5W-{hxioieGa9^@qfa z&}^%=ln-=_F9o}@`L5WSDFv^(KHUgJTiChi9*V^H+5(yLeAvIQT!d)idf>l?UCiFI zZ5z)CmH#(8`mts3M$OVP<+FF*>QgHDqY2oY0fHmD@YVal3i=`AC;wr=*zAC>Ch|1eX`a z$in4UPv`zKgug_aCtH?b($Q;o%M7I^_z7C;otwUK1j?DnY(l5(>=?o9lzTQJQ~f1* z{p`Z*Z+yg+9!p?x)Stm2O0D7`G5kvEExi*h{Wt9{gGOt+^0D=+B6GHv1_$ay_V}rN zP*&MS@T3VD6Y(+If(e`O#n2uwIySSw`qFlXH!8j;9rk2eP`wcza9(`jC?C^5 zMeSxPzrK~?;=J@xhwPSA;>PfYt2Xg62F#uiHJ3%CZtuuYUYjX(13^6Z4~*cmX;TP8 z=HY&8voOxBk!$TB zf%2S7BYIS{egctK^@{T?ft#ATjE&_r|Gk^?nvkhr{(O8h;HN7xV|}ZoVfGFa_%kRY z)ZQ}4Dlg7CUK?p5jw1}_$*RCMOmzbd(8Ibxyu=zJ#r7wg$;*DJ$T z`$!CM-*-J=STvTXS;q5S?F4dh(3Yln+1kr4*Kz>Er~m8KTn1I+#5iV^2QD`f$@ot& 
z)+F*t9JFV!`+UB{{MVp32qvZJn{Xiz++t9%#=Qfjxtuw`f7O{Z09lJLJhQ?>xty1Y z3`gpRrRy_;k^~4yq|q{*(5CWfiSLiui3ED&)1fxp!+vIb74Lt!HF?kK7aILVAhHK}sAtiHCDuFzVX?nouKkFMWtJzVL6V-^GnS!*zHWIc)GN5`9FpfAfBtw_qBA2{74O`jMLTVgH-`0^JXF~@8hN)AM3eVAC{0#)isTW&b zl~K}>BFsXRX}-eqZ^hh)J2py(AlFSLz=&yv?8C=)6GAn!T(N?O@0s6B0Xq7T6#y2A z?JG2SAE&!-;}WGNqFz9__&h_aq{ebiXqA6qm}t-uS#qcyUC*5xvut{PrF^xG5A?>o zN52=B%LR;K@V@|eMN}4%$8WcYB$?B3;_XijURbKeN3`yooT-T?aD)Y81o@P*`SI<4 z6i2o97qbHgBmExLRtC&9wD7J1#vS>%NWMt$Y>6)^mBv@3J-9feb^vlNy1t{H5*wff z;tr>}&<0>lly-RNR-%S&6ggf~^YJ95C-sod{Sg30K@(t?$?5f5UfG9#ph$!Gde&g73NtbdzIlcL!VcjZ%NFbP;jxF z!$YVe0DL~i>>$2VArR6GeJeRqBEu{F?00A~h$t{@+8}ulM^P0jK!`XJ~w z(ji`y40%NE0Y?6@j{H+4`JW!^Dzs8t3@NaUJ}Mcxu~KBZ=wwWRU~H%*e}v;U&x1H; zUv6qh(gNHnR5AA6qumR_uLgCASxS3=>UW^OX%^#cg5B2^%0=9buXq5tol$jar z!igUUwRnl)DB$G#-f841u*)Cypf{50KY!G=bb@%5(xFu|a{gH3h=hZ%H-Yv|%acmy zX-UMw56tTgef9Pe_Na+ZEm#$*(evhp@Ada0NKYj}FQVyvUjW8fo2swwKmTx;tw`~U z&k#?`N`!+p4i?EERMNf8^K%u}Gw~WngDaHryU?H+?BH}GQkUu$M06L3rpr<&fqj%U zQLc`URdLCdlxJw7tG(VJ4PeaU^n+L@Q~!K-q%^8bM?6}7`3h#;K$*NCw8Cs({TDdD ze~6s}bB<(`9y>tC(nOtmuU`nMulJceW}S(~#V>M?Pf&-$B(KVw9^;9iEvlCrw0Kj@ ziPySlmh5>Zq?FL9JY7qjb}e^cGC*h08V`T~=Q>c&I-Z<)>nalUp7k+jBu#++8jJ|+7qiQ&JqeD!9IA%?Rn2OVi_aAJ4lGP&i9ETtCB(=rR7P*teU zWCX!uOPv&^L&1HOPD~bE434f+HkV?V=)_|pO~B;uPm&w@cge)BG#YVHRDwH6{e3`x zEP;$|gUZ-EEQix*Jt+$8kLjKMMAA7ZUDAq7eGfZ;GFl-fdb)W`Pt zBDNmp%Lf0?D& zff`U=h=EgDIQ_noAu|;rjz^4OktbAF^*IM%hP{jlV6Li(~_ys9ulf{mh=Y(WGlz*Q#%bw z5;N#oSf^tH2ndXqG-d{f64}h@$Q6Tl9Q6x85eKc}sRu|X5g*O8^MR_bjslQhsGHdB zLJS*%868RegBrsa5nG6#k#E~J+Y)cNaW^W@QI8EM1vGSsz;$rUrV@U ziNc-WdstNK0XQ;8tDn5D4n#0h173)Yk|TR4AZlbvE&)zi3$7=hMNIY+A0AMGt|NBV zBCq!=koR}qG9?m?--jiX(OxzIp=L3H1a!xFV1pjKjJ$jY!Cm;)u7k#o?<6=R7fMoX zUwRwM@ueAP-u5C=Jgq~FAmsp@z2qfW@Bq`9NThqV^EQ~U^JO4{2Bs-oNasnDYKS0n z|MYNqapnVAK1(V{IN}}EUGAa`{p`ijqIGLZ=w~{rsczRXXy5p6Dm4LYBSOdGx}%NG z<#XXkwFOvtEc4XmtzR^vC+x9uwD71Yy-9{7DVpX|gx$l*@`ON^O&}te!(}}ObWYC3 z;;l#*>%VWLg0Dwr#or~hsw1c0G2R9gT`am|AbT&C z<3xpHsi~o%lnKMp11Kw*`g@Ak@R7-zGiWqikmX_j4$!L2#foBTX!W*=+NGIQxXObL1uT&@N@SVI zB~P*ZG&B-9b*YPQ_u&x@$l;Jrxg^&^A5rXORrF91Po81Pf2<+YXJdl0jOgY$p?Ip{ zJ_w(AZ9lic+Z7Qw4TdiPHhGm%dkJw2d6}3Z#r1K-%)UpGC(Dss!gu`X&9rN+VJ$yD z5+l$$DPePBA&?}9TC6}?DYAky3_^3Q+Mtff`zrHx=BU#c)RxkRtG@H7Ix}c5T)bLU zHih4I6zBC5f9J`{GaD~qj!tywEaPW#ABcU2el2NvDt+v^mhfOgk(HMKm-5%209Geo zjFfwDC_gSNyKLm*dToTQ0xx*=E2V;WrD{d`k}S*FyEivQ7%P#N@DWQq`;3+yrI+^+ zFF`VpS~Spfn4o=8zz$ynG2brlr3hc|5S}lF7E8PHQkaXX|NX? 
z;)N{BDjrHSOU5&;vt`Ni?AmSqAjti8+|$Fr@yhttulxK)4Z}^S+p6}(CtzNdL{ieJ z=L2;?tlHg>#y{_yCQP3(emlf{5vx0i*PjHve~eh=7T5#w5$z}yqy>M=0t)?FMq2=U zeVI~d>EEQD%!FS3=DH=rbq<`X1Pk$w`qI@_tDb^gV~5?w!nReL9gb@%kV2;=CTZhy zEjg)?<%*j3_|k^tPT1O4&7`<%3p)8UAhz9mdAE*$x<7*#b-NeP(n{|(C%qyKqxP<()MVx(sATPG3kt)+Gl#5rX?1*a_cjVDm&st9oExe0lW)Vw8#bqmK3o_*wI&kHoI_i1W3;GBldgQe$wlBHG$2 zqCp4Ov1q_uaFcpx_6r=muc%;q!$obme`h^G2$r{r#NvHftm>bx|2c)1ZkSV%Buo1= z3Q5YPuDqF(9cD@Gxdwy{O+)`J*p(1gk+h~yXpYPsiqU3?0`9to-DF*&!O)UhDDoFC zl*cW&1A1vT3(L6Ov{sqCIjQfXcO=@>3G&2zzf68}W%fq=EhpJ+)*ulmdBZ|WJYQguV- z3t{=M>?@a23s5yYD$O<5m}-3!`xK@mJY7QUqO{*s8n+PQ@Opi>BK8rnH;X`FD1~WK z`Yxj$39h%N!D}CARw^0xOeXhPVdrwBz*?#^?pPVap^UZ8^9-~is0OxBf8^OcE04vj zf_TWn9hBBC(81-0B4h#m?#bCHJ&AkOuVHbZrs=0{{Y(;_my82Pty_PS5rD@eD^tu( z;Zy8-79jcWG5lrqyi7f%aBCo{iOL=?}9YvK%e*Y}F z&Ki0H3VTx4UG=VYm@vYTc~3$znxLACuMC_=a#jS~wa?Lr2q{O+ur*F;3n zbjnwHU)(ah18eYPnLVU)tK3!th+bugv7Qhid~k1l zF+Tsgxyrl{cg5%1WQ$%84f6t!r?5z>-(ul$JTYx40$0RjT_CSu+MPxIP5sC3}WxZ!y3bMy2QGFpk`Q-{}H z(*U(I2e&y2R7rYS2)*o}McDhp7p`~@8zU|kJC2x(=RO*X?eD!Ho(8jzoBmxuX7#Rm zq1`bIWOKki3?SI#hJ36T%@r`dXwH zSDo@yN+fm6$Is8$V=*z@J!liD$)RJJtG$(QmnI@Y+%xfOU49f9=tqJH^loY3ldmSi zt5LJgQye3%SluQl=W>x86K}F6yb;qDLKwolL=)krZCWvqJbW)8kd_a#LtYNlKkh<9 z-h|9uf1P9!xW-oorMWT+aT^J!Lgx5cL@eDs1{<^}`+6kRbnQLFoZjprA zoveVQM%3?gPDC#l3suT1ON+Oh2{<)spW>~ye>#{P;AmL&0@2=i0W-z@dwDXCi-)%# zW=NaMXyFdWRMx}&gc^3Z&qtBR9wfQI3846w__TfRvi9gu-h{XNIj~ojmHQf$?qD;v ze0o98ln^y}7kZ)kz_4%!(O&%&gzQ0iFVM?RzEadTf!_Bi;BS)EIC8 ze-XBtI`RR_a<`z{iNwt)U3MGK^BvUF_>oB}=nos40>>`COUQyi-9Vyextr@B$;5_W^ z%r)NB>b5&34Yh4R9yiX%3!@4%1F6O!e;I(=C+wJtJ5#G1CA@}bBRK0(Cnq@0J1$0I z5?YN(DbAf(Qk1JF^*__&DlX z)c}9h#_zTGL{(i~@f@Kw{8nX7E_h8C%<-HxnhRNs1(DG8D{;@E&eEvqe`RBZe0@_# zxbMSWg-jv2=$cq0zJ5Tm&2Fu8L*log=ZA|zqS7;4pKL17hgmj^dQ2?Chwqd$VHQQZ z`tU^xON}^AVEZlgFgA7=t2i^s(MegmI#q4!=`!wveRuBK_$84qbZmodQ=%uRO$pqP zJf6Eiq)UTGkX%RvWZ>0kz_1z9Wl6>=82Yp&wKY;*KDM4MjRCL|r)P;>n#8k$R9pVD>1l3Zgqm1V{Ye?q4~(lc( z^>@~$j>_$N)w+=1$0J@1z!^Y64HtU05Ud_df!`0-FUl&YdW2^tpW=qeQsE1PBAY8K zkMfYFAgyWpZrz22P=@68FvGZ7)^yoEZSYBC$fb@KTP-(lXm1&%m#q8s!h7g*5z(e2 zW;1pFL7IMumm`WPfBiu<@3MesFnh#q52Q@`-t)#Yb8{C0G7$I6B^h(;5KlW~BJtRl z*LYvx^nc03?PNYb$E#-tyR_vyHde)(`n*4+@EH;+8KMB@gZ%;fX-P{5^y zG$^Y3JJxo4D!G-`FAwemlxs-!wm43Kr(~uvTGM4obs^_le?aWx4#(LeZpankl_%tt zX&1zhIY`9pXgx)|7Yr44>RTC(S3im3mTLeH!n|A4cd^QiM6D*;ireiYb#0)VU_hO+$N zK?P%FAsg*o7*vhnBJF!kgAd;a9(e8%(Tl-&PnG12Z0ReouQpRvimfeM&oAT2N(;oPP68+-}Qq1#;3;UC-|zldiIq4 z%R+3mf9Eg@AI|-Kq+5w0efhq-j~s0#rZ1rDjOCfW}%dgm6&`2aH2fib-V_!_moiVr8k=S z*BIcYg@!MOzHE|QyZ4AL!$mw^=+l_Te=kQVj(<}htW*H=4U(X8U!_5_k$qfQ& z%3C2SW!Fk;a@ZjhunIcH_+YKSx&X{7B77KdF?_FN(?W_eKVh~7wVQ3F_p7{B_>SLw z%#2dj6(RfG(u}uDD`D9WtLtm%*}$WMdH!a#{f@k`pe@9SFF$kbRjQ+NxMG?`e~&{* z@#Lx)ok)yHM4zK$e?Az9sY~PX z#kE&cm|{CyO3c^&^ueojLOZN(zaAFpv`sGdSoM5fd}Q`x6jUIWraK|fZWfMP*&v_0 zfW_#2zJF3%dd44I0*QpoGBOPjG+6$A_^q=@D=;Omi18?IPphk@X&bsXBl$yi1g@om z$KF0mugIZ%3As_%Y#a*Ze~1fE)0k5@3=(VY_6q$_KMmE?9B3FhV!rrP-dW2)hFFcf z*zM5Y%r==-gA8Xoy{))-t|?>psu_RCdAo%_QhvRX7ljnFKPFEgwqdSO#RN!vSEP}cN6on&>tnL zOjIMd({6%Sn$We#e`$pVl~iiUkKVthzvjC=JTXAPIGl5|FnXWFP7K&Mg!!je9BB5dgP-Ep%K_=PC&epOH>BI z?ilf`#E*2kR$c&^n`8}LfqsOd5!KX8%ip)IR}djEG~cKjt1YYeKg8q_F(9Cd9{3O#9lB@7 z^KoG6`suBZOe-LrBeyGUi;g9)&Gu!BD9ZY_#x8X!e=V8yvR8zddb#EJhsKcQmps|I zY2&SVQcW4g83i-=Rm$%;0yjVXxYGM0J6WT-pjFndya%I%Ei!V2RZ=w;b>q2>q}uee z2bDjK0{^ATeZ!i-W|YZ5DmjVhW5~quvhE`P^h48)d4@xe{1uciKDx$iazKibuaJH$ zV1^Xgf2e(-Qua;wVyg!WO)xz-GYx_kdGr~W{EJx?={Tm%%jqXW*C#ZtNH$kCtBkMC zX^Y_(8D67aK-pHS=5|vjX)O;Ph@1mapyAhg#7clGh52zWtk-=A{^@`=9*6q#xA(F* zClWh)op4pA%mm&_2Fgn=pXaNIj4z~rMJ?wee;X%9$@Cv5KCLjMf^Uo>@ia6yxh+et5AIpii=E&VLR(fp-KzGT8(n4H 
z;#_N{iR^=Ath^vX=2(~qz7GrSTY5s%RndOxuGZ5i9 zjvAHl@R?e4zR(TX{FjEUao4bp)A1jjf9d;zfs0^j7dtY>!b_aylQHURA-A3%ZZIo9 z&R%2QP%k=mAI`;+n3OTEo3(atNm##aZkJIag&HFy7LRN)>-babi7TYVJu&_l4#>gk zC_SR_EFI~0Qn#z8kg+v7nL`Ql=CTV=PdbEO+7>F{(7v6hp&4O_(m)AflME*_f37O} z44LV+;zsrOX0^kIKHakPXSwlkWgjaf`E=9z6nBczqb>>*HZo$!5{KT??Xl)Ywu0{^ zl601?_A4~(pgnn(l#FrE)=v}19r0N~_= zpH5cgG&=7Fy^Nd3!y4-L`n}SSHJ(59O0dlsPI{C_ozzbJ~(>?({4XSNv(tIv2HIoO(v3Qy5nj7w!r_9~R_gOP z(1on9@sw(q5nLKle$tVrK@V(D?GRJ`ImsnVWhUg=4_-v< zPQ&Jf?{j-ESyl)z*4l2QviK8*CF27d{kHtJw z$G6h+X0UG;#zy#^p3p`65QNSf2x=P)xyR4~&~#2oILcB8#tt<8=36k@*$&O+H@ScJ`^ZC0e<4EFCk676^*gjgu0+hFHK@9_%rZIBX#iQBTW0$aBhVa-o3Klfd3w(Q zT}#`GQC6~Rt3@nE)z`k}QXu5Im6GDP#DBkwVikpeOfe?`C1 zd^ii|k`!5;c&8UsI~dX&!i3RbJVM&ne3#Q*w+1M~q-IqC;4y6ao~7BV+d@+5ZfEw) ztZFJB&!$fv10>sc=mwx{fz!#5(JSQU!F0esSb;n{eqBEyha*c1Awy!Z++y-#=BBo3~G;6E?mdqfNMXd1sd(dIHkDQ#sVivFoH*{ z@w3ByJ=+gFaWZ=Qrah(e8q$x`u09D$9taSUfAu>#Qwe!LFyjsx^z3z*Kx<*>_xyJk z8zm`H8vg}`q_LUq?LPiO@VS=?0jS8?(8dI<1!u4!cXWwC|3V&WCaWIp_$TB}U0=ox zt<`7X1OF>i0g?OY5xBcX$g2+2FbLicrt-k&xmaDdNDNg~KsqySueV9E@N5@~yde*f zf4%X|Y(C;U1RvZdaCh&DEV<{+V&D9`E8aBmaQKl*1UrV19kL_OosC7~URpwcA)AbT zc`K}O*&uE)oPQu=2^`wupeuS8x~|S4@}L|AxePM50K6^x4l51$<`^WjNOyw@l?59~ zUevw_6d|wu=N_B3W#L&JX}_E7Ye&fsf8BqR%1+8X$F=>6;LRt`boovp_6m>Y;DWAg ze8M2fbCgla^!f{v4PA%$X;14z-JJO=8GwWLOm$+f=iCHsNxguy+?KCc%KGQ(KkFgX z`P{?##|}Ml+U)vkfI}$V{$O5?&o9#3g&>6#7z{z-5Qkh10ky3`0gGF6!k(*42J?`E=AaZ2=T-P6n^H=P1FM8{1nKa-x)gde+V6BJ6+%`1*lczi{L+s$VkKrXr`NCDvvZ^x)wX( z(g@D9-|1`J`5*Z;rX1jE_;_Qm(N>d(jJT7K;}AFVHQ4!!mPQlgFDu}bkYO1}Qi6XS zN=mdJWP^NV_5p6H!`%A0A1=Zo7kyl;XzoJ;+AA;HwAsd1J`g5&!v(8U{~mK(CHx0mF=1F*#v20<^0g@?3aD70btT``(kzz@U*1|RQBR$>6B@(iGv$z&9(z-Mo^q$>ubJh* z^~{=}EJ)oMdg4H%#8FU5f9AH)dEvRPx5eb)IbL8`L4nlh`pF_&EDluSqc;P}B$$Zy zo-36%?z$UZr)Ve!TFLG)pIs=w%8U(GFFk1_)Hn{$TAMc@Stkd(8#}sWfWaKee$fP` zzFu|hn8v0p(1aWDFBO3Qh??F6VuetSjXZlQ2|_bJnhFnKo-$g8e|$5~M_sQV8=5|l z8G0x%Vy0;E09~Oa+KdLziOTduZacgJPyJIQ#1NM{Z(F{{a|H9XkRMa#l>Yto#BIlg z8m8S;th#>=mVkzCsLVfoL#Wle46G{Lad!vF?N1n{#aF97J&$KbmM)qH91M5V%v&g?)8Fz3ehF%kQS%@Ydjt!?7k z(|Q$?8S^$aw}KO5IZRQ=qT+PF6#0rD>lTWRLQO<(w0@@Vf2O9)Ww7_!9zK?8oYxU_ zOh|j(K={=u(W}G_!|$kSxF8iTzCw-^M#s=s)umF-wLf>6{Bqg1#QRRvX69kF&kiP2 z_loSUy2yRf-GD4KZA)6{vg_240CfIkfF&nm$ZM?R3bgUUvWlTERyel^i$X_V*SuZ zS;xnppH+F5X(mh3Vzq`?!0#`a@ zgrXVmf8rn*@YMDc!SsE9sgAYoFz@aPN#;SY0af$aWj7Ae3tl?1`Mn0v-ifj}D?9JTn`-Xxl+|Nt~(`+*bl!{0p?N zt%GHy(Wws}#$-750J){pYL8K(aHg^qm&L!Bf7UYCv03e7s7-D?RwF+mV@@CTGXjbh zk~_>{#b@%)3(m+|-p)3HE6j(AF=;ZLfzRLjLB^p>;f$M_7i*E1rnuHm^*N+d|E0F=v1 zU+H_cU|$eXl%Si0E|b|X(0YE*DL8Acb1vq}9c0psZQ&7$-0rSmkd;1mlV|Epi%R(} zmSjvqV9^%}CQ>6ADnuQN8vq;eo=9|%e{yMb5^{X}xs`qDH)j7JJ|%*WFj zT!)b=njB0@_S3cYud9hbi;tq20Ks zHKS72oGLnAmMKrl(L#S%;Xk$ovl!H(Z(CL^?g)*o$k6E=>Xa}qLDiSVybURZl5g)r zk6RZ^7<)fgr92)%+HRvC>KgH?E8eWt2-B0N7Qp9+Chgsz-)iG!ue`p~m;QUs+ z?w`(pYeh%w2q!iZ8ZzzHRy<*OPFoE(2d4o(BEF%&wixr!Ys)jSo(9UBq=&t}Z~|Ia zv^+0}xGwp%q4~SW^S-yVV)zu!LdBHac(&}?4LJ{GI7=7fL#~7*gYvd=Hx=;3Fg)b* z8}jECWuC!9bLE$J&s+^`f5$FijycCy9L-0sZrl=n(RHqE>C*y!+z{Da10hJ5Y;VNI z@ibV0Vf4l*qRJ2+rlwKRMo?dZ!0J9YdgQjCxmlq2mDueocIr1|K|6_j1E5%Z96OI3 zbPW9X48=C8RMBn6@!ludmp(8s&q&~&ncnpNuKVkywdHtBsiwi0e>R#ueTV)`A@Maz zK>=}!TWiLs>w!DR(WZA!@#B*oODcnoB89~L$Jz;0L`o6*-kikpnL@89f7Jteeqob> zE8V2E(T9oPw%Xx)`0*>izdiMclAULwTwXSZ++CT!&K}I|{2Lw zO42>1e4=>B)!ze$e|eG*nCCS%`{Ut#;E~kIsr6r6_fIieZ9$CF~$crDBi3;K+6&4;gQ5iy0S5Ui2iW5 zzx>A==#f^e@yX@4VZUJwMDjuQvaaUxEq~QVLEWLIAI!=?oJ|aEz)oDRKaH_#!Q3Et zV9UYvhw(Lre@f#;Sl9D9H!hRumuZ819od!G^osXy<~QUK)Pse+#C2If!?FZ>upW^i$^PK_fPF z0<22fkbWL7@E`)%HKl@%1iL_G<^>%$;liZO0fkRwo`331WRh|Ga9D~lirbaV0MmQ7 
zwVO7(f_lvwp7WC=*t|Qn-B%r~G-MG`ke5D2qev$&_CN-C56i&;0B_#@YMCeHALjX{ zzM@j-f1z%SP}J?H?W(f60hlqO)$L%0uH+>6np(66eH^1(6yXRd#y87tA;(C&K`+jb zjB9VqiT1b;Ggr*;)J~|=-rE_9`cQq&#fi4THDBLL2j*uPDlJrek8#czX#qe9*frg+ z7`j#qK=B1P#KEuC8TVMl(sO0DK#v;i4h012f4=Jw!TAC;;qZ9Yom;*)xGg)!0tKBz zZ{VRAGCC&P3R9}ykRvIbzlKM(JgfTQbdXu`Y2I$*vQ_eajXiZ|KM<)vY@lf)8yQM6 zhhOBW`LbFUs@yunhv*^k9IgnQ9DwgNYIN;74?z%sk7$_iI{mvvr1qulH*a9wy&S%3d%LAs8k)iKByO}z zwc@1=HwZSPGOf5W5w;1H#_HTgH*nmAe|I$FEYX9);3XW+0*9tW9fe`WDk}%?@(7XgBAZE5e!wt+jt1jQTNWQVb1Rgq98L zn-2}l!QJ<&`dCNEIZ=6>5G_Bb+H=8Ut+zjmp>KHa9LyYqn~T`i{l?sjL1S-we^YyE zFQ`uZsqZi10WFKF#QIkuvEc#8+D(!m#;fmx!Fd_-dsNl*`AUOOkf81tk+slVSI8zv zUVUAIiX|NuckZF65z~+)|N3YesnDj=Q>mn>X7o@Y(buQ=R%6<8ggl*(HTjzRLTYL# zg!Yxe7JzWJ!hWk-@)x zHuh)8eT*Avtk%k*$U_gFU=I7i@FDN#`6d91!e=1jO57L_9jmDpqzuv@{&oQ7lCjf@EzW!D3URM?u>e#v1 z&!K9*0t8vULp>H;wBKRO;y3ym)ykpWM1sR2m8(P5wN~4Eu2W z{CzC%84x?Lw8 z`pQ>k)jkKr`aMAOYl1H@R<1N(81l8iX6V&Jz9D2QWdi{`F3~cvs8}w%cKO=;^8I!S zI|r7v8hR)2_q(WRa%6GPDkITrSb!4udfbr@5xgFH-BRKBD5JVLmbEmyGPHN5%GY=O zrefz6@q;wKw?w{Sf2MHumkv`EXel;I7n%*hXzwE{FM!sPaG^caWhje}Q;Bl6YC7sj zW+jXFKxtKedq}P9@bI|&0GS1)3J}{q+J~NLpj@-Cz0?CXY@x;!Nnz{TuQwAW=b~vV zWahu0Eta%lUcDAgnJXIxON5GS$y8gj`DyCYyUs@)!Zz=le}j+FGmKeBLa6*DhV49j zb z7|2gPfj~f(A>=!~?5Y9)eE&`(ms%)^r_u>r4g4$%s5a5Zl^>wK=iW+J(hz(URvdL&qb!d2jO_OLx#36>A*refr#w%m|bRgn#VR@{K3xYTnmj{m6-> zm`OoOe==mR@byxxJTO4lQ&G#dyg>AT_0S)@au7;p(j=q|D~z@Kliko|Maxq0kQK>Pdg6y zoc}v8`@h}nh67EbUsTs0$|V3U;NJjps!JuIEx|9v>-PO6O9DXP2VZz2$^$&?WQhGx z4FKKz{prt-c^|ohF>{-U>GE6rPdYz<-{H?{P_i(zN+AO2&-(qLv;U>C|Div;Lt>}H zf8WbrNa!v9Wm|I)osmMsW*-7D8s0zlqPiXP*OOE*q-oG>C3-n+uUSvRp0QkcnFvTAf z1rW-WFB8Y@V*vojMFV;SL}&ayFSM;~f9G8IjY;jd>xe(w^CxUdyn?P(-O1|_{`TJ; zK=JV3pH)$$=KGPwOq;az>%sgQoKg4$-4LhT)u_J5JWOtN5r#iV(`S(H}kAyUi~TU)>{2Z8=0C@&ynZzyfrivB6QjqV#M)1jel!{sK}D-W?r3lL?p zP^iv?)BbU!i=SS4_h^h)7+ZB& zO1HRVyh%IUB7~v4FVHm>Mkllg-zc;=$;Fp>yJ^?VCah9Y1a>V?VIpive?-A4^ErS7 zGxUrOO;*N7bdn%BRu^i~o%}LgrYTzcXgJdaTEd|Ll}g~KGwA10xLnXrGy#Qla3C&F z3Z80K>&N%tmN$?_qPaZwHz-fiGOZyu#OO%7aXKu%!X0yB4~uZz!Sp3-6cw`8u(I0ue1J zuY=lvCJVuhZnQ|+D?~mqKaPtI9yrQyX=4Fp%Wzk%9=9g+UEcj!fA;q#>fSr>QFLfp zn(Yh%d<7``e4V@DRYH9+ofxevY-L+a} zureIHxz~%gpFt|%KrOM92m9iBa6_rlw-fLnvVOsaP@thTIhYCtn)o+kHvs&(I5J^0Po2DzK*>W89 z_AMKGEM1Qn-#g&)v#7bx{yb)E1JGU8h2c4=8|w7PRTjF1^OiPHOlpsnI1pFHYpgL* zO2D51N|&oWK%#?=juFB84 z&9;|zD-O~f%yy#J9;Xi|ih#F}%q#bEg4L^iAO}PRoezvjjMD}l`gcKYui}hRMh+zX zZCRZOM!t*iWUxfoSDD(ebamNas~5mzOqyAQe1 z6p?Vvq48|t^6$FTepFhZFtlKcpmA8?>? zniaf#TJB`>b+TWLtt?Y`coe8=zKe-J(vfdR-9<5)e*nO8wJ~7t<4NgOJ{(*K$Dg1N zYUMAZG}MO=63rCmTZ-=2dfk;G3?9TnrCA5@B~PTbRs9oe;zq0Y zoHo(Cf2mEnsX^S2Ger>+se7kUL@FTn4iCOaGrq;<7QP(~=J&`3jgk;-nHfWZxSpWc zL|i=Q7!VewxqI}zBEzWTE2>-kjW;&X`F;KEL?}Mxc`%n)g*u$!3=#^YVJukoRDLLlxSf2LHE|o{x061L1ym>4r+G3b#Fn7m; z^qILKdd%R&SPM{45RO(Fr&_0?OU=Aif0Cb%a9A?uS;VdwtI$W&pJhoDI02S8)^%#? 
ze;T@m1Sx{~wL;0L0}lkWYDD}b63fZ3yX8Mc#V3_-e)|wmu1vKxm2BL61b4&_1{%S< z2gY4LnorT*llDhiYDnSRcI(A*VeD~j+c|96=LqQy^}96M#98;{W9n_qY>T!QS4=WL zT_}lE1Wl2cI{Hfw!qf(ln0z+IOQNs*e`AKtG=n8>DLDm zL{ddeUn^jl#u*#;LLdu#w_$b>40^&^2MG3=kWms?6rcS&?r*xHZV7gwiWmaK4C4{a z`)8G*rwUT|>`Gt!Ymu~@(lI1oXHwMB9=Kl7|Z2QnB)E)-j**^6HF_|bQ zjiMkf5?EBC7~7UbJ*}e~mbVRtD|2KYB7WJr?76&fI=_IQoKBgMoT?9zLL8yRq9aJa z6tjI-AUr{Yt&@j4vfB8!4U922_0$Q(be_{(RPs)IcTxWl;{u80sx4AY*ix;_>zb zABUyA57#jtyoj1yIY*7TQkSdnU`)h36dvOu^HxNqMgPksxU zwb}eWJG=+x| zy9M)Ezg$t`U|4CGe{S@Q9v?AxQM?wKuD}NoH;<%)Od!ISB(f1V)1&|nL-n9DDfvL5 z#L+xN-Z?H_suhG*Ls|=fYs;W8&~U-*^tr2aO|s@6FHuGJt=`K9>XXJD_inWE`DU%u zM~}x14a0Z!1&&SK#2YGYlaUZ!X8W#g{ zVwyT8q<;MF+ahOn`ot?6uDW+{Lao@b7#i9h;V&+rf6;uWKy+y;A@BVntsFbG-XXUWjdD>;a(RlZ8$iHg*t z8;{PP$x}Jc;hr$6w&vjA{C)*F!EN{&HAf(8f6iucQ89s1pC1^3AYGiz0$8>NcR49hW7*gBlhu(=lDsb>z3guedp%5&u1%d#_y8hq_m zo*vlXQ_qhn1fb&Zw(S}2ZT5O3$U$IeKI#jEExdAS#yt*K)ujGh%Tz)%oV*k?TGq|a ze}7B3_iPG|)v~Oc&>ZJ;%7&eQ1g8#r1Xg~t(vBveRs|4B-|M@^Ticbxz*DH5q6yvW z0gVxjcE|+QS(r+_cE%>o?rdNq$YR>WI6}bSVmtvD*mX={dz#;|?Sd8|dN+OQ46SaC z!4)h6;LRRsA4C~6{=xe}UU?rM!05Vzf3m(Q;cS`U+D%t+p-RaiaXFB`)Z!CuqAuLh zZmcx}`-VG_$?&br;lB6?@a%|JjH!nqn`FH2x0Ji2)}ESmuxiv>fOKz&Ce;`MVV84w z5v)?%1nYKbo>{=I0q2*^TTpnmpC^Bhq#df0hdR zEDpdVZQ{P5qj@wghCTdIyIz5_0aEt&;J8O8yX2VOQ4Qsx&?G~M4#J-B@%KK3b$ZHe=`;u-vPxqHU)9Nw%!E9P(B5#LBx3?bT_FpzhuGC ztxAARcXl`3RMuN}+wKw}%Nu(;!Z*YagK0!~wQ1=;x&3L9fbdpSgCKn+hpc5wFi{s2 zgT+(yim-t+a2B|$pL^2c$_2(>B!_Q?H+XC+(5w~6LA{AjJgXjDZ6igdZ(1CqMu$j)aYw~2y^ISBDznr zua9^DJ3z$0**lOk(ggUB=|{0xghR4f!SP=&8jCx$LhOfD+=A(mP+>EG34guhbufNN zl7i_rfeI_42{3JnC4{Gx=Ze4&6%CZQo+aSjs0;8$<`g6%G6FdHo$-VFH`a|Ah0x$H z@GL%`uR=&H>f}9WI>c_ty^4)MMlF6;sR4Do_49g1+)Z?Dz@~JN`$=|lK6~K3%>vNOrw~DQ zT@LjZ`YdT9)p<4(l;P{W2*~+fEM2J9T<6n{@KY!#bijs1WJjaYl7CyFx4XXDy4{~$ zDVBiaiAjKZKnL!sLHY?8zfEbx%VKeM@f8e={-M$jmzdPGC_n@R0|NDv1ja|tw{ zEKTbL2^YJ=`xx-|*MAQS0Y3NKh^V-QXo`8=p)Pzk%Uo5nZ^B*e-n3EV^2A%BS5a~0 z$@B{7UaX{!gj}cbkchPzqiGGB{PS4b4`pASLpjAtY`a08qt5H^?UjiENQt9XZ*gg?})*2J<*NH;K0m9zmOW(qRl_ zLePxpi!7g{*iWJ<}srGT#i1%4h#$fqv4y8U8vC* zPdNtZk$-nv+C{V6Xi~&-)Wdm4(lt}pEY&l3nq>YAHb2v8w&MnnPFh0$0y&@jUqfXG->>+_!-u&tU5y zr8k)Rciux-cuPVRCH|zf?l^R_P1w%c3nY>{+0O0gK!DR3KPI~WJX ze1FrMtOk=|n|v;z+jPwR44+X4NKc4R(gFDL@~evdg1A2T5FpiEvxbS8fFjTqU~xWj z3o#K@2+@ueXkX+8koHI?19ZkjkfvjU7}TY(<0`e(*Fa`Yd#ESP^YF|oc&z8RbYZf{ zgs9xdz!PXO`UNtVH`Z6+wna$ivWXUXDSw*=^t8AywD(Ia976|;n|I2xE9Ow|YriCJ zX_RNISrXuqFdbxC52M8$pol@DVJ8${d-7h_-kxjzU=->GwJ-xj2xof_i|UHV?`&@` zn0*yH3&fv|H+WehTsAbRfOe`?j@(@qH?JVHOn+%0-{)3M`9=7d(8Ob0@w-e?GepItTYBXbw|HjA-IBbAYT#mO&CMdw!u^0OE3Ncwfp#EJJlJi zG&&D3krFDFX%iu3NtSftxqp3SAo-SHu$4d8=fy!y6tHk_fFe+{f3sgtVt=2X@oiX< zduuVzJ=9g{IWYwHC^7t&DDU>Z0&N3*-m=%7?`TU0zs(isfMJz86&u1cucOx*Ylnvh zL13|Rm&HgivDXdDXz*bey>-&SljyTY0%xbiGYOz^03H9$I9o`4Tg0+XnUHq_#`W_^ zLMnklnhf#f0OgihLO(gA;eXMN&jg;wfEVUujxinXv|(-3&xvm@7o3dZrM)ORi_O<{ z#mpxRwW$K|@DCJ2Z}0S_0~b`w#t;aX*e9q4TNWMM>n>;`-dP%y*6vkZX+HqpsKu|A1jE)6|%fRD*(F2|o99a%@?ZhtEMlKGZo(${%% z+f7ALoXU*&>0lDrm`d!d7{GW{VCty3(D=qLb9T;w=9m`;+WDRry*{HyPzecYD7FyF z%lT8c?MA#vV27Z|9E@f8La-TnF;MqCA{rUs?!|worX$hDrf!WMT&>0U-C^zFxZ&yO zW{RtRh#j6D>-|#4`hO#ZRWvQepzj+*Fohr&=F1nKeXq-V-!A&JP?|Rn15hA}`-hk= zeu(&{zorkpr`2g(WjrnFt{Mw4PF3w z3=x_K$hK6gLpk81AqWT<^D25c)Kyi7$Shze{+K8Y0>YUy=zr2csqqlaJh(YWWpf8Z z!Fn(Vu#YS4d1(PBw_m~Ew}HVi0*|RPx}Ezy1SRrC5nYSq`K@Ni zR88ytEhHE-Z81S7P%JqL05$6uFpB^^Got{-qz`}v1An{>@&=5@(1a}`rT8!&0dTBe zZ0UOgcOYC)^6_zLh!*4&V_*x3)=tSD%=!Qk$eZo_pQnGP$=VL}il zyf1ly#Uq17v~i^^wbbfaC?3GU_RjayA#58?Mbk)KzLF}s(o-dXIcKPav3qt*cPkgT zu>>?k*MGTcLW0<=-)_#~0RH+5Hs5t*8b4k(OQ~{heiWb`q~TGwhQ=r;w 
zCppH&>x!Qb7K3|DR`c28$=l;gw;BgYF50ubdJV0DyFtfXoCg=o%9dbRe+|Xi@qMxq zl~!lf8QG@y)u`Bx^r%+Z?>nT`hdHnhDtT6S%YUUUmWJA=2ljIJ&2bzQCDe&R17BD$ zw2;c{&AWxq2F+&HfaKgA!WaRDZWix3znL}$_I3QlvXKnNw8izC=4hWG*%~dAUVA5e z>NpQs>x>Jk>o2sL zWAH2igr(YGFjLEl)H&E-MJBmD(#JS9!YS~G(nj3^r@?U@T(E(R)mu{T(CQj&GLp>G zSd;tQ+Bd^~C3iY=K4D%OQ0}t@;;omd3p_f)A=*$z0UGDtHK>HP0nGZHXn!y}Pme0g zD?u7vBG@jj$IY$p^1WV-#1P;q5zb3@W`!}uJTgTeov)|&_`LT4_>jF#0t}$EY-tGc zj!BBFYHBZTRu$Z$pU*plty-mU6-N}x@ZY$60zNi>lzr4$0+BAm97_J ztlTJ=DJ0C7Fy8oysGB_7p??(zD*NtwDK448m*c|44bbAdjeajWzy}jPO!2ON2R9?2 z3*XFQWxuW`5or+Nb_d3LHyKU{2@oz+c}qg!4Ct8SGt-7B6hzfT&0%^2@VsSBwi4tl zq3csr!*E9!4F~zn61q4i_RcnW`tmJh(Qx$e+lf3NrFM3<4hj<5f`9h>ef8zud*^)H z;Sds$eOG|N%vHZH#KBe5m57bp%!LxgM^N)}ZUMI~BHx7*D6pT$xDzEz_ug_UIPb~@ zWm>H|Ub2A8KvT#yW=_Km`R^B#KwFueZyXPuQMtKRa`?THLwK*WoFK-wpMe)q5S$!l z%B(%VKu0bZD5f0nTz?pIeh6RN^$t=O3%Z|zEk40Bp_NoDKhXQVia=yG(G{=>=<N z8?bT`Jc4WZ2(x9%3(#8^P7+7a*!k+~M)Ft4q<=~j^m|5JSLor1tSSP4pBET^xL|pm zczW=@?)9j9?YOC%D3Qu%S#W>(!RYGI-{rI?2Xw4wvC~oc!hf>q#p2#Q0|!koK!Q*5 zf-b%peCYsSu%F5~=(Urd@_<@~{}Y~b@?rw_o1nhJW?y{+4+ zDnKp*kIFMr2H9CIHTX-$n>(c)$g))B?j_-Bn%UCz6|rjMw%(Lb-F+C_Pd9K`#~|)g zk!PA2%I9!DyMOR5^s!y<`z*N4Y2lHRLVX^=?t(B89Y+fB#`&0JEAK2YL#TYt%7X(bdfhsuc1QM{+Nt+n4b z2dh#@=;vzj#?ozfrbndmt(BY)(T`Xb6)p@WU;dlxYZp-~?LV1%o$a8+)Jr8&MNyWHAyD~N#Mwx!$ z=|T)l>wo=npM}%dqoT~xq?YO zDj4&8ab$fXD$MXmK!YSDALMd`UyGim&2rVi9mn3Il+XNa4m8SR4!tHDra0Q&uyA{V z86jW40=|3(*+p{Y5Spl`W4z%hnFtFQ^?}g`q<@&kvn`M*f~=$q_YS*%$iRG_5cH#) z!^3olL?b2qy-iKfXJD}>Ap;e2)h>T*>$^CI*N@n^e`AE*e! z!{VYKnpO52_;a^$y8`$pgw7-HAWjd{9H<$w>bJPY^dTOtf2+vPVNOaxn*8}hhY47N z1AqA-w1J6Tn=!!4cCQ9lh6HZfHoj0mneVpNI6SH{lmtW3vNM7n5)OtVVZf(drEr?N z*T9b4P|+=if}qkDzu){OgIpvZ!b$;WSf-+`F9kqcOGc47UJOWc{a|d80T})WzN^8a zZwe-B(R(p~8(UqNAfAc~Ox=Dlc#p}&Wq%1{R+|)`Nm2CkkOKV^cgWQPT|6#qt2=J4 zapO(EelT$ET%+eXw(SB%5z#2OQ;P8&5d(pgz5-9q5rhdQaNl7!9lA=%{VwO+!b4%a zDA)DV2!0zfEoJHp6gPI6tcG4P`|aH%!T-VDoAoNrZtG(IC3g{uFkrfU6e;d|gMWMM zn+OcJ1p@}$MEUP|Uc;=aZ>{yMy`!9Sktiiq)nwYZd7IHb;~7hlzdRMHJojuGumSpx z(I{Oi6N)?cc9t)02z!EU_XOsZY+CKmlwTE|v|Jhe%al>Fg{;%m19R;$T)is#V&28I zOTTD(dAwH7lQoT+cx*D@2uSu>Q``0qo?$oE7!&c; zbA^UcnW#);O^fC$O4+%m8G12^&AP3st}y#zWsXb@{j;RFcCT_OWQ&fkThNz!I$A|1 z^F%pWEdY791fFi-8z^&^3=8Q7de?ra~?#EMyP@^e$gi+u51A%D#{N8M1X zp3!W0Z=>bK*y;n63kkr#afiy&G!OilAigDSy8)_PON1zM@=pCie5= z`@LRD%I)e@z`i210=5Jt>wn%JQ=O~e{-UKIE`4M(L(hKj5kS>Vt{Cs_Hh%ZrW4jHl zxg&uN0M(vzZhH4dJve{Lr40U%hCa~dX#rZ9*=8X-;}GOy?**a+b82j{$r$7f&u4$A zJFxkb!UT=vR?zLyG<2`$8CtbX=0SjL_KeDdG0UOicsAx5vrkdkIDcA$n%JOwYU&hysrq3h;Bq?~M$6 z?&#{1k(3$eGQ)#(88N`UR45Ox8qkYTp?#Y&^3q+cQd&#dn}5XSs`T)+a;|u~9!J4> zlsj%KFN)n71P!3K1GUQxS+c{)716q2eSRJepm~n^_EgfVfXnKAdrqg>8;YYn0x8>? z-0ozg4oa}c@pG5T70QF<#Y$WrOoXTAi>Din;q6KK+m9~qGsp?&@}ta;uUYy;&vkGG z?{_0mNW4KB41Y4D#Uq;EJFs_N9?RGMx&nXFS-x%;%lC7&V9eld@Od;%0lY!qW;3Ky z=6+7y+y+-cVLbp05;?U&wWhRM1`Aqt_Vf(p1Ec9uTQeoC*TOX)t@%)*(YtJSZbK8? 
zW@cd&exIpaJL$!N*aKi+eclgu@BU`g*rJP=8L=OD)*6AHmT(syh&?q6_^L z!IKs+$;=j);K<6>8Y#kFKgVX0HSrk{VH9kjJ=h#%N-6-W@}^@zLk;wZ|wG&9i{YFoNFw?wI&em6XAuXauyYVsKMijVp%r`cJW zMl1RD2!9jhg~mU5yqW>n9Y5!2%W$1@4s-73%4mkBb?E1_)=h65niE0ah3`m;k-=^5 zV=c=;eTi;53V0**IX#`f8HD7^gGg^wC-2@{U|qRzax=%_Wz>!5*40AM&Jk6aB}2`( zX?bmCJGTtZU7Q7cHm}iz8$!Eh5&k_dy*=zkFMiWLR%Ur=A^yn?&9*01(~0n&w{Mg zYv#E2W=F0{?jvyRk8Uq}LuLN!B9@~Vy!j#Ui*c-rsifHl)`}Z6yqE+hL7FVhq}^a+q@1Dg z8d``MJ{qWl_g4}_Fe_@zC?0As?{jI`elzcs3Q)Q?hB`Kk8|~K z(2iX`-yVPl4qurzx^AAByMt-a4e)juXt_nu3aHcl%8^_v?@Y91I8sL!km7Qz z!lSLrg?a~$=4I?@It^mIHtQ`o6Z9p2a5 zsAqPUqdgJ)lHIm(OfPfvTGV%FcN!kD1BmLQU+d6lIiZf_h5oTfKXlX1cSiK+HVZB+>c&xGW9d6vK3@4|d?zJ457^85dA;t>XL=^Q&)_bPPD>FyGf;VM`%dURl7HJX6cUcdHmrE3{ZM*FgsgqAb z-=TQxNE*0*{tTs5V4_A3YkyLQMSmIv59Mm86-gMt0u?mgp z5VgM>+RCc$92M*5EPk)8m6Emv#wXz-A z2(0r7P-gr?a6>z@Wkg#jr)>>Y=+Tyw-BiNL^FZ6*h#F{gZJ!mVOG31?vI+a-|=bzF18Gp#c8SAgM+pWj&%}^Bg&ws#4Fm6J@ zWc2rUl0GGT_;WI=b>VJ7H!67cay%P9)aP^R9**T}y7TMlWEs`d_1-C8-Q}U}z;{!k zA?&yx>8162-c&SB@<6Dx*wdGCe|cN})NDmq%2dA#@2?AcIDdMp*gVYGeL!XU?lhBL zTXo3>yb+0LGK~7JBei9MX@kthRRMOAmp9uNcrVtTc4rLttj|zE{xVl0^CZ zC{WmAF-Ok_<*+n&?o{GZ^&yP<$NDbq;$~Jwhr7Btt1lFg*8tkDP|F=3%1}c?AL;nG zYX{JVFo)Rfn1ACtGPj3ml|Jh=AEP)cU>_pAn7LB+Vwq`AC4szDK?f|0mIGGN#i-e~ z$|T8@S=!5J{Q17@`)kK-;p%P|PjPCfv2UYHXc3H>d>#A9q2229Sp{I9m*W~PZVFpf zy^7=IagB6G@_wUaY394z`}5YuU~$|dN5sF~dw)z9?|%k0UDq@ny6UugjF-neqJ8}HDtA>p*DLeKCD9bKu0rKcB^jNkgrMv>4$ zMq=+F%DNhZx0;a%lqhwmWK=ncuBTKei4>kobp2au<}4O!gb->%>|@J zPu%Pn_p4dl-ez&XqgI%WOxw{;gjVo~B^Yz?wSS>dlYhQ0+VYXV${Ae16=KBP&O-X* z*+$D}X9My0jOtky)s&_K(bi0F>)>|e9+Xp$C%!VCILw;bjgfV_9nf%91T#7Rj3DI_ zaAOu0-uK=HlNs7hfCm2lfU9GJ9zne-i$0LE=l000PL%naO-8HidE<81^E(DPIlGp8 z{(p8HKPwJq*lB!0%z)#Knwg{L30GOX_R32VI$Kpt1|1* zmv55?{MM#w&FBw->5Wj+zn_$pEUfR$c>U&3t_3jJR?4=~UcQBB^{lS8$9#UFfT4ah z=+if~_tkV2hx1)b35;`!OcAND`=jPjoV?WKt8;g0OPUG7M$b#|u7g%?)-96Yy??9F z1dnl8C+O2!TBOshXul=#nvD+^Y1zy7+4EhcmI78;1j6H{X8YuNwvI1@mD0=eGCv!$d4U!(u@_}@ zvk$A}<|8Zu?aohU3p(<`W#X)Ot$$(T>3F9`sq%jNO1u;ey4rvVM)IG_`{{Y)f~1fZ zXfCwb?scaUHXRseA>#db>7Xo@PSCDaY;f0mWr1Xl~k;5fU!H?Ih#Ma)^%tnjOt45+KW(k{NJ>B!@o_%?8lv9;#n1RV*CvoH#6*s6sF4KIvwCQ$gf1=Xt(wIX=_cs{ zNMs9H1=}q;s-Qp@q$`&sB7aQBSJ$59M{s3tHvK$ztjFUKBVd!sl;e6V+J`w~Z{}Pt z%RqG|M#`l+jgG*cBYz}~*ALV;Di1ILJP_iYm2e!-XtEQ}y0Un5x-@#A1Ha=77k_(hPyJHNPpC~h zceDDw2#)hz=kdBkm8UVL!*MRE>slk`doP%+@yJLwPQ?*&Fh4n)B=91IS zw=%ytIpIy3oXu;}x+O(-Cv7%8X}j#?zvpqbUu=&(Fmn09k00V`qSMOwi9k9^Ezu-M z3q)k=*4*j_;_uDv)~_`GK10ECyrCCp`mmkMq9VssMBID31GXu442E!R3IG+c$ChhjWbSLi@wp|9#xR!`UIn?cdV zeP18WuQNAcgw@PU@7H7*iBgc;MdH)DO8w48E%bz=|Yq6d_4$%mb~GdbHi zy{s$1gJGwrbAO&HL6I#NxmpVa?d+q_(u)eDC3WFYiIa^3WqUat7lQSw>bZ#C{>xEk zt@|S2Ytzk{B0XF;w@!&p%Iy^Zv&~)C#Vi74e%waENw)$Q-Hfw-I%bN8ZBTE=J8sY& z8F*5>7fQ|7$2yBn=4N(8g6=drdTx12PcKRzB|9UC?|)-)Pu;j=a*bQ|y`S}HL7$Ew zSi){UajO}+e$}r<7%Y%yuY%U6{3^BHQ{O6PBDdTd$DL$QdlV}LWtvA#i6%-ZY=!y6 zmC!+*r$FMfqFF6az%jaco!*NJ`g-WcTg0D@r*3Z1Zd~$B0nDbw77fLFrS^}TCzuhf zdWt%xaetvL?c>YY`O9J^71}Np#L21O9~)3H$O^w(GtcvEiYy3l%}7^_R6RJfJ0u5k zS(k5ui7wv8>)x*YWs4x{__SCc=a;35!gJ60B5|fX+*XD|EMyV~Z^W5ou-w~7)ZKjL zupQ6yXDd&bjU9T{Cf$*S&AI0h4nR+lwC&BSdVe=zioGG=8+T9^YoclHDaY%odsd7N zD5SfU#nOUkx?Sr9Wl2YHR#C?+3&cd)1)Prpg)+LmQvYFcXd}3ez`}fZa3lDgQ4l`M z?7a8u@|IEy3pUFwrxsMdI4*#Dj{)^%lJ&Dhg|D2Khti1SBMa`xS5kDvJH86)yT5mE z^nb3qi~T|ace+`D?+S98PSyE&fl?Woy)9zKSij6*YWS$Nbhg{&Lca2A9*G~S=WmOI zU5%}zJwkEm<=fS}F*5d*S{D!(;7*n-iF|y|=ENm+5W?rG=F#Rb9xgS@1t)}l+{_`~#qJML* zxr=Tsg1jHS;jS(P;?-Q-yN&X)xU|V$7O|%rhAPlg{${{7E;{&w7&P z_4@>!g(Xli7%+!X4*dq|5akc!e$2fD9(gNP;8ob$Y7%bPLX*@ay08Upq0>ksw1`$K z3i^7YvxvdN0B1Y4RyJ2YW>BYjk$ z0A|)S)5234y%%HUQTNC&^dJtS`_+0pE7by#*Iv)>3$Kn1&GzmTFvBjI5SxjxK-tqp 
z1|zXSg(pRE%%pTve=cgx2F)Pz>XmP=Z**<~)E#{xx?+PSfVb`R`LYEHpnuy6_st%c z_hFT9Q~w;_!91uvE}PBWp@8AjyVIt=&+`VM$LYd~ucs6#y4GAN0AkHHPkDy!eBRoB z=;*G3cDu%c#49DF!4^(Y0!2a@eBd>E@Dr|M=CuaAfZx=;(@Gm(rCLN6yLq&uX15uT zjhFRVZ>K#PsOOs#nkOvAsDDtNJd9+siF_MYgrjtzpMGYKy*lcjBlUDxa_3?b`}64x ztSKDm@YK67-`}VfVWev5E!K1|_F z;OSBwr@TpiF*!m@s)6VG?s0XyMe}yjqeylqyD(>t@eyQ$&viR(C$rgXi$ZLA1@M^d z_I*EVirRcE`)OOg-+%AznYHI@9P@Xx@vK1s`FM;z6X$NSe+JiGthbKF`+Qq?qwR?1jh&W>hQUY-8-R!)8xNV~e*Wz-zUw~V6 zMIkWGG{7hJ>@$*@O%+kk>R{!i9A!suZ1f%_tmx+H`YCUeAb)j1<*a~al3uDsO0GqK z7DY+Fbrb8xadZVrO1-}@MrYgr)yRz$g)typp0(Ti>Ki|i$K!_C{*|lgpQxIi&tFVU zAPyqm1a&R`ZCErxHr_@N^jTQr5I2x8h0?&hG%zjUChQOvv+VqMwG!qYM<*~Pc_yJt zV$oQbMH5&wDu39)jR*G^xiMRwt_WBM4(NBjgY!7!r_ zlhVMPgpX@?_&)mzgGm{g<``|*8P6q8;p}5+-J`K$gMX$7AZpr?U$V)LFekACQxZN} z>cE_Y0ZSOdRE2*OypqANcm)F(8^MY3*XYoms525&Kb`6hhZ1nvx@LA7 zdQ6Gk_ znXGim?B5^PL>|2Ed_(J;E3`}kUi+jnjD`XQxnjsHv*nXoHCzFZP>yhp&so1`G`QHrVOVXW|6 zv4rQoV9Xq|w{j5BOKcP7N)}EqhBKu@wK3ks=ws9o#N^#FwZkz!p5k|SUx<)24)7hY z%*VY+&g;Y8NHG_9Z?RW)!@U}Wm&pBK>ochZ#q{fXJg7WD6NJPbqPrY=erSgox+aA8hgn+(V zjyGkl>aAeaEIRI-&vgI;Q$dJ^YJ3cH1AsjvfD}x^)-+_^O2jhqSPqF%mZEJ9*&A@c zO1xD|Od`cs3z03x5c%djnpx2B(dNjcB!8lu*(EkVn=~r!i)Ui33LdbVzzcS2jU#Ie z>m3Fwf({JqDXydroeVN5NHql+yZmgyR>CH5vcUnC8kbX6Q!jV{APU-F-wd zu1!qN%he{94a|iI5GK~!mPv+V9_ANwi8;5(VU#iG~0J zkMm2F5mZE?L2KcOT(3%JM$wcWySeeWUD^a#vl`DBsak~Q38`#+$8`Ifcy4RVS*61= zs&FliBlk?HjmjAYU@Ff;>OFUCt$({fX)tLl@|K1BDXaztZ{Cefv!OMEsW};Rm^?_W zBnN|@DBL8a-t3wd*+%hgNGL|zjXTD8GcTGEP^`f4ARuGdGsGOji>+h#^-HyBpKAmW zdxkZ0?B_Q0MdoLZeD($zJB;zLm&kZxeux}E*3|gU1p~fp?3>>DoR8kw%zv;)e&*`; z^;FV*xyQWnFy3;qM^Nq1k-ZeMVuw_%U9?8;b37BKGFQP zCkctcjrShbVX+1Hk8*Q`1?Y@2oYhf6U+IlwR2<41`HU&q)EFxk@O5dDihwhs(uAYh z%2EN+a(zZ;(&`Gr;5_DANq=tZvEC+731A$L8JKvo5X3g~siz!Y(5rO?M^<$5+ zu8f8K&%FlSoVu~?J2U#=0ob;bLA?up#6Ue3pWA+)-&)9e2m4C=AoIf)MnKq0w z`3%S2$Oie;U~XCLtJK5z59dx9d=LAI$Z#^ZI}t6Q$u-DaGKWIsP;D+a8%1aYpY7_? z8LLX=ELQTw!-g?Jbe`_Ez&^2zr`Wx1IH{fPq@-XIMdUj~L7WCS>u}@sp=COSz@-HO z+cnt>_QtM!L83=s^nU~>2v&yc$pUy_)JDNx)OE&-(eA@mcNWE)VC#TMkP3+c4lVC- zEW?&fY7@=`KpDk$gJvRuEgYoQo9n4j$_cG&Y=~l-HCT>L$`-Os@0i<$HB_dlc&4#T zEv0tqnO=FKVk3-?$|ujZ80^!Ag3=LK7^SdEkWYiYbctSuzJFvNh7V7=(^`xZbOUhP zL|1%d)<;&6vCNeR{inWz4kP*wY)Bv;_Af+j#s!J_GR2-y^%HdBNK+TsX(_0eaPwpt+-5q_fk#ORnc`t z$;9MofKO5o6M1`|R?r7{{%LnZ`TaDpf#A0xGc}s^F2sBjm@6Z6Ob!B?26}?o_xOh; zvIzQ>+*AJQ+VB12kFL$2DKe~E^0yiEz(=2B-!Z@R{eLhnvyUzxWYLL%e1p81n!s=I zc4!F!C36hz@l9lEJ}pCn2yDw7IZ$>$x1nXFwN;pvqi`W5Kn8OuV++ht)_qDv+DL>V zzbII%LIVn_*UEuTU2Q!Sv@$D~ZxB{P4`JRmGOjI9KxYXu2c*7D*?g_ylZNL8~XS7D87PF z53WVl$7eiP=tT}JmMrYsM2Hn|HwjFPp6x=3%>0PTD~MJ!{^+O9JSlX}syUD~yJ$B6 zyW~28j#MNjAJGg2QI{ZtO+ydix%lxIY{51op?|)dJXcfj!*dbY!($(y_Zw^>$Pw&= zvhtWf@zu_2d24m#n-y|UkfSBdZF@Radcr~qfM&xj7!Nj1!wlNT%U=;(vG^56$tuc5#XQov5hLA|BCU%qD6$ z=zqs#cn_9*k8ywOyRTmR=q$!;lm+wvvAsAX;2`rVg@;~dvI3R34RltajQJI8Ei4L9 z8Zj%pC&@gE`FN2iC90;7?Zz82*#zC_U=_k*9^^js?q7A~k4zuzP$I)sd>(#}^-s=) z9W(5EtaDb&nHK`P=-U|byrWbe&&>E#vH9hVL17fijlqg<5D_VS_s-wLwj%6+}o03<6OM6C2zu zK{*8GOG=0NV)6XQ8IyGxLr;Bl)z^37h|b5lxDp=%_A%@7uz?d|OM%)VsdzC{w||?a zHkjRNy|qSJ#Shz>?W|H0d!Kf;1wT1_J8S9exTS!-c%Z`VY z^DvO<{mL%2jWJr)SMtikTT?x`oo z+D0f8#!RJ*fV0vG#)1V47bm39Ja?)B2<85-->+ro; zVq1a7fjlqXf!Ge@{iMNXB=*FQ3?J52OynSu>%^8DYz&hl-zRpohhu~9GJj^GSK!Ab{KQ$}^RDKKG)cMD*~`AGM1UIKaG<+>>et^rfB!fZ)s0e{zqCfhR{_P_2Or50-n8W`^dpAT~l+e`#SAmkG9|DeB! 
zjU)`dtm=mDXJ7f~B1z+SQONkUrDx!N_aPf?c{IknungopbA+AlYSQM341XzMo-_Ci zFyFjz`#6jb;&HiFJ9zJQMrDHGi=m$Q%ti^2E&|%?m4dJs5WC8a1h9ztYNU&2Qi}|`xRcAsw$Y+R&G;s z8>YsB5L6+FQ?|4?Zhs=I)mEXzq)>!`^;%(eZ?ax3A|IY4jD&evpy574=0*IapUM6j zd=4OXu|}~5fB6|78Qg#D;K9HCj0ajRea`vH!k_Wz4^y`QgSu?18$N_@w@P7n>S{(m zA+zqhXdYPXT`5zrjx5`3CyvJ6j~*)^LI*v_QJ{ZWy-*N^{(t`S12BDY$$2s4xWQnI z!6t4=9U5sjrhwQ5^lM#9_VH;s))GzuYl$?=UA&QG&%sa+%&Lf5Zin9=n1ps07#iaq72SDCn!^M; zp{UiK!UBgK0KK3VkTXJaNc9GCNLQjg=cvr$0-_=SkD-3Vjc%JPBuk7SJCa8 z@sRTx?2*Ct-ZKYuO>Gbo!21f#3NaA8FXAR&9e+sTeG@YwcAQ1ls;?{%cX08HRs~{h ze~U)1nF!hrIKQlzf^*+Kj0*pkeZ_t3#9#M8d=-R})_-~*j5QkC4Y4`x#pObawLRR| zZHW!%T~${w`4(-K8ws0J2qZYw;(KBLvt6mcPJwE{*rCtBhWKxgL$J-ES0IPr!=N4< z>wg$)T5%9R!~R2wA6et6np?9RWJlXXSYwglhb2F*f#zjYXajzoA{GJiB#0QI92M+g zaGW!P4J}Iv;vWe|Ity?T!V6&f2PT=sSsNs`XcC|N>TR-au*dkpPycJYI^<;_fE%C+m%zJKb3CTxK#>_2zo@}t!-X0Us6`V4kyG~gcM zu7YF<+dX1e3LBPo58nvv{Pq=KyzOlDGmd2ZPTTR6si6WaX37X~7SV)zV-xCWw2*U^ znemMs{AKt}x?9~V1BW#QoI|v?&K3QHYQ}O)D#A);a4V8 zAK50Vv43}CWWa?^uyspZiz>5^(WD35Br@Yytbblq&JqmEjp4BKSO)L*Qid<$qWo{% z5m{)U^oOi_1060%pA0O*1bT3?idw1Z4Dn4MGRvZ{9Po6e*HPObit0yuj!$<25I{=4GF@<2@0)UCrkqs0G{>DUQ4PR}W z1hx(QK!G_A>wHDlIri#n>tFF~Hk#bdzEE#5 ze;ncosI*>1US)YKXrJ}I1gm6)@V^(CAn#O}wmbz9gtWFXS}^zeD}R_37Oyv9aG4rs zRarHH9Vf7T5$~!D_**ESF`NCNSdkVa$cGTai(3qFDg!-VNdAcOdF*9kZ$OI;_WqxC z`mgT)&L#NkZ$ZF`>nGaX>;ndsDr6oLF+)h?WD zwPn)e#-O4)fP};=)qhDaQTG;PtwYt1Ml6~0@@a(>Y;Z&*QvYpVLPshl$>$kpH?4@r6I^Ck5kAo}Kuc$QK|dCI)B##?ChegueyQ z=nDGjlz|6@*zrn*9s|rFJopVg6;StzPLmvSN9+MHbR;!A#D8PC{U!O>RETvlrxVT#A;!mBhE+sk4XqyP(ZQ| zfQZJa?&!z^=#&#)^2pv8KH z4YTi2j!D)lVt?{Oto{>|Y%wmsV(-|Om{Y_-XMeI%kIB@DPCYH}guAYVPZU0t|N6;0ODq8rF~FRTr~_|zwHQ$$&q<}hchDine? zP`e4pVN?vQknoL}aVgUOsWibJMH_8WFoFL3L|(y6fPcKo$opX5f8Hm>yJX78ZzJzD z9^!sJVsRql8SLf)Qol~2Oy(nVB2?=lmW;*|>uZs~_`zImAqTJy zSlvN&XCv+bKN!stL10yGi=WqCgH#N8F0wX#g*+E3JP?=q+V9v;B<6;-_T5K!+-HvQ z;w>~|2!GifGhFj?onifB-GF8rcIJ-g4S9$+L57id6S(oo`+Q~bS6_bg=#T>&Y)8nH zgn;ZU_DI?m76KeTZ=u4u9{gfF7qVAx#3#!X%SUZLwt{*MHGRI+YqN~1@$v-!!wZm^?M$O{%S-Aut;D^X}+&%G9SRD z$A4Un`M6^4!EOJ?{mAvnByX^vLwF)&UMO+6KA989{lBa`Fcb=D>_cw;UU$7W6;kL$ zoQwHmiHst85cy4Z)A^i~2%tdttVO7N6CkP>bACoUbQZSOcuRB;GGF62jl_8k{}SP& zi*z5?$Q@yBf3H!|Lx& z_^G2t2WTQ;^n}>W$r16p!EXKlU_hV0CA)sxE`R0~k(aPO_T{g9*U$dJT215!ye$#^ z4%~>b0En{VU8%CVYhTQ2Ik7=rj)YyeLjk7ESEj(PPzMylJVP#+1-m+0Yexr<-nvGK zy39jLmC^W(7SpKPXW>g8!nJisC|(B*9)y3zkmrM*=*YMu?)xBiBJy1R*Z=w2Szn)e zh~W^KLTun)b5g=j7-Bph+w0?VR>gf&Fle^ZCm^6_f=HxJNa#~!PEgNZoc0Me zletHd;zuWQlTtv}pTcaAmxL_HLXwvvHq@9l+)!e|jN~LZ!B-%I@%-`u{+qre?bLr1 zlDEY3gE9V$xq$uX{iDr~xZyXJ^J8PQ(0?uTAeoaMdayEAzb|p;3XQcD&@(KSI8G4HyR_i(Ha6w$}M`~h)h&oDWAbL zVY9&@&VSDp;*N?Ki4VmF>u#VH_NwrLtzT16)O=)(^z^S^hZ%ZE{WW zZ{IlnFrSM-ej~~|#PJ;z<$dD#?qKT=7?3|?(PxhF_v_c8P>k%QpLieQl0R#+Jz)+V zbgcpVRd4**%QU50lLq~H1vELL`e2%v-b4wJw-FfX8H!UY=Nge6KUQpi+3$ZZ&2O8S zu@F9Dk~>Qkv|`JWi--z?z=xI4@I?R`N%21L>&Sisw%cI%!R|`F@k#1y`;zA*b}flB zle{CbBmeLZeqxkEEadlI`|S%{)I0F<6ZoqMkbb~Gn52ELQz0f}NkVG^2h-lr?d5Uw zE0u9P)sc;<<|-!RRuZCZ8?ApA*jf2vskL_7#GhjQ4~CJpphMkCGbjDYM!_-#ba4|yWQ0TBNdQrPeyl!Hzm@J+Dg zkdMCE(DwvA0X;j$GGT~)7P2E~%Ml`~yvDgB>bxYm9N6+Is5)RTLa%=&-+Vvx>UFTs ze)3sgyWubX!(ekG*J^!YpFj2*_WrQ9Uc7mYum%zT2(?MP1DO(@P(*9OFi=@~A4tdG z0K5wF$!v#KK+K!Y;g#AU3L^AOs1T@l%t0@U-kvOBRJSjpNpKCQ3MEym7#D)`GtS?2 z4gHFc1{s$InJMfeU|)YeF<3IMfbxFhHayXb#NV0mHP&w^8%=|~5`yZD3G{A$M42*d zmROM!C~&Mt6}k!a{(+p-f{>4hTq>EU!$={Q%ylgpw~C+ud5er23>4&*zIGsFWcR_M z;Qr{S^o>P+KLZ~_^6*0r<1=R8_yUi;I58Q>Dd;vKLHl9^G4y|ZI4A1}7!pA-l%>Cz zKBE+^+=(B6amIH>Nsba$$jm5FqL+{ypy#VYBW~ixQ=a3ww@Fr7U`f(30Rzx`f$Pei zdp8mT9I`OA_AVGRP>X;VJczUy3;jL`3R56kjA7m(c|X*(;{D+G5WS`*3h*1yw`6Y( 
zxQ!2XjmT3O>k@zaYw#%sJERd&=oqtGp1PsIov+@!GIsS`LT`1%7T`>abDK(*Hm!DS za=N>g_!by}LP`qO7}_fAt(*CdBj(|O%@@n)rny!rIXHI?up-N-#NIM)gWekKEv)Hb zpA0hG!Si9hiTx8}LxIDuQ3xgxIhE6H>o#%%w;K5(_*#F(#27F3z#|>y96r5G9e0fe z|7JX*Q~0+jz+o2F?RoxS!NQ(F_b}mI?a3Hf3_*pWBP{V}hrCGyoD%T|8MH_vvWVn_ zf9y|SVGNAkfRjP)4%(uT3ywu$m;*!9I`=E&W>#Zu9{H9%PQZbezz4+q!oDrBZvtt% zyo_Hl(g1&GI}8d<-2X;!5qIksE&_hxaDVp)>+zKrKFMpsMnga6wIurZOl z?D4KHVoN#Er^EW_m_RJ|Nl*)-cRB%wgcklfGl_o!=0$Pi>^kDv3Sj_L8{`4@C9o$0 z$`*JQ_{`|2j&UVjPYu`tUwjF{%{j1LA$tghWxxlyUhkkt77V@{37)M=p}QqIa;3cL zdO-GzT){b6m~rmMpD^B_7R`5KFo68&FYP{gIN0g1;y${umPhYm%bXu`%!A-`9%m2u zDw2Og8=9>Gf*rC`OWpv5MZm!b6x8qwhj<1HOd4{ykV%8@??GPw6UJS3G$J| ze!n*JPhRcg%a{z_>0>t$%)y|?e!XuZ`@p@yYXIgb@kU$L69+rax~+X*0H1%=5+LD~ z%{p{J=96!W68N~4$6)NU3!0;UV-RV*0q*w1*ydGWYs4ToFAZb2c2+aSDk@GYZ_wRa zU{YldEfqfN?u>OOhiD9j-u@r|Maw)Jda~2d){6MXzw%*mnf8NAMCqvFpKi z*?nZxfQ1-v6GYa--yyOcHV zp3iU5`8rQnq_S*AL=Pi^r=C93ooJ1|c0Jm$p;HU;(f#3zu?$EVhqSJ ziQo!<+4kRf_8&MT*rb2MeTZNB+jk>tH!+5s?n<5<4>YbcpkY~$Qx|l zOvaj*GOLiY)zV{9*08}w58N%A+$7CqVKZ2U@StvDN}V*pa|C~OHDIYBlQ*zW;1pYE z84s8nvHRekD8RZ|7ho1`BHxvb$q;#9SV8ZhLtdgo>w++a61EO7OO~Js)@u4}>5OYy z@UGFBGhV`1AlO&M;b5-=lc#d60woJ=3-<>y102gCiO<8XQjpgsasqMTpM2Oq$JsVa zGsyKprvHjJB~pI^RyqxsR#Q$J#Fq+<_=`G6@n`@X@}&x*y%2r(JMTEi9uiyltT|96>ezEw0~KPxo)=swj`!jx)++qQ#1X6* z`S$7shrVI19_&&WgpClA*dTl-mPMZl_^KX4C!9)ZGTOw(-bXAAVY8t;i6-(M5(an< zn1+xmDdc~}7Cy)ye!qv<FH}Okie+ zPcg<(DHO^F$-~cFZATt34IFemVuq~mqfar7wK=S-VV-7WT|osA{Nu-7|090)#gljr z(IpPRXCziM#DxrY>?3CP(bFUkLrR!<;CGQ)9Poe6SDC~EhCB?(WdLK_TK)(7@RgYa zQ{Mi^LDM}36fiq0 zS^Qr2E!OtU;*#o(eS4BQ^mf7b-QN$hmMR-u23Mxi(=ZIRmZHW@qnr)6@hSV99JE)U-h zA`aZt-){3YA0)u%G4s@!E^5Y6Py4y}?Eh&{E6`nHJR-mkM zV0FowCi_0cdw`X4Sl2`r|FQO0>>SM);eRHGc>vFANsYs_2^tVls6t&mw1j_PIib^c z#Yuy$gPeZ~HX10mh&qrQ#%GNei08?$<%q)v=$yws8LVGWm;Di!{K_5Jo|*AGC;#ya zb{Lai{tu}G_{1f@y5Sej^#|K#SNtW~Yt^FbFVH zhNvZ`fOewT!yfo}Oz131nh<%`hirHXhh~C)4U*|CKV6+)981nUa zACj+c2K$A?Hhrv(pBmdOlWoxUl9J%hx)o~ibPJ*!abLiLNNyet3J{CpAz&TIo8v4V zSOG1T)Dv@~1n0paLv19f z=^}NEpSqhllyf2?7OB8lAi0}@k_)tN2ksv@bhI)+ypRKczF&9PmkOH@p8`71ZqN@Q zI+6S!>PZlsR_?W^U;!%d>&^G}N~6ctWGf?%%BJg+NmDvHJ{G4V1zSiQ>3u?|2+`M3 zY^sEe<$!C39aMj?5PAdtADG%d4PUeI)sV13xJ)_(qjmm z&q&&R^fYq@rZa8Ga}E1Q`oY_NVyoZW1@uZw?4j=*pg4d0$Ce!82qf2zJTGL%2M^;w zW`O<^xKV;1ft-fiW#U2uZ3*hCbROww474vK9+;d_k4usTHnWpReI(xH1v?jUKlLUi zf#3*`pLL9$`B!9 z0nhxH5bVydo_Rox;ioYos^f9%2f>%(z8!P`kv#-!Lv(aI#M6k+Pkg~Yuz*2p(Vs<1FM`lBcr5aM->H z4@RL7Y+D+P-GW%|uxG*ziP?YhDjs66xOZ!116Bf`;YWV(K_iqCB&cr=FjWmIub>wZ zs{@8rRZtU;_!cnO1g`-b0CCcY%Yb1e`rU?q)X2!+!tzF-d+cE@OjwRV31tjl@uI3{9Y=HqpP8SbXvh zgMNqKi+%B_ufjazIu7ipUl@iv+O!XO!vlX}74Nx8)&#lV$3GKJM1BtWfG=(Z`G5fj z@>kpn8T+3))o|8|w-xqJgc9SOE5qQ3^Kp zLt5+x_KE+&@oL1Efzx2PJYlwET;P8W@?yC5V4E2qEEjUSQ?EJmX$3OhqoE0_4p@I$ zVDW+1v#lFbR2P+6wT!RAMHCq9dGt-NIB=&xzmw|@^3lb-O@7JvM};_8HbGEG;LwE- zb!4pSz$dU#FNbnFT!)1nxJ5pK_?r-Lzv|);i~T$o!5Nky31cT{t-;V1a}`B)f)a@I zBM(8!zlDiKzOVf;?=H!Q;r)PZ!T5i@Xyi#K;N-)L?$8|iEju34SjZs}IS?}EhT9I~ zKh&dQ{SV{+UH>t982pjTM1GUV00xvO9%G=MNqF#Wcy$dGj>py91J6i*E69X|LsZ;Jhd8Xl7OA)FT|z5a5GAsIed4-OuSS^?HpuNF0-L8+3i|tD`4L15`F+^6JicIyN1*gfGimM1?D4(^Ef1r z1Al<%g#p*~t2XX~vHHnhbRmDcX;B}=Oavn>4rpv@aMnu0I){EJ?owOhf|d^>lMCGc zRCWUW>Oqbn|AzPgpq1Y^4&gi)u})2D~B04ZK^SoJR)1 zk?qkuQFZ1nXPFjqCZ%4I+Ug&?7WhzrkHjj*`=hS)U*aQ)-DmDVS_*%xQZryRFE`Bm z1#!VdtB0I=hCn?duPc(9#TL-W9JfRMR3dpY#M=>Dgq=g;?ZDBYFHovl!UZ}T@j2l5 zN&J%3=$Y9FYgP>04R}JS+tv%|S{;1^= z|AFHLq5q4eb^h2Dtg&kCl`Wo5qV33~>0Ilq%mVkfAON#eCdds&<{q~HxGc1%ZXmyL zr=a_TYF8e2!iZ7OidvWzV~;NuVYI5%L=&(E0@H4GhaITFTu^@?cG)Rvm8dW&)N+88 z8MPHdeCdOC$9#VA0f3{B`NUpf20TLwJA}m4oj+=ZzVQykF5&1K?4dEQ;4DP$8ZlVJ 
z2!>b;LytGWxWkD8-qiyr6Wu9M4=OD0*AwcXQI9I~S-OtL%gL~kAWDb%N2g5~+Zw}q z0=t zAUA>rn|xtb$vMp|7;9B8CW98!?9ZT7}0$Q ziYZWtz!@0V2}@)sLm<9`KRe)?5gP!$8P5$ls{e^^M*V*Y$uE5TAmSHw(1D#f;FzHo z!9{|IaRB%?aC|RMh3xxtMPUJSMINUGhiTyU<{)hGV zsm~p1wT1GBH)4$C?9?UkE&wkLiL=yb;5J-z{17TlB3@-q1#u-9{~k`=Nyhu{W`*iCp;i@0XGq z@V|R^N&UNbM+s7fu_GK3d)Ui6?9VM?w5ShNX2ie8H9!czx?xWv!7P7lw#Y62&F}xe z!;km>cPjaReWwxzUm0TSxaL4XIb1>*GZGE?f^~|n0e&M>0F{Cw4vB_$2xt;)cIn!SU+~c9HJjIOh}lH!o1yak0T=A4z>XmEuaJl8$o#yM&w5%j*!l<+e%CiD*-&;WR{VmjUkYIApLPEa ze@-O+5XFdG4|0gh5&|*+4G5!Y42t3)q!KjRh79|MFOq-o)dnQQo1WCs{|G88Dtwer z;xGpYRL74|)V~M!7P@zn@2pP)mi92<@jg3gAdn>&Bfi)05cq$HEaGJ>W_TEYIBe(fK3UAhKWp z0kElH!I4>w$M{2!$Jux!kLm3gpuB$k0)5wbF&p3Bs~51o>&tk$ABW3zeg=rp+^wi_ zZ;wrW)xUcaT*65>ViQ_~=`xJRf1f|_!E0oE8Q*_LEfSb}oF%5R6!n^k+@Q?R%;nW+ z2u?)cmiPS(nPoqHeG2GvxPuw$_ETia0FY&Qg>t~(1T${RayldkMj7bD@L6NOD;=`& z&a`2=CW&+d-AQ2H6fnjpgP=dT+EP4p-Dy?Jgd9+LH0Vrfrpd|A)>JGY_^gvoa?C0p z3dny_2VM*`Enrfcj)!dy8g`to;u*9KJco#2vz&so2*8VFZ~)DdMZTxtS**Hd40tI% zA8;h@k(RYu8Bx#6^Q79Lq)fIzt_%XNn;eG;HCAxF@grr4vjNd{;PRC7u(J z8Z;njED>=&9b|N3lGbSN9{@cD--)*ad_RA1Fpey~5A54;JuiW$KYY&^zQ^PK$uQ2@ zMZmkS=NgMp$%wO0#VOd6zBx3G_N8lG2YdnwSmoXyi*D+IDaPaO7o|hi>KlV)zzZY(ikLM5$N+(8ICgAw0#@+;r6pUEU zdIIE&_ahMpuHh*Yb48vTKX)dFYg^!}8P*}LX?@p^t>d^LsTX`NS3qKjHK-|5WOYF* zt|;gEpN~K(3&!f~#4qYoMUXI#WHZg)dQB4Up@(txFt512)WS)5)C%wg-7qHs886YPF`qH!7yX*MX(N&Pt<-zqc!cBp z=XizV*5`QFwX8iHk8yuo`W&ZlJRXkAS=VyxaQuwp>TsN^A^U&NKlDoCOzaTZo);m_MTGN@WwuFsP_*hTK+E7YjAD9gp51#C)3-p78c?hHloI8JPcg$;{X`v3xDEDeI zw1`zoaxDSxhfD_E4_%~Ncv$59a=afbM)H2pCzdfBR~?KGHypp}YZzZ} zMUmq-99KTa$@mt-aXo92Ey+x9_!)XK)R_?bndl}q6!6T@y=3pz91VFnMMXe|^IrH1 z`Na!Fe#yhW{5OB=Oqqi4wIjMbNo+&}(Xfq2zc1`bBB!yJ5z!9Ca+s@-Gt&)Q5A$O{ z_5WIf7A*m)i2TAHmA!XxI|)8ydjs8>4d%Hm3j|n+6*f@IFXJ#1mTaQ;l^=-n32 zhd@fQCFA^W^#qM z=plRl)fywZ8Efog$Jl5}ZDHU0d>DY71a<_*^e}uDiB1c!7yrfBJLs%HY>o`~a+3r0 zfM!v+eiqq1WAD=dcCt z$CLkw?F8Hl_Q0RdlM-39f_=+~4`xSX3)y$96DIo4eD(+s9S~^#`OFpU0f5W&3~Lp(Ci(sEV`OYSe71g_%VKO;$5%gk ziEw}4B}_O4gU5Gm{rx^SUL^azRVv7aLC%i<`rJ2O;pX4D2V*xPxftVrF^0I;&l(iq zS3t(w!#c;iQ6`9upH2X|vlWXM>^l}38s|Z7{o{G*)B5N0h!63%*Ta41(l9o7W^0l< z(gd~>o+WS=Xwf2gM7~{fBccX+nQ&%fFs$L$+?Zmqy-a*e?|soB!;6URY}e zB5p*+v*LFQKl|@z-Z4)+Y|x)|QZr9*{2Guc%uDIB-}fiFG=V)r>fHak>|r8z{&uen z`$JLgS_pfRg)I~T#mahS8Vxoi_OOk2gFQ0XkN&CqKAa1?81V_zNIt>DL4JQ`KR?4>AT-;<&kFhZ`+MVaen)QuOPB;&Fz3+KlhcRMB@-w0t_*wk?jDf;EKR;*W=kM=7=PTsr=yQMm=jYG) zpPxVHe}3-BbKu>{K9&;lUZjMdJO}I`JO}hMId6}itk8Sk^Pt1X`+LKA_2>P?;k?7= z{n^iX^ymFQ=e@}q+H&=8xgle25F$cHLa?|@n;1440=W;!9dds6*`wq;uLk_uI3M$5 zq4DhZ{3Ef6-CyTVzUQ;Q&&Pk9SB@3SJ zX>%errTgpLCC+7kpWBdU`E~Ajcy{{FJu@1vo#Fi9U5T&8jBq}5%M@te&-sv@IF~~_ z2G{;MSHaq}gx}|qXLf&`D-74|{uoCE>lAUYpK(dabMvtAhH>HK&-s7fF@eg!wSS$P zk>_@QoeSR?&++SA%rm6V;CEurOG{9KB&JAWGelUfd?h5fDWFU-&HeB907;2R3@12vM-CjuOhVsZ=f=7Q7BZIX|W*SbPDt3rIbnXyAL~vxm=YPX5Q1Pe|S8@EKVQ zp$*G;?&4n;C#fBU*tT%?r|Rh5IVL*V`t_>@*4OI2Bebk6(Nr8lFzt* zPo6Kb;6+LR)swzV0CV9wHTjI^mdJDAJmhymOf5Mt#`%AEF5Dk@HxkKb5S_;Rz=E(4 zgvI-TZs$C_H+de=9-xIo`F$_&OkYqf4VNrUi|*wuH)f-@ZQ6^{k!MG`FIbEBM7dB{ee0G zlHY&*yw{AJk9~mD6*=$oT$pDdZieq6zlTE!j(_$G)(KKv4eA$hKip@K4_IFqFS1U* z&q?;n&pdOWUL*1Xav9Hw^M)do1^K<+kn@et{6H(>`rvaNo*VNuq`rRE^BX;?QF@JW z`tZH{p$=fgy$S0 zi-^QQ&kaLde;<)1Uq*KggA_+#e8H30kkns6@TKtD@Z4m6@P40tg#Cn;xunksT3gP@ zzJgpp7O*Bo1W3&y1cP3f&`adz&`%^UPaXorS_$?a9tL`JL*5?`1wIFOT=0-cpFDqb zC^Iv{%^yv`q`DI|G1Kil%LRYx(YIeJW zxM?%vvewro@2ypT`d+a%pO5v$c|3baZu9Kujj4y>u185Jcbbm2qoUQHvog9*cjm@l z>V>f#0aTXRCQbm6>!E+XuKh`H<$d<%@b3#^8mEiNo-WBA?dZkZZG?UqcJK<#xDRj1 z_Ne>$&Q0j6y?v&tgkE1o{(PIqY{{$7S4lnf)0KK`?Q}c!)@XJ&M;nscr=e6gdGZf) zX)bpU^!VRV#fE#2rcZVK@<*p4UhBs*;tm_oGcI>CPn}b-1uTCONV3skdC%zCW=9ra 
z^aS%cD9yOD{RpguX6?>Lsyh@lc->axd@?P@%cHz`7OxN;?dwd*dUN|ekGeaAvKU=S zarJf$H3^7HKt~{$v82c9bmHJ6FR!n9W`>>Q{W#uLJH+Wf&vta&s2O645KTZ?# zy*N$R_wnhKZd>L2YUJv&K5%YvJ}=Pm?0(pwpZ#K5zqT)ZJ402+O3ufZ_jLPm`;+DCUD9;Qdsw7JtjI0#h} z7TiT$N=|S;)S~uSy*6Qf45N5H8RxHLcCvPJ^b9&I=V-*%%-wQ5_f@;p zl35*OJMolTt`w##(-54Ke|WYV4xRggg`vzTINpDew+j%zE#0_4fVfgdv>R?E)WP;| zwcT9BvT@pZ^9mk|8;45rrgE2-)~3!oPa#yc3e{?^w!QGqmJbXmc+1Ahw=Zgya{E`M zhb!Zt-7br2epbsu)y~@G;I|(BRSLtZJ zK0PNbTHh%(5bvC%pgeKTONYX)eFXhw#jN)ABt6c&u67x=`U>p?F6lE5 zFPq2g;fnV0{Y-K{Jw9oA!@b_oe0Mavr-*+tD3%(j%fox}xRe2{J+#L|dugqXc3N8qv&+TbAL83B7^lW!AKLQ8rDgeH>S@n!b8&xi zZ+-b)Ki4ac*0hPuVhVbxOy}{lDJyn4qxH2h&8zA3RNjEJm}RkkqinZZXzR&*DWE;} zL#SvGa$$0K-zC&;Z`#Ja6{*vK=uOM)ZDw1QN`WO>x!i9;Gx6^7;$#`h`c8ZEcr#P9 zOnG!uakN8get(kpt7B_VW`}FNT0Vc2YSLHxjYsD`ZBwso=J3)Rb)0Af>q!|0*aIq7 zVdjP2OWWEY4jt_!YcqMyHFfJMPf!QfXgXB%Vii8FcPZ#|j#-=3mC?c`5^s+EcJ7DM zSquA1#4TvIJuXB39D){&3U=9k3H<5A=Z@xTQrEnpgQKtPCo^GsRnpg3aZrCrn!7$8 za64{-CpRjUX;*9@lY|Y*bgF*hO}Wc0-|F|xBFIMiGt#z(yLg``wv_RG z&yKk`mu#mkr?rn5BNs(RO)c#-sb@)Pbihi3W8l&uuy;|mvZl>M^KN>lxRjpE)p?;6 z)AQuUFlrv)V4%-dx7|9We9vbfS8^)Dzb0ysF^l*qcw3Wm6eEM2Vpf0t{vlnK#hkhA z9-67x*VS@*csAB^D(w@Erk(j~pU;}D~Oc6d4SbAZPo19f`os=LBSvAT|=hBpk5RO z&7&$)1H%*-b*DY{#`9F5S@&Hk@#TGN>P7KjW_C5v=9(hZ>lfZ9KvwGJh_O^lrQvt` zMmm~akn^WkRYws&yXJ)wUeF?rc?c^Vgk|FV@BqYoGHu?Fn*GJj*m~?PHl^`*vDqd!XRObX zq&uEmxzEdW?>}E)<_e1IYi3#duDxI9+{1g8_sPnfaP*5ud)p?Av^#b z`M#s*^K(2q=*CqG7n5VjXxshqsISw54JYdn`L1g{Z;Oq4)b)FCAJKbpVlKEyiTUt8 z64tMl?&g2q>biJuWk+8qW`WDpM@4}fo|i=#jUBZYpl_@ zUC;JG1axifbUA$zy;*I&>ozLb|dvhPXI&bl8 zlkIG2XrY@0xf`c%g0ZPSx?mhmUzD-W4SKKfvG{(<4~-g! z6jyk&x3m*(SQxuZ412!pBHg{*j_kE)IEP4iYd z-B?YH;-b#@d_UhSB23GJ${cP>VYTX|DCfA{VZ7d8)$XtRcFmZSUi;m_JY4FLyI8EB zym#2u)5T=2H3tCl`q|d7x+HO={tj@JENw5W>?`AkBt0M4`9AhW&u2D@kXtOdmsGf_8!e8<^W%}PE;DTt>Gsyl{jKnh$LGsRtF-qyKm;ou$Hxdly8NTwgrr&wMg_ zv#V+ybNjj5s_*MgEzEPKZRRfsntFdmUuqAEEIpfIUM$MxbM3xH2DLoK4XVm8K&N;Y zipTZjx}Jt}^StZ(r!z`Zl~=Pf*vArEjZ|v9q05I__Reaz?gL4uH_qYgrOElM)APms zjmETK*P`t$h0I$ap3S)CF4}du*oN@35~+_jC7n&5ps(LLDr`T$Z}Pb{30{9G<@xp0 zjgB36o9UW2du($=NZ*mOGCa#mF5u{x9L}+92dguooA!xZl+tu`};7R%oFE4 z65RgEG0%-8y#(4FwX;d9$7*vqP4dNJcgU#rlAvmHv|O@n`?v>Ug}v;U{?#$RR;J_+YN}xvSo;W5&q+r9J~9oH4LVXWQ)!ThqgIVTy&l zh|GnZ&QK~XtzQ1*?d^QK(3(~2EgEsnjuP{=-NE{vs*5zeZ|O~0H{1b`JeGe?>9Y#=+T6bb7C}$e+D#ul$A)xy>~`Y$Hrkh}kLI#6AXVzV zkmEf)X)EV`cURYx+J{TDv4ndURXWadS^3AKLA3$R14>1W+c^h%*k(Yt5f2tDn= zB5qFjmn28(?c(iT$DLsqPIVovCVq%s?$$`ZO>W2axR^&(ZdQMkwJv_Lj2a0_49&hT@vo{c)hVSuHPH(TvwZTh@ z@@nZpKqW0+v`tJQ`dT$=hHizSAu~RAV-EOzs;Rz0)UCNnP?}o;Yh0}Nb^tzQj ztq|VVHT_ne%sb!k8#HR+b_kv@xB0xhcopxzj@#Y4GEaXBn|Y!Z)(_C8Dv14-OKR#T z@XS|wF)7mZZI(}?%~~xc;_fghc87<4-Y+_CBW&{$UFlDSUx3Egh^p{KU!Th=UdOB3iyl?+B69uf2{1U$D=#~_=u79( zUAm++XBQ(m)lxpKIKR@82Hjcw07F2$zx^h9)~mg{TFAhEAg7bGn<@8)H-GM3{bXPE zy(1yybl>ZzJ9<=F*!0n>x^^kmh}N>d1Zxp=e#-fNUM0JH%Z~2TJ=!!L%Y9^-WaHjt;guTgf==Dk8UFOHf#&Vm9U9=*iag%y|IIgvR>hGUf z7aONz)z2QgIqRN-z_Ze9OG{H7HS~-9Or_tu>8l-ot>e}ePxsMT0GpLjte4s&zVH0j zKG=@6J3M3Ru)a?d&Cz@(IG$0SUN!{gvyHayWN&Ww%>E`V_M4fhk5DTDZ)(rI`sFD) znY?{@@6>c@PD3?2PWPrg-50Gd&4i+0cVIsBD~mUGQ+-q>mv9j-Znb9vTb5|c)SnYd z$NPGJw>=)SlBub6ov1;^K#qTdWhql)J+?yBE75V{R zJjXPOHea=ZeQ}SSxJAwXF zze@9D`g-jme^$NnCGSlU*FQO>ci)ba^IG`72>Y*PSG8?h6#SQb%OE2FIp855@B#tC zgYz=??zrP!-S%pAGAo68w}|dNk2vqX zn5f5fmh7b{~IRag@m@)c$@yM7_7|+!8u6 z1qzIOGYs;b+BDD4!sNvBq7OnehKH^f6u&G?YpmF~_{KIvFrNF=jj^8M&t_~I15du3 zdFE<-j3f0}qJCd44o7I$K+Qp$%?t0X8*E)}#ivlXtSy_J(oIh*J$Ph))U5hux7P9V z0pW=Cwqu?l6GFR|zA{%(Hy#JqKQ-mf3Lh=l+BapBOG`+otN5b1_vuc6Ag(QFCiQ%pL46xY*r?>dr1kc2{wF^p0rGY%dXgyr%cYYWI_P=!NO7 zs#(7#?~RUm!7V3(`_(>wn8m_6lIOy5X4R4v{eOxf-vW-1%e#53K|>hcg-APZ5yb;% 
z?up1=IP-iduP`gimI`^%;HHPApiH%lbyRDh99}hxtem9_(58oVmQqV_em5@xLE*}9a zA&6>xVushFOkzaN))@MoF|lGZN%$e56>dkV`Z4lzwtwF$|1q?0nCDwdF>S@di^aW1 zUgW^7p{lZoTp102h&8ThoQlW(Q@xY1FMY}@B!i+}1O+izq(Nb{o4%iU@(pA#vP@l^ z!#Q0fX1_Ot%j2ng<~HA_NpLSX?(j0z{pPf|()W$7a?ljcR1$0iSEC?w)7InigPQ_F zrw;02N}*Hm^y5Qlyq|MvT0Sh;sl!vtG;+5+0v@OLd)FI(ZyI;^#E*QHXfdalDc=^! zT3Vubq-xqo)^8`$Q*>EVpFdKZh!5LpPxc$_t;M?>8}PX< zv|;)$y-jg{r#i=L^$48hJe}jM*Lrbi4nBoyP5!hk+L(7r_toLR$d5YnsxO9$1m-P_ z8BTouP5Q^!$!BK2RjDpunKps83}fx9ZMuG>Wxd`5O~~W3-_tO%YOYRkInS^+ru=jh zdA_wz`lf))3VWux2gJ`#hanakookul6}CMGHMQ1%fv0sLo4QZjC_`*@!-WCF>-nYI zrd`&u=!m4H^DC{cov&K-s+=6eBc@l#@8=M?>@-UHGNg((rc?B~OVSbjlH;z8H;o~Y z6??bmgkX~UB`-TNgN*KaLF4pFBu~st?@L?hA%CK!FKP64VbSQU_GrYgymGgS<_rBI zcycy>6TP{gxyRL-0S^8Mbr z^D(_gtswL;xSMu)7=A~b{UXteCPp)<7j*RgEA%~aW!S0P+WWZ-{UILIaqPbX5S+|+ za)RuURWVJ&RMvDz%Oa=7=$>eo&F`ek#b4Kd4Pt|v=fyQb|ML9wPp`gXY%}iouH4wM z72-QY{inmxrNg3sS99cfA(bW%aD~{Hd$)^{U)mSCjkjob@AaZTo^@e5pYr0`HHFolSX&deZ~YK?(enzRaz6&L%bG*KtsP z(UKY(N~4EoB*y*c*e{FXUCXas-8I$UFD{#>qmU@^VO%HgbaKA1w^NGE2Zo``pA z-%#Wm>fZH^Xgp@1*7vX_2zgkZo6WF)jMw`skFQ%Q$;Q*2qlscZDsOks@e2|$aW_cG z!2As4Zs?;Ohit=`DXG&BPY>#hb8pwYVa-cgiqiXGwx?^pd~QO!w+?8P$YGXHd!IEQ z#Hl7Esg%VBJznH_8ne-0=r}t*#%r8;yLx?ycUpUf56)I9amc z5V$1I>(Tbu9=gFnGET4Dm*U&88X%ZoT2{XG_sAcUn>dg(Gc-HqzWUVB+IMeuvx~|& zqpSg?^_SMTT*$QTm#3fBor%>TK||!B<-ppcXO)ESwbpFBE&R#451EIqGI`S>dlfnv zs!pVxcJ)4$aDJv?TO0d1XBU!xvmUB{dX)zq+Bz-l?@pC>^@f{3-^rSN9bS4-T=4JWg z_#POxg$YZHV(TxzU?? zqdFdt{w5BCr*+NMz1hWgqrdEqB|EZ{ag>;7IJ7S|_$EgXMt3Lmx`^`qu@sN_dp+|w zY)H|Q&g-7t*v`2;oRdsS##=%J2s&GqGgs7At(FQtQq(9diL{r0Kc@U8LuN;-a$ckJ zxqt8W5+*Ri1WMnYWFy(1 zvf;d|uh)3G7WPv-oFX296L~T@Lqt?>q{459d!*DAqQP^?9nYR`FYNt8~zN)k2I6O$*q8@B!YGMbynbW7cKE@nTwhy!1wd4GdsDGuNlsHy_I zo_&KtZsJ&ieVN?3u?p>G6F>}3e|AKmIS*|>oIt~{3Mt*B~~koChhJ*5f`gd4ECD5LCv;^g4CUJ)>{naK>ETz!=EUD#x7G@AGvFN?1=^680uP z3+|Xk-%G*GhvrFk=k>aYb}%7n+H-8i`)-tfu=`Clkl_zanPGbxE|;3#VAdGb&o@`i z-TEFE;@pba(R<)k#}BWJTPCp=66llru{|#z-JCpiCVmLH_fiB#$SK^_K4(G`e8X_m zud{R6#3Z3jnNip5(vu#+Vk>Qf%G^qeN9uC_fIlAjRHBSe97JG^of{)aZZ5{KP? 
zTLoWe0;=vxP4RcLiSFh2(3__@?#o;tKd-(JmF7F@;y8($Z3Cj(gMAvWj24IxCx~xS zCoza3CTH_R+4d6&4&7Tl>qA9T^K(;T{%iS~m<*=FBwy-gK11W_l>RyN>W_=#Z+V2} zjHx)u&C^k7dah4{FI0SAlvq5+qIf)i44Hi15YtuKI84@~7lOO9fjJVDZJ(UihFL>; zur9RrG!um`#)rE!I5bgtviLU8$wKPcv<((4hQL?+9!fiO@{|#y?&~|XkCg;T_WdJx ztF)#Uk;5$fE-?r{u?~$+{m?roxT`cAU=py>qvUG)*Lh|ts2Nx!_(# z>VHKl0O&*A+k>VbXZ5pX9=pTt*5+30*F1xq3~BZCaM@Y=nKCocVogh-|I(H zLxFym=&2fp(la5kQ!n1F0AKrmIDF0TQMb;hR@#y1PJ<>>&NN5B`a_y1x81Y+q7MgZ zA0D-fHf;l|{QxbInkRdqY`t!dd)s^U>0=eXQpD_y@0SW)H=SG4S3Fk=o%Um#%!5vM zTlBS}S#{@A$)9R^HJKHE6@@9%N6Qn)H$B?&Yo|Dml7;*0MV)VEy2Of)sVQ_@*#mbOn-3w!l0vi(!Z_(V+H5v# zS0uc0DeoS`@eM@uE8elLFPEb~p2*lfU5ZIICI5K3pPl3GyK!Gole7f=Y0@h=t?$K2 zf+~Qb_?bJnIvdmHZs2Tfd%OR|SzAiiEuz!cWwz|IP?cc?jdc|WkLGkfUeENi ze?sXGAYHHeSe$tB)|RoF=;=>os0=H464F?GHAb1j?7!wVP(RONpqh6W5f{x9W3uHe z>qY5X!JV4vthwu~&2<~L-Bti2a}1_d7iE}P@aW#V;AX;qHb9aNz8c(Ha+IJOavcaZ zaB0-DK|M*FeC6`$qiXshbgx&rK}A-z!U%8dY8v)KA*rfVhSvA+e$Z-AsEziRuQaf8 z;+wd}{9uaZ%iEscTQAc9BEAADKa)FO4eNeZVXwT{hG=`HOiruXUq~-D8pZaue)VBY zvRdZ{_cYso@1uO`q5>&WW?)Q4R;GRY`Cdu={kiJp*?*nH89je;tw{z-EY|xD<-&a& zu{d^8f-=mCJwJ(#x^%mZGA^(+fTfWJn~yi2Kd+ZL+DF&kE%ccJl#K4`(lE!<4lWnY zqqcW(l7gn5*M}yvm$y9_4o7WVOIR9x*n}IKNV7nHR*UOnFuK=(li|@yz0lIs$BjNX zb7AJAL?29GdU#KXEk!3wcdsC!s56I`NPMmH^=UbFedbnYII`UZ-{iFc#?TUPNwO!8 zs<>otTKbg8RQHd~XVpnz4bc(VyF2ThH`Tp^U(MHKPH;qU#fNuBk zq-vgjESW3TplA1wA&8mJcmd0=6O*02lGhk>Mbh0oC*Hn39tWJGp+u2v+}3E;81C{wZb&U=JN_38zh`L=C^Z?@^i3QWUCk zCjq#=kO^+)fjg6z_V9F#$%Y>N?fOc~JueaU(|Z#G0lEtaN0+_4b&E0OBx8caB#^3e zG+`$2mUP0Uqo}ssJ#%|6S^(?7@L81JOdNFs>|gc}5$x@9eBLd1H64NC@xLK|-Vh&u z$G++9R)YbG)^DuEF%-rvJ9;}PVq|Rg2{jG|aI@a-2poC6n?CDCDY9r{yv5fao{=82 z-@HUmFP96W(chPR9W!6fLv+9+?XVwFtX>XfJzrx=b@~}CinZQGE9ME8Tvf*c{xtIM|&oGPW&RFx~rne#Tqs_T>EMvGe%+wW*YsYmAe96ZZRZ z-PdMKRUb*)c}Va%wmy@Gz&+ENJVQ5C=r2A=K;eTo2Ox!oWq>o*Y{XT;@9t}V>R`D( zO+cwHBs7H$5AF|q7gFr^M474o0O^Ab{Sgx?MC9zX8`khi1H~Wjhe}I}K(5Var+4!$ z+xFCP-(D zed6kOyvvh~lr!4)@V4dn6`yi{JB?!20)G6)9oJAlWVvKBV#M(p;ua5v`shjFJS5K$Q<_1W1xa3?$t+5QUdwsn1&fWYFjZkz8&PQ7J zAgE4{4~poZ>}2orSLjSb>yg(n4D84FkoH%0JuZZ35hLsXu7nZW{moo|ASB)9kF{sM zy_vc%>UKQDgNsm^zp%StxpKL)40u+JoOGEUt(33N^J6x`n@g3gRm%oUFAe84=8x!J z*f4^!9vRip;^vliJHj}OMHCpPEV{OX(_dVT+jm8I=_&6#*z9}%B*H5+hoSvO(y#qJ zG+D8EfdoQIeDerepZs=z!ogtu@?3}-V4?$r%th!hE84Du+451Li1uVDw?%sb^;PRH zn4>;&NS0+U;0Zg!YGnQ4&flpt+7m4fo}r+d?s|a&sGo8M2`G3UeM-_`;@N0X*jzVK zyPMLQF2bZ?2q)n$DjHdS=n!-U*x;M-4hfay&8iFHq(^$j9V@IYHc&5lKAkx%%CbwmJFP-aRm z30OPodrI}|H@5@oy$v^zGrsR%{^oMtNvEz5$jaOmL`z8U?tE5zq1+rBu6HgnPuOE6 zzsS61O=U1<)^4wV$J^{t- z4B<~ab!T2~3|Fdoo&DK8{6tOvqaggpsC2HIWqr&=-x-P)9gucPwoBYu`p#o~m2PeHNZ;)zvBmBLd3h{r~ z*{XI}b<3P?t)B?j=kMY>zqe9_cq^F8R`?>8<2Am2e=h~P9bN!IvpT7T^`MJ6%2cX| za)x^d&o~&+%(g@cEmEkvY1#Fu%J*Vk8?X3QyUq8~5`l&Fyd^IIy8eFMv2@NnXTex@ z?;XKS_iI~cy6=`s@cz0311#KedWRvGGmL#$;uL}Rba~wD;mrcXwxRbRD^b8#?B*K zg$ah#-rMq1@zAi!fxN!*_YuwxR>e780iFkcC_D1&o~quX_@=ih_CEj?6DMv@?AaJ! z`MtT59Z|skohc7uo1>iy1CE1i?*`Oy>lxBW@wQDDNSAKdV+@(n>e&Hw)2^T3j_hF+ z?n=1fVN8X;OLsk6-Q5QOE`R7#+w>Q73+X4JwfeFT%ib?eR!oD!H>~#w)r~n7-Q`@}iD>+2D&C}aQwBIhpXR}v-OKE4i{_QF5mw z&|f%5Ai!rnr}J(rfwAMwnMF^+t@ihW_C@M5pbNAb@z(E)y=7Y*;kGv^C%``Ge;FpEKlj~ z-#(7<4!Owbm*jEPlWypmhXF&iwbjpJ{BUCH;tdZ z$?D4S{Ra58BzmZu7tZgexjEC|NuM^=?V`W3@a0wdwd>oIZQ~o?HDyxF_1Sse%Tr%? 
z+|dT|W@Nt3DLe62$YzfFzSrmTX>-dl@#v@zM~3%pJo}lUMcCW+2~DJb`-7@Vt6#&F zG>%3-@*#5`ZvzZy1hB4^3TIJmt={KG2x3jVL_gZQ;n>h}vW4$9{i(N;9kt zA6L=L9hDqa1G$8sB>v$O=J~Vw+};P7gEszMId~NOVJI*EuGtKVr*u1+2&?q!&0@ozLDTL@Mumm)G!u>s|yh97!q}i!EIrdrBx>;v4|^0c(?+8k-lG z;q?7fr$DHnEf#Q)w^?JyJt<&-YsZ9?DrPro7 zJRhkYgl*U#N+Mc6*N1;f3c)2IYeo0a#_1#LKKpYzZsNJHIiUFrz1Vx*6=&!(VL$i& za<(tzH+O;hRwmyo8}ef~nIXOz<&wALWQ@-CdyFC?P*yT3dN&D=J8G~E8{{Vn?o9^fP<;9p!zd7&cjeL53 zi*d~C2N69Vy+^rGo)>+(p8MGv(NR*j+qFKh@5?7nzkBweKp^2@7*(3=k4CwY{YunDAq0-B zc`d!`0|J6~t@NbNPgw-j))al;wbvNsAg4y|{_=vE#=!z$kN&|KTXnOK$E|*py)&G? zF?F`T{LBXa0LlOoR&CDv@s(xy7n90-$_}G{G;A=ceY!as1K0L?4XpRcx?Syy zFp^UHacij=tf9p*K0I5MJDbK(H1m4<20tK(HTv8_@1LMMcgR%x|J4m z6C&PJczI>2=E4!7)Z!(<%L{3+YIOUy;dSwD)1Tq<8iggs`a$9mX~IQ) z)whp1R@tN3WazRu)oxcB95J(z*pvl)H#Bb5#9=I*GP%LV>si-_f*_t4yQDXDA11|q zPeEJVyRtxH?{aHL#tHmN=H`1s4ZN=*f(-|Mpu|d4#r}>%@1wpR)Npr>uo0Dpe5tY| zeS~p#SGX8{O=!!BCp|sCD|$~1UQtYN&6ViAezm{rbnOMmx@x%;oGd6*yvWyff|tW# zGX|K?nh*QKO0o+MND_@AYJhjrgpx@&YCNT<4a&%Q>;bk>XC|um(f5f)o4!A+MP?L# zj&x|XH6@h`JLutl_(p;7BjZ12q};;VvOpXJ1Ej79qzLw_vV7(!KvK89uT`!L(^%y z({d}7;fo!w5X!15oPBo2U1qNpT4(EjrJ%w!V{Ugss1GGwleem|B)G~<6?Fg+E62=- z)w$eAPen?8nqZG%`t|Wt3;P>Is}dxS@bJ1^H}~<=*q!q#l7}ijN+?Ri`$Dw5EA;bY z;*nS^9N@IF;pZZ=!W+3o)^of?MNnBcAYtGImA2CD;@rOZp??~AhgkF8(!N4}JyLIT z>TjRAF6VByr^9JFfv~EASo>)=Omn|io08U_s7{PBSJ>%}3FImw)7qo0(*paoY%c># z$rgx+0m|8zM7z=nJyYYU<=d-0Tx9ZP_uV<0XDUi21*o5d$q)C#C*xTd2C?~9dsZMH zxX6_EG(2!y@)lZ&%f`V;ipX7mSNZA5hhtIWgjqFcm$VT2TuEp;+)xCOt;^22L^Uj3 zqK+9Zvvzi;MPZvr+K$h&>DvJgy?)8gH_B7s*YWO6GIqhF1?sG_uWVlk?OWHp|h3ckdaqWbCU8C2Fj?ZT{8oS~vJ$U9}d|Y}$>x?;nZqDmh1(1B# z!bh*cc-)S;)FY(vm7kg+eB=osueJHz#Zwgmn3PT*W$3t+sJ{GZUe_8m`Mn4n2%xBo zt4Lqb9zt;4o#(PKi?w5}rerB278=z-1n?3b=MQ(Yt`$n;?RRrrkB@3zVSloEvYsGM zbl<6{{3N_onYC`lxgofJp_jIH9X-zt3pF?SC6|8dra9`4oz*Z;w{)#3L0$e-Q5bCC zV3fb_-_&ZkszT7k2 zuIa{lbIjOq3$6O>+J?jn$VMLkInz5m&}?104+qoJ2ei$9_Y`ITd94nhD=W^XV>VL) zc?5KL1PVGu_N*29s(c>r{gpVrty1VNC->~)`fB(2^ia8vs@ns#ujzjD#8K78({pja zbpEdI`Fesr>5aA$)*z61Gb1xk*9)iy&D#ouo6JzgmL3w)KTX#&)$NHK`iuDx4d`WD z&&DUV>*tn#h~CF0aeX$AvvMl#^_R$7Q68vQ&eP=ig@LgM&5Qa|qQHGq>)XQ<{Ey&s8dR0!+gt27^r66zY7Dej15?@Vbdm%3I&8JAK|ocE8>wbgiV6 zVmPV0o?f)WdfR7bs1!r9AZrK9&LC8SA zB^6VD5Ky@wPk0>KwY--c9JJ_?>5S8)0xO!UrY}PIa%(<{9fERp+?C6Y&5ZTHuXRL7 zlaCI#8LnY}q)5*z9D>MS+ac?7yuT@(n*%BM+wJb;aI*GUgC(|=`#@7Mv49sgO#H-D z*CS?|K77#D{A@fwAJ^)|=lmk~RNl`Iy<8T57V>H+Att^L8)XL(k7c^k-m!cZFUGIGUt=e`On80v17HN}<#s=R z#+4*8+WjNL{BM21MJC-e*Qr`yFG!~VET5oP(!1&yp?!aej58aKg>l}#FW1OS1s#=N zYjj%@^v>uwGVj)Tmqz@BMorhUm z2}8HU(H)FpPClmbO6m9QogW|MlZEr?UC}s^l+G1-riYjJI!Y6z+ch_T<_OxK=%(BS zXD!r<%ehFlaggq0I} zmquI40PJi5QP4h1(fQ!eNm->nsB@uPZB|Kk7J*=t+wqGYI56`7jA<=}>gZlWMoxon z`3u7jQy)DJ9^-_$6OfT%4{7|OlQtEWK@CYrkUMMj$tm|6qkFREu+>yrcu-G zNcm`A8_4rd^!?&D+r97-4>|o&9vd}2laMXYo*=?QkM~*89W(G?!v+s-~ODS3>rh#U1EiOjkt6NCgjeF?v9VMFY#39`{d%;SK(x> z#l}wQVHm<$&(vURrdOy!G=s-Xd&%;n(;iv!*^dI|UHbFhAUE$mOcGYL1hf2Z67}l1 zC+ZzAvf-%=2Q$;Wyc3pxU#AsmoKngnLq*S}TR7V&Hm0YbhG$WuB%e!^sNJ5Qsqvc5 z3Vq_Th&wRZ>Wzl-F|+yh-~^x6Zu^l!J$k$8{dJ3k@x0xpDVu+sUD`sCti2o^`F8j| zH_tUPiyCr7`U>bsOG34OK_Eu8PXvH>k1nLE}28oSS;`>ZRuN+@Vx(n&}AFQ;!m@ zAd9w#Ph7uWx3krMy|a6p5j`S=vWF|`BIGSkJ>U&c9g^|HFGrI)f+KKmhX!ZK7Fv;7 z`k8E6nLm83fA(;{j^8ihR)046-0a99b=t&ECvEU_rsV3Z0I5y59cCC<#AjsObp#wi zWrhUv6a{`VhlAwW^TF%c3+!EClp-9qAf0~;28v6E&1dU>gm=%=PHimG8m7hJ+xE{A z6Yg?2jV7Pdee+!Q)4gkzLuU4f<_Bu#J|#dq%0e3NXqRt}B<$)xKl%p{U0V%Ps@ry* z7NKSt7W4gif@AI6-lbEzHQMh-z&oUnBynE#^Wx9iKJ`zm^xa?_b@kCqW1pyD8GY|w z-aFC1J?FW9C-2|GVcP7)nK14>xE9Az{OmaeCX&v2M#mHA{q4@?PKRCla9$;ZfrjPi zZvE`)z8fmp-FGaYzhDrYcdraV0SK=fL^!+vs7xlss(7u(%*;XT?qH)&O>jw=z1f-j 
zT>$YkS&2V(WmK6*v?j)r_cq@tq}1=*qHM{7!dahxa&48<@r}%9Sogk4c>zN|jNL*K zZu9*%KyGkH{dU`v;w~yY+2z#hr7IrQ5S0z(E`k|e+^zE5J!q+=Pz_*n_P*jQOhl23qfJyBMGQ#Uy-yV-UF}w7h6nT9}YdP(I?Y5)!mhv<8E~G_StkNKXZ>odo@@KAIMcq|MC8rnDSFXu%A_dHI5J2 zLr59yDqVoR)Kbh%LsQA4(4hH^$-fq?=pQbh$%oGgRDevj!26r}gd*|S-5eT4gzvv}= zr_dkefvg*$0DtV?wh(~9|9+@Xb(2-vl!BHL*3SF73vWmEE;`GU25$nBn6E{XPWzzD zb>6QJssAXEUK-`qM>Z6#_rcma#P;leYWm07GsuAQQrYsDUQ7>--lZ>49r>(XV8iRq z-ONe~d)vBL8?Fk0J_F9HM}zd}l2FhyVztM}Z&ShPV)hJp3PU|YguAFiuv`1pGGpJJN>BS=ETee%`Tz#aW}^@n?pZdrTeX3=;}Owh=G+o zi};GR&eLeP;+%)|HogyubjjIsk?yE>QP*zU zCFafX`8>mEyfWU5@z%gW6eWpK^6hh+$R^yP;p;FXG+eaO-I0WauET~Uj*Sa+&1@gh zpC-iDE^x(ZOUxT!x<-e8o2rK$zMIwU&zY1;Z$-b~)MS@$xeAb-l{1{}TXu`vAs;W^ z(~3WWw->#}-Os$eEunPH2??Ow*x>Jtek>Az_CLud9#$bR z*k$ETa584%TrFidq~ASO+RyC!>A_IluuXOl%ERNF-@qn#Z#Q>;_YrZt88cPeTkXg= z!;C}sv3ZLr8;T4NT*Q{_K0{pHXk#wlUvFn`85&i{32J~LK|tpuhKJ{oBu@$$JP(F= z#o2=8rwXq$G;W`mgdR!x3Q~>8Wc|1`3mqhi1@mS99@9bX@AcuZ+~2^r_H91OmR11# z;BTnIB2tfw^CgOZd_qV4xOaLg`bWIWf|`r0>OH_mh~E{`+5R$arzE%LU))-*_)_Rc~X(>w|U(&hb`d_ z?p|-4DYm(k>F)R5Mgb$TPN|shnMNY+-SSM{jnmvt*cB!OSZ@J2zelG$cF%Ua>^cB4 zRTJH|M3b+7U-Wzd+$y!;v30lJW-U@NZ_U^H&$@B$w?)ho!3@7Eh$j61iAMkDxD#x-~bmL-%8H3i2VIoX>R)gnL!Sy0UDah ze#_go+(442+YX<%F^(AfjlVf+Nh;}?{u?_XG(pD*dJO|Pty;>Oq-M(2)L!6 zJZ4=+Xym9VY?3pH+-cKAq76vicBc-rk^V&~V_P2V{uvnW7W_kyeYbDdr#QL{v#*zP z{)fnrHo|1!93cYv}DWJO;c>REO_tfW&NC2XAL4=2ubfZP34urp2B~1xAY) zf&e?wQ!G$$+_b&rUp3=AO{BDo`NOju42uVOWh073pI6LgbH3j;)P$FRdtu%Klyc?l zUdcdhrRMW0#Pj!Pm?wfJ;ObxNW$kR7-T?^uW2yP;^P)Sy)L3mxKB!k@Ujtf&{)2Gk z`dgc<-R=Ipdoj-X*t;0OmdC}<vFe9Fj zc-&*E3P=oM0#tYSmmQPLtT9p}q*Vf+@Qj4nIbjwcnrp&Fyk*OOlWItwurhD29&YvP zAWPfXdBWPdoKo%>Q*O3gCpr)&jttuSx`6InFows0C0lwg=JLGFP!TD!NcWk(c}VTi z;8?Z{7ztdjn4n@ z2#((H;=r-n-R=+!KR!Q?Q4br(;Et{pS{-PlE<6?m0avFc@^0Vr^rIJb3}ZF#vKtPN zG}|U-=ga2-LrX*1Qi0fTZ$JZK4%OpOTSTb!C7^ykPrHbJ?3H)pJxVQ7-fTZnkw248 zGwkgc0KR3&H`~sBq|nBB`qFONxFhtp{dC$s>XPd`3miZo?x9I*_|HSV654&d=<)Nr z499glEs*LUZbh*vd&f=>#QpvGe5^#eUX911aqXK{0;vD=mKMK@*Df2(&-Z(%H`~ok zfy5w{MrgW!rCUx0aWXq_(L_8Dm9IQ&qA2(8;jAd>-2|suO;5@;g(~IL*v!Q4NpfYK zRJ}srr^UdFtrp66^k-wUhe41*N@ySKB|pwngQk-g31_G>KbKwgTA=VI0~DqjOBZ&w z7_F{zHNvDmRyH#my)UKW`BRjHMlq19c(|S+O%}#qke>X@XxajMsrrM}-_wPsGS9q4l;PP!q>KrruCcEN=kiOv z!*5-GUCwlTb65~`JbJSaWtq+1PAyHf*;R}VCibfdGWdIw7d2h97}Pe%yOi*dJmGSgz;-=Haz}7j(#N^JA`RycI8yil;uYr}7|!XYOrf21o9M-&*%Rok-hPZ_0jbUC&ES)UlA>5gi-aC&u z_6`oIQEu_gkjfD&>?Z1xC)p5)wFj7ff8gog|CPM(0C4$VJoO*c_X1bOKX9{D{qVy7 z+J;oHR{r^`{LGzTn!;9!544Dw%^;C~J}Nl!n;k$%(zUW^K&GY9v0FH*$e!cf1?vL) zl}Lq$sw&LaNf;b;bW#sPt>cX^MNyQsD;o2n@zSWd1;Y*_55DWf%MR&Se-o?h6k?7m zerCggo{q?52J6hFbd=49v*wbMS?WxEq%g3q{Dzb&=N#|{khpiY>=0MPb9>`|@+VmW z?63w*-?%tTEkmjM;@0jllq=ZT)_N$9&@rsdP+kN7vVc7R+S^#$ip)7aDL6Mpu7XHF zhffcJsrGfn&VEFWvl3w^y78!>M73PZUi%=L1Zco)+W5Pa2}qX!ELMdo&li|u{bR!N zcl`yqWL`!*vn&UlUgQ-2N0d8%$)ZD1**!n~;o@^3d!@SBDX+?nIZ9l2SP z-n5V($DeDV9?#2b%05Y~oG|c6E_Hf8(<|hTeaQ12QA7NUwb4+Svm1Y1ZUUE>#;W>$0cR9qoO(LT7k4;M3Q=r39m$xK@_eSRF+=zf`B+%)_3nd*G-?8 zT-CcLuojkOJeZH0(e&GYuux-dg?^~DKe*uFE~~K6LHY1b^gUgrEmWWmNtzB_^66p zwZg%?KtOLi^+Z6DdmstKANF{Ah+vvrpDJhr^w_Z$CW$!}GkK z_xttgTR97iT9_%Yu;~JoB(TujL`OnxASM$5EHS5o(a!4A=%7v%yL+8($4l7`mA>tI zA=!~jv3dw3%a#Zu9VznlE-p!`837K%9OtOxZm%6Y07D!9LJ-15p}FpxiAHk!XD{}R zd3{Xgc-h9Iei#F@ien3-cK!)gFZaBBlbUSY+c7%<;g6>;nLRYnt>o+}vDsXv_7uOi zt^yZJ{^IO*Lyvh8o=YbGnTqqhx@xV+rjJXcS9-CZll)yDn_uN9=rNO@Xa@Wm0xgn# zW-i?N_Bjv@Dbxwl{#s9+{j>+swDW?L#3b-TgWLqA&$FG##pZ|%)@3YBD*G#jiItrB zZVfv)*Txm&BL?=pC$Og{-*(26o!#@Ye3`f{&6b8DdLm+*b+grBo5a{xf6A z=M1zrAQ97v75#DVD62=+7Etj@4V`% z-4qwR2(L5n;1qCp+fs)zi95+&&lAWQif}AF3Rb`EnrnjcE87{DzDFNf!Zg>vmA0x` 
zUDiK2WQe?0cw0P%pugGXVqPmDrPuIg;FaE#lD8UGp{2ovqvr@k@Baw+YZj&Uo1E&Pb;e2Z6HR4R2(Ybu=Jti$XX-VGEMbBKL`F_>K2kG4Xd29m3p zK9@Cj378hU&g*BregF8G8iM_$usR&ZcA1|0tG=X6-v9W9pPm&fO~o>8qCiy`N)=OIXI55^`FbU=>HG5heeP z;A#oeX7FH|!{tnsD7B5-XOK;#dzRT+o9;7?p7JTB_T539(Tfn|=LrJdb4_gaub#R_ zh*j(3h0>jh9{>vvps2*SKL0i8fW(@L>*Iv|+s^&nS3h!d(?tY6#{RaFo+z#6?O!jX zuK9H&U#-E;{)2Uq&kZ5%So9TJAnnnd`)V{}UsO?bawCQ9B;D{h+w=sv<6_CbT(~$e zjG0nzIf$0vrk0>)4CO6%UKOsYGNJM6%8KBK_seC>`VVs!4P>;-tbW;YH~zB1K&*W2 zG{P1;M|U7?5Qk(77dXu3{~Gf1w7Z?ZU8LGe#9JUj<^>x98MYGg> zWO!|8NpzT)SZKtVcNnK7ye0GO{Vq9nfuHk8cBo_Rp1~n zWTLvvoyPdIIYcC%#-SL!`;O_$9TfpwPH7dkxph6t>{Jc4mk!d70 zP@6f;u@)iDqut5wybzUbd1PAa(lKj>aD^BE5xcvNfH)3k@^k$SzyCZ#XzP8~=uU z<1hej_XaRX0K0ySSWhn2CXT(B)h@|e4vmL2$V)5bNUd~nkV#R)KdSmO9Z_bfpozOx zpY;REWVA?+B~~$Q-UxVlCLg{hUfq7*=PBld;$>DEh!!fMwxjlNW=GUIJACn+>33@$ z6Q-YML75>p9{0!A_HKYf;Dn^EBxXBF~omL>jV)+d1Tq3?MR>%?^s``aP5#a=QwAOz-8UMqoPosmp12!T zm0qi*Q_Iex_0=;F#U_~McVmvncUGnoK_v}N1^sPpgEi~s5EWkMS29%50i!>uZ9Ij!~)@AVlcV9i-q z@yT1LSzNW>5vAJ6i=um#{YAM2C>Y4C$%219r+fXRN7Ow+pdO$WKQ&s*5_F1(4b+xbnBL{h8ZYMcmLRommg)Qg)~XO8UC`6 zsY~-knM$f}4a|w|K7Yo)ZMjjBcU1e_BGBX6*y>!_~W-%)2fw3>U->CqL(5dGz>`47&gvrBtL z$>woitm{WIxg6$hAzX0dG{4EnJ%PFWXE6%&r@Nu`|Jc2UuUeHIIq$;5Lp}+^Q>0fY zsT}jE5|;WAahNBk>G8aW59ndRMPQW{qdG{NU)4i*Q;x}k)>qTN4*)CpLCMbeJN@Fs z|FWfB3vR+Zy23yRSpjLjBj$_*2HbxC?g)7H-Suffa8+W4Z+Bsjwoo-ZaX9i8n^PI# zr|4A37=v<@uH;IcjnC%$i*cH>IO@NszJwZkUY)ms2Os3-H+~uz?T|pPI9OY z^~Xi(+JEq%KgBsyppBbOoB>9-I{ zu4PL7^lF8&;IcHVSvhn1d(bB1% z#){fH6%e7s@%r-${$uEWv>gcI7l~v4B#MwOkW81^<@=AyOVn;saywrD2aN1q>HY3f zVG_t^z#(Eaun_17ove6PCtV=#cbX-@eA5S`{jT{LP=(xJ*(X8oW<=<(n#@zuw*JB&A2WNiA0Y`2a9ZW*?NQlQi=!+K%#cn}{F zI#g%DU1aD4=C73Ndfll^FhX}P^yE9-&ssY)^L(KCF+9>&+Ub+@e{{DxN`$?PMpb%W z3kbObfO<_g1wM`5lmyT z?(Ih!P_8J|Mnq$*+lKjx+3r^FIro9tdy>?T45Auh*p7OAN-3yTb}pI%y=LCTbF@Fu;W)28%sIwfp?=58=T3#6eY@H+U1o zdIL52UoU(R`rty)T-Z#*TwVsu>B_<=-d5G{r3UZtM>}kdufKpG%YcqLXA9(_1Tb-_5w5O8~o%C|wef z`&&T8{%wRRbI)DFI|sce`*bV4uO;Z^ObWNA$4ma3iNv$u$#vvdrhCkxkm8ueP>0t< zM(hg>lQ7w8jo#ZzVJ#{2FaJUc#FPP_C4{#Dj|y$82BsAE=NUATY1f|pPK+B(%JhCq zwRr#+;b{1#Vh&bAEKZuBWi>}aOe^&sQkcj!W07ef^#8)Q4=}5~IH^jhtcjx9MU~0U z<`prw{x|b0g1qG^jWnOXA4AH=?%&JBJ}h=xX;GRHcKx}{MQBygviWM&{9RxZTZXLo zGk5RW>F1;L1h0tOuIRCw4FHq2WgPw`1}~GkSMQ3fAOON4^zi`X#Ex+dflIOBTk>8T>zS1{}}N0 za=V0jv*By(9mo#+H7PA(5SzeyW%}8>v$sf3@F9VCp)MQDabf8(#2opdXdinbl7`q^FD@R>D45)KN|_el2OAL#^V`JpI`#_fGb= zZ?kkalw?_y9*6l>Mz-$hAYro5KaC<^Mv=xE#g42NvcV`lXY-eGp|$mZY!R*Cqer6J zI6cOZ=Z!<(JJmxKHq`VCz0KG3t-19ZViUThj{|GOoT4(5_Vn@3T#HH{X?E=QdpIS{ zIB8im#;bnCM>lQs&38Bm5r^T=hjk zj-+tdf|5AlrY;rW_Ox0;fQkdMqM@t!2uq3MaR@FYE-a4*)5eR_lY#L_IsTPPA#3R& z?upASuh^kcWOq^p;m_N_!u(kG$LhiEnAu6f5fgN*ao4&R4Y>c^=^!&OX3@;vSoe<AHBPLn5zrA`I+Ro00mer*@~cL$bR`H=s78_qA>lVq_{BKmXy$_^dPBD(~0Ha ztr4$(euI(#UW~A8hfr(qZ8e|7&Z`teV_Ydr;eFkYa2b2sClu2xNI{L057ApJOpq+u zq81oHdu8G~Ajw$QRc{pDCs!5tvp@ik>Evj$@9+DT8)G= z7xGlr2Jeyyay7{Ti4o6VZE$`5a0`*uCs?g{a7vL6m)vrZ+1@+@!t;nQB?GBT0C@&@ z2%peDtR=EW#zPfOIkE_9bWm%O7W;wZwJS-~dimCaC)$-kF&veA0!>VsMozlVQ!X`r zfSzopF+L}GpHe7Na_}^P@-F$wfxFJbZ<*?ktApdBrgS;4-gsL=VV9xBX`1Kc=V$&a zwfBzb=17jxr|}a)Tb3{Bi?vCQfT=CFzOEc;J#MLgvrDV&AhOxNMZWjHGOh2Rw2xa+ zs&hrGlXz7G$y6o;>q1bO5SH0TAXY~R!#spHv&6nFT6q^5uj5Jp9pP=s*;(B?7$Q{T z*z%Ue!vh)^8&oc2ud=L#P>hHcYZMH#B}f_s>o(oWdPUwfcefzhkCq8|*t-xr)+j?e zDIQ7VGCG`C_uF$lH*i$0N|Y4lYw&El&tI#kbJSnUM_0bLvm3(RkPelo`iwZcEM@?p zIE{3Qt)wA;0wjH~76{x#MB<_SDYq^~J?GYVqf7&I)~4)KY3!oyE|bFJ2-7$9@eIfZ z%*lCWCd%ZV}YaqR2q22#0^EJ|+3+Kr$)2P}`WxV<#rB#VG)!?SMh z;g6Za4M<3DtE=o7RPhbR{}$tHYmX5q)A@=9&K{=)q&!LH=%!L!Gq4rHdN+%si0zDo z6P2*VS`GpEy;9t%hP4=LgHyP{X}cf@7cI0FKEap3HW1w`0w`ze>d&+se7mKLz-6ZM 
z8SMVKby4UFrx5_lt&H_;BwBouU=oTgb&Vm+f-z>Q96sQG%x^~U*NDM;&Zqukm zf_2I1k_nXeI@itcy<;ptAG6*oiX^V5YT-4^IjV{N@7qt#U-uwOq$%sXW9B~Z7wHea z!JkEhxSsa_)-`dBoEr_)Nv9etd*AP=Z#7x8RN|eU-lYnq*BAzK)56j zb6e{Pb50>hEw8w5Y0lNRS?+i2=h{ zU}bCdqz;+VI)?uc$}ZZ#-j@I5U1uJ2wj27fVBUM09T)X2-?Ei0hLyioAK)Sl@bA*G z=n{#$6Ea>~RvSuNfp+^pY~2WKKIUvdK90`$em`4jk!?+y*KM7JWnMC>X*koUkEfAyl>! zqyA-MJ?DJhP7<_tAJ^QEuQc6EM>lF+^o@{(^#U{>J;^5Y}H+nAXagB`JFg-2>a4ogn+Ev8!1=(|e(6x_H z$F-{fNYfzYFR2>8r)Eh#B+5u$zIw*2T%KY+Zn{*?jQNrENF(5sEv<4^wq8fvBG+yL z8Zk>_H)~LsgqRiAR=CjhPWzG5_pR0R+3twCUCqCV|A1(0f6Dw+P}JF1UidKV_ZQGx zvamN_A?r&xt{WPa*1o{2yYH@mANDaGSKE{WQe}@4qI0h|;GhNyG~K?hWfAe8O=H)) zTMivWJhP6mT*x0{wGeIFfnH~_w@F(~qlyfdH;E;HuJ)yU1*4Q8%L}3;$R**QtqhHa zX;M6Ob|O=4eKHB<3L{AlBv7%}j)LkWpAS_)+Mb#9ZoO{o3i>UgO$~s#5dPvhjAnzf zad|A@3kZAs!iN}!_n(11;L+<^k-bwbuMpA@OQYD*A7FoK6mI`DO|NWc4`dQRrNmP^ zR{*OVNI3Kr%iIuf-b;KgKaT`yj$ zm9kGg>>UcH{b8a%Vdid4i<|G8;akjM_*Ih~qWx_m3a{SzvdaGH8liRx5x$~#$;bS; z6n|$f@|zA&eHpqk2IVBlxO+MfuzFgMbv-dKQ7M>uUNuYbaz7p1%ifOW9oABv8@Ch{ zXAZt3sY!#AJ$!T;|H~|Z@=Yl(`Wz0gjius%36VE@K+l#i+g7N*ENyz zB>0IQlLU2^s^cv@Yx<)=nFg8eR$R0;7a9vK4kZFwPS;1$gdu1IPv_@gGR~e^^yyiZ;|`?qQvqHpp%=#P8UMv}U$&#M;qdje$?#M9Jc|AtN$YPS&+e$A-SH zmjLC^F4JIE68v(ORdg;GcmLbD#u+4h46JoII!>@HB#N{QhF(t9ylDg?i(-C(4|MOKlK?=se4%5KwQ+ihzfYtr>$yL zBdZ6p|3=}*OWMT57?w_zN${}Hy{Hl+P` zkGc1f!#ZPGbDRWC-xe9;9n&#y1R<1a&p>}Hg988hqpm0Bfg_@9dbgbssisEsUWy|8 z^`}XMRS4Ri?}TZduS1INcNmbhgU<|YhWPLy4Qt5dsbq)6=I@1=z`VUrL$^7v5Sbe+*^LUdBcera25s*;onl$eM$4)~1~E3LU8tl=WWkd|-t zwauaG628D02EUKRr(bcwtT)J`1@Co#7`PNCeg~q3n)e8MgFr1Xii$1*S6{ZQUe0Fi z2I9ACdjjKvtCo12>M>!=f629ZwA7F98hsl6CVu|7_Cm#$c((SzIPq&Qs>q1AhAmHo z%O1J#XI4Ys%s}@Q`#y6j^@DvX`uWWLs!LE($m}{?&29GtUc0yy{!H0@aCl347CQlHaS|LD@NEEF}VG zr>Or_D+0A99SNmyX~{57Ki5Y-Td%|kv+$C?uBehruIsza?Phd{XV#=QRGIq>=8imL zM%>DVCrp@S92qTlAA5VGayI$sgK`E#-{1WQdMs^Ku3-c=a%+;?d6&7DOChh^>mI`pjZTdImALPZkip?} z#2p#~%95p_Lg@x4cl1fQ>0&C{(jic|PYHtjVDPboQkd^k(?M?jdQpVeP(ERJlu#2% zh&&*g0$?mOqK#u+_9s3#ga~gVSQ8)SP`f`|>x*JaFsn!yFX~Wz?+D|bg-_h8p>f2z z#r_o6ZM&btan8rwNhAD-xBnqqhU@*FHmlgMUUNZXwzu@=?NUvP2iBf_=vSjOwH+d`#=E8b%{j)(S%uc;=m}hVCE%J4YoxLEcq?clhd4wc4eb@F$o>EAs1=Mv4xJK}8pj#rA+z{XTIN z_kQrks0%0b6_RkGEur+$@VXy;Wb1F%)Vo0x+0nf;77H}RL3)BOGFkv}gR-YKNyNQ%VhCaY5&cw>_eQJ#AjRbsvDKe= z9tcFgg9bC-J>>(2m>t#G$JXccHPg~S8Q{#D1`yg5K`dwd zXN5`5^Pjus4zA#(j>K^h0&vi23){1smOcjuab!>z&S|&3!L1HN2eYnIhH}zV(x;su z6vmq77ID5^sG?ob4B_V{_H_I_Dox*Po4jT?5xkSIyaL>=^?@^CNhMMG9r*CA4vlYL&XnKVbH8|3QJ`ixmhQ&;*o|gG#e}cUM|030G*3O3e3{EL z)%|`Qkt$U4UOHVO;4AHh4=Q#qrn5y&A9>7zc0Jt=6^K(3IREn-DC#k^kv7deJhS-z zpsdH^u>>QWSj+L-2bd5vzLW1x&{{JDK}+E&rOFrR-W?(m+5jeUHtd3b?k>sD%xSo! 
zajhKetP2T}+k;S{6m%BNOuZGl?1nC&+j?Xgb}m;xej)tGL;a`4+kafWBhzS177E!@ zv54jE?eLrUNa_G!9901P8}KFD@q58^Rpj^Er$S|>yI@VG4bbtn&@ip=8&&@I#;!ge z4TP`0q^r}qKAd@KXtLuMoHXqoU_^#Ck9$OO%2MXY8#k}`DY}6Y^p?>|2}ZV7`+1#g z>FnL8bJF!{zSmH3PBs@t$5{_46Z}eY}xmR|p3y`u0zs zQVNkBC%BZWOJc925AocnuTpOHG+f}1ymlD>HOt_s0XbM$s{h%MKLpaduZmUC9!KK^ zh->@l(T(E3Tmxz`5t|>K5I&y2mfoxE&cTX_=WBV8_1%Xw!#RN^l>cp4+tTLv{LUcx zD;vJj7UyU7x&Vl7I5gx{FjKV>>_$j|%q_wmRz8cp>Zi4hXD+OO-4nH-Bl0EgJO_H2 ztp(c&Qx1ce+UY@~ z7~5fhJyW1iQf{gU)dUS!_pv2jcu$#XazQ9BBFg{y0NAq|W-?;(mQo&>yChg>+^vS- zYZ05%rPpG8f$_(+on$-zT9!ZWaRyD$x$N6Lk?Q!R=1<5KxEjFcV{QEV{Odh_N0nUn za={zC$~73-gCBb6k=)_80Buk`yMm2X{)_?i=kuDX?aA`vLwnRf*{(?m6t<`&8T!Y!)7r5XEu=8Y}+CAVTQviR~OfFSO=0x_uzgTcFDxwN-hPFN@ga z8=UJAfW5^(w*o#N5zj_lJbE$|5BJFuM9O@W19+03FYs&^X;fPENF*;7#;3%b0}{v% zg?j5sO{EMN^7dP^g^W%ifpM0;L=!*xw=lot_B#WGEQ(aW#u=$O99$;X)XTLTlc{J& zsWMz!BDeTl6HO}-)-twyIFS&{iqTI@7m%j$eB%LL3vmX6@cVe5D=_jNC;)@v#EVD+ zWQN{r!Q&hcPs8AiKD1pxJ&gmv<`AxF@^sfOp zZ5#WBF2aEtfhKP9lFqZL_D4{RzyHjn6Da7&{g0O!3!TS--W12NT80~-ljr$xfgPt+ zEw3q3{yivl!MqrIQDpO5mVs^LR7Dv%Ks!K}qskVi5WEx1+aa%Q?54ED)t9g>=4iaM zFyV~GU#S>dy7EV1n808{T|*)g&#F;)_%ZhS&egVQ5w0EV_WLR?czo{WVH5Rxt>>zG zlv6Hm>OL4hTV$_F=1B*JsIs>WdfY-&&78ri^#acP-ApUPfn)6+tg1Jk8WfE#OR$s} zS?6>8Oj2e4defC~@C;`4LIIY=_(v8I{&A6*x_`mCpR%JxL<9*9!CW3){T2DEA$zy< zFrMG5xh-d&eDQMSSGVdVD~7~>b{5MD-lazkU;@b=lC7Y85B??C*Bn(Fos!c#}c!l zJ;4;oLN3I2tz-uarP$Xn71j|+M*VxMWxA5lNAuf8I#`<=wvg(YCL5}T1z5~x+tuy( z(j4p8Az3H-3c$Tt={l(rS3QQ&u-QAGZ(b8^LP;R%b9Xw{#>6A%Cp!02uXj?CTR`MpT`h zB?j@VsCxWdldm21piM@rG0vJdLSMwZ+dn&Z*l=WCB6~F^F}5GuJwyJN(jxxHV*@qC zC|2DyZ{~pde0nL^jsL!Iu?@!AAGlGbpGp6`Ag!lYv6h0d-^3lOffFx$ z9^F^n_dsCc5bF*68c^3dgc|k-1N4yB*0C4B+&aITSW8YSWLAbSCP91SM9OG{y!8Z3 zC~Vs()nAXyk`rhQwiL82-Y*L*!|3njVMdQNn2WBGTHh%k3GynC^RnZ>vzzBHettuB zQ(Jw_{#eVCCssXb862~ENABTWreV9C5}9R7#(B+3DyHs&a(!5(L`lmaOFQbjWd;#kk)c`Dof>9^x1xY#`=(1V-qQC8Yihe~iazLvWHE z$4lSo7w(tvGyFC@Zn|I}Yn8$a9q=&|;n^(=4wZRSto>aeGYgJ+;E?#tvR=VxNKw#{ z5?}QWD01P@VY#P3YNt zomWv$tBDus6KI>(Ycs`IKUPXm(O>ULlJ#3c|*mF&`lt3McgJ+32}?T9f#WtwqU8Rs|;Uw@f=7!*zc*L z9At|+H+#}~<>xjVU3D8#vM{Jo=S<(4USlI3)XB4{5p)!?!0JIx)Pj9ksMmEFM}=>A z1mmvP%GPj%lcjnNxvs!!E~p0XQ04yFq*h@}ThP!K%7?rG+CZ7t*XY%)nD5bpU#}G& zr_m449QV|x&-+ImiVToNbwlNQuUeq0XNN3}NTGM$-F#Mi ze)jI-w;iWY9iq#!{IlX9EC=t&8=fwt@1$er@8n|~;(Etwv$4q{EzySzZ05Qgo_yzK z+Q$!KD22fEE`UNzoI+}-?9EvoiG%{Grt4fk?>b_Q!`zs}T&Ef%0WI0|MuZkbS0Z3) zsHg7j__)H$aOgmcV{62`*wK0&_nr-{6G@=E#Wfk2r9n9`eJOQQQoqVxqgr+?!EV|M zGR!VXap?VcgV|gB(^(j~9UlGjC{Ixe16LRIt?PbU3GZ zn>M82FN~UNV6WIplA-NG@Ftc#DYMwF>#O#6EjG$*QE+}vwrKPxMYTbZX8MaD$^lNC zTPskDxL#FZycvZZxWMy?uhw;*kPE!D;C@vEr`@t~$0-jQ4fxWIv9LdkpP$fHUbATt z`ol0BXH#3At-z(J-)B2;=5NLo@%OmSX)L+{Ggf=GbAa;cCUNuS2y4gspk85by5H1-6Aa#AR8tHb;{@Bc;AMDDdw zEii+=a?<(t{-ifZCF>Nnsim{&GSpMk54LH;`IhV-8Ig*&3$848WOcMA%JPVil?Rrd z@Y#X(C5a!C!PjJV)0^Z}XDOuxNJcpcsH-YLr6lfxVeV1@IE8nyUUKzC79)ZI#!Zjz zfdO`>c?Tv=0poi)OlwI_9+b&S{Kvl?^-e1_=)SKgS%%Ps zgRn>-Gl{tB?*|EL;|hc7*9_;f|7r;$J?$6>u`2%W#5EJ1;v9FZL`Y=4&qL_>$BF*k z(fD7>q5e*%Y>tMswf^fj7URhrK(`ZF1j^P(qL#46%|Ocauf?eb?l|G4D#3ZA`Qs0S zy3S8i4lfjp+(C{nAEoDy=zc%shCrnjCS3~}+fUKNsM0W_oS3?PNcY-Ts5WreNuC#S zhYO0A?$h$hOenr6gr4=c-is|d?gA&4 zKD)t`G)&oYLE#YFYgM3*BP zR0r=D0q94a)ug2k)r;C3kOsfLC!wR}cM#%c*=x%S@Fcrpu_$H{L9;4R5`Zq0tGQi- zmIGMi*gPoR2zYJ5lO zxM&BTbM?RFdYhxTZ_T^svD_KhNi%DX;rB{IjRjI%a0friP=W8(TU4~$j8cDn>M_59 z?c&~3jA@BKRR{`~(rd(MK`|>0*^*vAT6v%D_(Kj=(vb76u$fs>a``ZR{y+gQLsw@3 z^pB_Y@4)!OR3If5bn2#pY^9U4>u%O5ul#J#A<`XnBR9EzA&tm*bW9%$=#ZU0I^?;HsHcH3Gv+6~GKhdpES z<sm+bL)l^dYS zDEATE-sIdxxN{=>T$#^!+ge*VhjppGKQrR5nCB@hd9VTf&jBF#=uO^iSoEU6n%hsY 
zi@lbP3F;t~l>dlpv^HKvft+B?3(&e;Uo!BYw$YmT?b3|Dqi~?a4PN;wzZ6y?L{bTg zM_}ntyQHZ2qF;`#(RqQ&!D(4WzN_(3oY;jwz0Z@1-jm_zc1Zd-R7+0I9tp8=@X!#} zx^`nJ@N<(3ckw>t9EI+r;fgu=%06-^9&>?)eQ1!reEG zLMc*Q<@pnAHP2Q%Fy3gp>lwX&{LSaSn(06@ru2%Qsf+XY*?AErHo9ovu}fYMQu~EG z^h(3F$$y@i^XrX3PqEowk`?X2(eYsptA7mA!^EvA?*)J6JFwhrg~~dK{dt0k!k_=! z(H%QL=0;uoz}b4#jF{7kXynD!vSED^r?@QTy(+1>)NPuZGGbw3eeMl_8geS=0m&9& zab0#*{0B1d{2dJMTXfw6?UvQx4@u7D_k3^Q6tkXp{fi59Nyl_{%~yiNwcpw^L24dH z0jbkq^jj`_)j9#E+rFGxL%Q#q0Y9^85#}d(yw^d-*2f-C#-_ zoz?p7L%dipyiS#Uwz~!pZxmAClF%nA%eYketGR!KyAx2@IH(o9fWR+!!vZVJ#dO!K zll?s}av8hTyy86-@UHh$$$NL~NgR+4`V2vaV9f?n*HbgXj!spX)a^qIcjDqt-#{m_ zwXRQH_oo+QDV~<{meq$j!Jy%v`O#T7;S^p1h#!LuIqb?3ns&V6_7V20elw?#f1KRDyrS*OC!KS#RRE)Y0S&E&A#N<>(nFIO6SfA$)u=h0@E}gBD7V_hb-`;u2V4b0i%b5s zrih>l1}ct||=AIaUS9?pm~c{K}(avo2b%Ec=d(6m08yx5sT{U_-Q@_LpRo z?g0}Y?Q_c;{wLb(SeJ7a{R!_uESu`qI3QNz$im|vUzm6a;)o};o_GXyxD34U_0*bo4Q_*rn<^UIQsFp@MlPrj zt6*L7TZp!J@by*ykggkJSSDA+aZM72X3%!qXm*U(dG!N#% z8X#*!!S`6H@WP8obx~9!L-ZEPgE*yh)8Db-=7}aWUlprxG>W@I0RiSiWIcqTY5w;} z=mZ(-7E}ura2Z?@X3&xP2}<|_hjLvAK!MQWLEyXrdh(H&Y--J$U|rwIrnCGP&niVQS>Da&@=2ytZQP(-=Rb@X6*`M&v+p5wv(^dN z#>yEQ$h%*cxLKVX!PJM6Bg6-k@T z=;R-trYPbTA6G68<(@OfXljO$sWqXKM8|G=z}|`(V_xTlo&#!ehtSzG-)X?Ha2MQ4uUr<)QR;J zUU$402z^lPm;qeym)7NuLqm2!)C~JJNkF#!$S?i?7ET=cD<(%aKF!@r}hUnEw%Pa2q@ zwNRS}%mI_E68ie#M+2QEI;8w!E$&k3q!=?5JBqup7yHRk2*!&vCRWLV=00FNs9aX1 z;(6QW+|e_a2Jlhm_I2}aPU-ax^4P!k+NQ_7pUaFV3Y%uNC_aCy`f%-?!_%yOWu7;( zpTPS)L@lDrExq3=?)D_CG_PnvZc{ZlYZ;yU0Q`EzqzZh&40`Om;n6XuB5q^*m(4v7 zZhz#V4N#mnf6JNudP-FRkv@LXP^L-waDvo{f9P0_Yrs9n8veBY7U==4Cfi=-8sDLL zCni?qwx|EspfH?_^Sjr4o{yk09Q`m`S{>OBC4RI?y@R-M?Hrf5qj5f3UKE##4S3)uTIpb2(XeucYPQ_Bd_B2k8P$U^G zXjvxGp2`4?=!#aPd?6(4S(Yi`(wh*;-Q*O&9duEHnYq8rk?-?B(8YV7e!JT`Waz(7 z6eUImMov8{{dpu{*1MF^EKsA)OKgOUCXjVsmUlU zMjyks6V+Yt?z=KI^P=>NFP%SNx;rj8n`S`0^|UaQf1qP81aum^0s+zs>h|7b#9`zhGDa_cd9jhlAF5cWl{75v8;J#Q$024=A1qU!DPm`Sz-t?pI9=aSu=+)3#xeep5Nr-6kWNvVw*RO=(3M z&k_r50)S5BE6$O*o=N7zD?0|n{K3Dsk~Sxpc#mU%G}xr?+V(Img#CDeHw1OR@{Z)^ zAtYZf%*I&`Ay?vcU{?~Ra@xOK=lY6q>g#uJtBFz%K?25GcwHInVF-lbJKyb~tE%;X z|35s77I5vE|6r=MwYyyt*)xzJftc0QVrS2qq4dk z|AL;bSM%xlS%XlcNIs(eH0Zz1*!(rVHD-@j2WR;o(GHF zZ6j-$k5m6N&#^3pI0HjXjWXy^GJwM^S075`H9<$sPZj=?)05@s!*9oh>G07X(DLAt zncW;VJLM-nDEQEw&yZ4`-+J6HU;XCab-}Vjc1Lu8R;5M28eI3>vG7)j04hC%ApzQ@ zfw?d!od^l&GUUVK`8G?%ABIB!D!9upfKQ_}_pCT{4U;zrtS_r1(~2VkTQ_#DB29%R zTeOdcQQEQhlTFZ{+N6u&rA&H(CZU8 zF#7#<;Bs&^Nbh?TPPAl6a#Fj>ya)$JqgqX?_q!3l$kJL={i4!M&XD7ch|N1`DWE}? 
z2Q8bZzy+Wmjm3t=ZjQhLX$O~3frn&E#0sLkp$7Y{#^;{`Gz9tFdB3W{mUNNynzFb^ zr^JTiV?F$%LF`z*FMSD*;>8G6N!@42(#y5XAb#1Ar0@-#(G_()lcauac}SmGXom=r zoYS=WHJXA`zA1mHx36eJPS)g9WO2agk;=&i28eiByzt75tpoX2?4fqXiiVTt{9kcM)KL&9a;hJDb+Lxj-81&>oVmM@JYJs*2e( zEq0UWFnR-%tM3C~q0^DN`W@^SV2lwFpTo@?Gb2>gB}0s%}8R zyxtPbTWxT|wtnD##+HV-O9iq`Gp1gg9@%xzWIp5F>E$^L>F;(tp8Md=TEs?ixOeD` znkwmyTT*k6UulQ-wAJ(ENfLdLtYQ8q@W?DoG^8~B$2tN^wWG3%13_S=o2g;M|AIaR zbI_g+#dy;++lA3<&fS4X@uhwt9st)KOR?uYmkVXqTnWgfv1r8>4DjpmTKK`Os}ZU{ zWXh~-qVs0AEgW~gQBy?Kh9BMaQ0aOxG5EnFlF#_~OP9BNn$pm}AV>pSW!byx>Lvq$ z+ebRF!a&fBRZUk1GR)eKTOVk=>m^PE4kMxA3|AEhiDfG*GXZ5Bdv|Xz89);5>9+De z098P$zy83-kY*oaURS}}IwG)!Og8@QSf@|y_PcIs-?bUP$Gt0sNbt_dyng??F@|Hp z{kI_=}%zNKNCIIcrIDG8Dzx2)WyQU%f`fu94!knIcjt=*#EOx*g=y~UkIgsL6 zemzHwXJ%X2%YS2U!4~lI9D9U_W}EDP;+`(-_1CZXH_wfNG_{*a%;4upHtuKOUli+c zLW&P<-a z#vlT6xaL-x$2s~fvPl4ENi_*^pU~aGzZ3k~{@LJD_^0z+iG*+a&*#m1i2v|<^Yh_d z>OdOUoDW4M^+ouL{5#R_*z`MpHtZ`PLOQbp$KgMn%cNi5@*mF?aPD5)T<7=s&X;f7 z{MX)R9&4W+B9XB1g8$QoKl2al5j~qIu|+A|rm^%t_m{9%fQ%ac z%3s&`@0-K0JOA-qndiq)*4KXa!v**qE#{4Auf(qpi8)-sFU5EE>p%W~`*{z0hc^AL zFBq?wvq1i{5@bx~YtQRW6{dgQ4}Lnh2I}EC%-n$A_9|zeWUtNphwnNV&pzSi=Q!qW zYdjHko`0);=Zni?4$Xe+>}Std`uTW#!*T7ua$KM9jpy)S*XQpB&fm_q)^lgO3B&;W z2}L;0e)O;V&(F6(e_nBaJ=UDZqxbn>kHJdwXCF>K?)dZB#b^81-|PAFufPACFVCNY zpYwk{|D6Bx`RDwf&z}$Set7f`?k%Ei@MPFm%kpvDEoFj@tpoA0=$ zaX$8GtFf4U0h~Vw%6#tbd;LG>qW^5+&$*}h*}XsK(%`^KtDO=PHQD*utN4%lWyzKj*?W;yM1D``u4}E&tw*)%UsKQ-9lqo9z>G zKpf@UE=u-u{;xKvzt8>WT+CNo`_H-9JKM2c&WsBA8<{^O}{| zcqRyVU|n?V0{f5OzN2o>u_|6c%maHe$$~Gz{$BGkJR3`B6B511+1%iV=G?=XT|S%R z>R;boUz)jpM1Q`Y(Zk4<|HK(}_^3#j!kW)8y!jg7lX=d(t~0N-i@?cjV+BqL^_lIB zZ5}%$5ravn5eua9Q`%)NnMZqca>@&g!c!u-9Q`HvG+doxnY zBqMmo@jHLS@8<6|zT=h&$b{y?72II5wCb@%=e}L(%bkXGT_-qm5EEWbN>r7X|{r82*S) zW6YH3Qd~F3+B8EF@qCy0I^X9(2>korh<4+?IwJvB#^8G36^KUU9q>H`#C*OVuKW46 zLy4-Ae{&y1WUILt0DlMQ4S3o4JMNo(uLDxgZ@Zu$!7ey|NBg449=xgZb;0lQy*}Q5 z2k-HHUOXT6*?7L0Zwc*)=SMyzp3f)eqGPl-q%hn0`F^)E@C?ZJ&pc)G??ym1|NF+n zx1SL}LHmB63+?vpAH2`^`+5Ez&+QL`g}UN} zw<}U47(c)IgWt^crT@>5qkr>U$8S4-e)}1}`{(oV+=%SX&jEgP#@+l}XcwD({EWN*=5dD918z~y#r8?}PXLj@$p}`EXyn$9J5g|G)F2p8x&67y1q7V;+FiYCiAh zxqzdX>z01Tmo?iX9RJQ2JU_m_<~m@^!{(Qr!1zKj%REkhp7Wau``yo=96W%kJHxP^Z%-0K49tIWaxq%Ly$1@7a(ZAF@-{H?Fv)w>4*JsQ*kBTCH5%9ModpFD= za^(Fsv&#&h7It;c+oGMEaejyv^+p)ico=WSOAB+o0P?=DlNe8UI3M}h89Iod&&=bD z6&(+oW8BY_vd|GRm^DyLb4@;s7-~su@UQ_Z1quv2M9zRx0x~VYfR_kL3g9FhkiyiaA$3b7p%sF#BYMbID-LR_7sIZFpb8 zF_J}1#jk4;!jEjpmsM!apHhEQS$W)_#pBZ#|HJ?IfBdhr!pi@Dd^%>u|NiS!SD!!s zMLT*$^uM{;z3w0X%lE+W_?(OXa+$Ai{`t#T)MfSfFB^jRUxs`3U;a_EWfpQ-{Z?(s z30nHHxPILiY@)waIv8+sc^4Oa**5-DX9`#<2lN130(lI()JPCk2ezLJ9Bd*e680jZwOI?nuOIUeJ)qmOK^Yg@9Xz}K55p$r7z^|Vi{12y(!(e z6g5bsXq&E%p7x1L#3+62WcSl@?9PPjTHy`E%R83Y@`#y5 zgX(1FEEb^S`*PdQqPOBF#8Ge96fIZKJEy>>7sOG&kvNv#JU?r0=|S3NzgwS?>3(f` zw*bQ{qZBrsA5=&k24eOAWGFmt)nWN~ev)<(olbIp)(0dKih3Ca){?5%i-!gQo{J?V zJ`BUSD!aX~#M(77RK}Mpu{!4Mr+;&twSMiMmuw)msD^!pH+tQd_5|Hm5LU-U6rj?H z*=(q52gu~25r*<;uA6fvoKooSZsMCK9Bg___uKQ6lqpkBxO1n-zAl{ouBLpUmr$eo zE{}qL8heMUYG1_lwO#-{N-TwIb7Tt+v(Tx?-O>0Aw-^e!!G?%)1pLeUqij+`zUe7(7 zFKPnB1KZ36ZTpIHR^>+wQ*nTbYO1`iYmUx;%7(c#Z(dpwk+4aVX}tz_gm+%7`*)mP z$rNCCDvo8&U($=uwCIoY%Imh(jf~XJazY7yaE-|JszNtsuezJiR)R{|Lo8;1mTY-qBRkAs5dfwxI z42)qwrz@+y0X_oZ*j5XVHq8qY1VQv$wZ1FPyUp71x@EfHa&&1sEu*|A122xZ}WM4SJ9A&6twH`}gMTMxuOc9!(k+{W){JPoamPeS({sup> z-Mc?P3pC_KIUpU*gifBFOMS8THT9&{P0>@qL+)kfa@cbG#v{+*7r%~;`LwftRhNXw z#>T?UT|e%Qbk|p;3Wg>8qSdO8uX_A(*k1VaAw2_Wx>d2_AOSD%L)&m>L5@ue0)z`# zmlN*z;nLJLd*ml~=^@me;wX4>i-ksD> zSKYUwf7&R7g(%tM?j3I_NiKJbQn5iwvk_@-)1`+r*`@JEf*`AZ)@C>Lphm*F0=Ho0 
zZtDGp1|KK1uv7iAGHCbNJ<*)$?b39+d(P?Vf*%w1-XGK(VVuoV)Kv+fKiBI0Xbkgp zwVj@0_g#9XH50f9v6ZZo(OYfyxDoXwRSI*(yb{+G&XjODR7Yp>`bhY!Za@ksk3~c9 zmHJrQ1vYrH?c9NX4?nuaaccaCbMue0J%)YHftJmPRnUq}MWmByuhCA;2gGn>vUjs= z1bcsvsPfiL+kA>28?8He%4=1qhw?yl<(r;vA89+SAK}bRnw^v4Bh%*xNbkto{ccJo zWF~^$I#^F;N+XtSbPGycQ4$yNDOi7psdjR1RhdR#WgAg{*XpE1NrMkg&l|)KHjnms zF_?GJeah+9S zTM3Kd_+g_udoFS!{9Hg;dAY@LR6R~s-~w?>iD79#3(`0NxZKq|3*ChZrMr0|ZMK{H zof-OK2lSwSBGs`0UoUV*o)ttRNdObM?QDpwZJ6>L7`xr@MlESA?!lzA<5w=n#M7}J z;#Hgn!@{3JINu%@G>Hn{=-W6{AOAR-_7zyFq^#dw*=BfHcY7G z>|$9?&P6VHJ3^IvhdQC0S@jpBV1jJ?bFKZWxZA8Bt9o)4S1aZ}#&wK0Z3jR! z`0!e>C7|QkU8HABrXO>W@msQl&NjCAP+)l<`32E;uTq@fUO1W(eL}>@RM;{>Eg2i-5mfIW0n46zP!5VJmD|R84h0JWPo+RX9RCLPg;X7jQeWk^ zKrEFAF2>WPy!7pwQXRyLcwBo#o8+T+7Aq!t91fdgRo3#$8*fdAN{-BXY-Rn;K6ljT zxa)_-%WC0Y$2%4?0Q3gk?aXzD)a<88gW|h?V^o?_t6Cof9fZIfb$u50cJ+2s)4}S& z_<3GCg1pal&{RtGc!033yNl!w%rlFPF*I*O-L0tJe@&0=dE_?INr!OE3+rNC^hKa+ zU9mv*{p(tTb$gv8%@n@cYD4KSbs+R4*tuscPT=Jcs%yEH6i1IF4*159C?L|RVK?c2 z2&$jlLaoRJP$4`t(YJDi-WcU6`*<|1Hr8&TDLC$)ka~2_;oWkdG{i=<#kud_WZ8>{ zMHEo=1_*d=$u$Ch?^&AYmbSHiKTP{sJ9V(zh9M!q2NZ<2YsJ*lKY?%EuA^%0)z`&a z7R9gIB$8>eG#XNb=Qhw3Ct{aCbdVQi$*@IEl$a-wiuoziWfY zBW2hwsD5=f+od^d2I}3`OM$*advWa|W$BO^gi)hsf$8brK0$wT6&~cJo-BEC5qUdH zNRX$|V`94LQP|$7R(gh3&`>vQmkLM#)GzmmeCZvv$UDI)Hir#rvp>uW_#20R32Vy? z{p5UkzldYlAB$Ra}W0>eyqmNm8mz|Co($Msa~x7XGumPv{CZcyE^x#sV3?%-5;;j zSku_&d)?N(dkMm`QZ;zsW>oiHohI_} z7`Lx&Xm=-vSS7DqSl_OgIpX^okd4jfX+%&fCO+vw2vc?FnC} z&2gJ1uhr|!T8sKkb&vY!#pcs0r|i*NO8eM@bX}cP<8Y~KRl%ECwIeMnPr~T%d!$WY z3sCNyq|KUq+&&lh>2a-pfRf>vj`eXLMq~51d7UCb?f=*lTWlg)L5Q9ZMCbIv&V{fQ z+M+C$l-VAHRp5s1Ef%*Cg#^%}b-l^mFS4bl$>U>^Z_kg>=+%DfuoqGmFAwU`1o@%3 z*PrWYXUg6Z6+0Zwu{@DKlufcRc3ngh@>(R@j_8^o33cJnq}#iHyPA`iFiASQf2ohXrqUJ^5a1YPjlr3tNKB_7>w_NS17WvJ2d2S=1mPq8rM|^9cp- zdL}pA^zQ7F^7cO9647K-77Zuxu3X7e?Yx?BtC*OHdsaUOy~L|a;=cX3n)~QCKIXy| zFogZ3zP++?)gmkgo{7~}uTPc!VdNg``x2Vd{wnPxKQEnJymlDS;>WYcL%;H!u1v*eRHVWEe(&$9R}xXk zcN5RuPD67$ZSOI@-YE^Y#`6GisDRmBilXb~;JlI;nJ{@Fx==L6YiTpH6 zw4F`vsz+{KdxOr}*HaDU{mEa;=}?Iw>I|a2w1E;hm@U@aNf5l6X^GdpF<$=kfIu{O z;$?OUnF4q{_wp*%Ev2YKv z*6~PxkSV({rrIe@`$VPNcQDAe$CPiWt}c$3n`8E?H6$`CPA-D}p?p@nx3!wPsy&~! z^^z@7mB+;j#2SGd=<)tqZmHfy*tAnb-8%&4O;@9r>=pL(Z4d}`7n z;@Vxc+!Mn4IYYa1oJ$GJ8DA6 z2ddw?Ati^d?!DEm&mfkUqpE+4H2R%bjB8obZ$0YrPiE`#j{YgkuAJ_UHBR@bOfaQ? 
z#8=Ku>1p$5P`xG|nox53abZYxGF227FJsfb3rAnA(?hi);*&{S^DW^Ry}fjs=mL`0;LU)>|8{xaWmW`zD9?pPK~F( zd!5@wS6+t|A|BrB)O^@OBkq;G%HQppCtL70 z@93t*cOP}#glA87$bv%Z0$6Ww+U{c>`o4U7S&w%}kjLnH3#5$6 zt;i>eroLDMb!iv*)g-&eKSAcQMVwB2EJudc@a+ysH8c5M6u5ETd_#8I7w>z5%xqdN{tZ{EEz;gtwJWa`>X} z+c!i6ENN$vh8*VoL)$M6S?Ja2Od-6i?{3yH8aF4J5iaq7Y6_Ff=|$UrZU`mMsW!PW z*N@CU7iZ?7nS)z+`6Oz$H0)zx*deDl(QaSg^v)cFZpTjwYPQKK+P_HkdB5ML6aNMiCT~4NNFG!CrE#Z7kIt(m ziG5vt?8(g7zHc?d-#xcXUv%Umgm9*fZN!|^-CK9|&+6oY zKktPqd3t|z5fXWhx8!uWY`28Fe8}-|)sl4cery-?$L-gDfgKdCGSKpC+2pU& z6ioNbqG^P5usH5N;BFY~Y2QR?XzxDn^A&#K!9Rr_=%D0QUtt&5=#8k8lmp7It@WDD zF?C9FMTK^m*<$+Ae(>S0njFee7=4mWq*8S*ri&02#4-sa+xqsFjP`y~a>IB$p`VEC zMHcYug|C5nFkT^lER>&n9fFq+(=qSOVUhLFkFL}HMHD8K;La&_2^|scGHU&RB>(dk zJ~PE!M$w9bx=o~BRC;f$&h_=V9qowoyXKHAliNkVErCZq7jKZp*uBd`y&`#gq4UWV zez89umW!2f0Oitn$@4rJuOQT4F(Er_uh5^;6+Tv8r9tF>mN^#!tD9Rz?AI&0AEi#Y z`xnY-Z>2y*6=5x$eRhCWkh~I3x5SX26C%Lzjq)*d9c$k$%!OZEqBrKRRz$1Sc4`Ra zoaQ;ellQkmA@{IczKQ7Rgk|)4LlMd%uKETF(p#ILRS6VhU^>0r&(01`St?G?%ejab zwz1LH@iN$dhSA;W?cM^8&E)fb6(b0g=#urh*t2ouG(U0Zf+^@xHdO^x8f1?dO==n+7FrY*asT@ zSZ4QLUp()ZXE8>LPn2+*&1jt@ON|O`ooO#_yr5Qp=O`7*CR%Q*aQY3VcmPHK zako8nLm6eSx4!K|Y+1PEIXz-X8(s`+=-Rd7w%9_t3wkkj$Jai!AEyEV@NvRpsm@40 zjcrwb$&|GaiF;M5i^HM3bB;DH9bUhx(rYoPB6-xPxAG?Xbd{W%U6Fn+SM?Q5SRBer z?CHBtEz;mo>eBw>LFnJ%01Ml$R}axR?%Cd{BuQ{qm&3y^s3yN3R$w)fwukVfL&t!I z+#FTOTLseVg8lIz22OT<%5w!z3NATKi|BlR+6{LzyIU)Bw`!?ip#ruB`XKUE)Eelq zP+>T%<83*v=+`a^iv9aJvptNpIc5Pu3QhiI+)$J5g#6|4@jjP@Ilw$Q>ye;pY4vW)& z^rR*~u#PO%r%RhU`a=5xX}T_+puc)<5@j7Vbe*Id#4N(q6_oMeLEID{;;FawrgXRi z_2dc9_ZQu(BgJ+TujO+^68ip#R#5BgbmsS;W4fjy3G7wE&4Lt)`$%t+1mS<0 zh=yobj|d6AhPB1*P^^CnM9vvB588?jR!(EPHey7ZAy8ACUj19&)4ff|&}=R?r%72( zzGz*c`@f(s?Js}-UZtM-K?R=O=w$IoM#bSX!)qDUoFE%H@#g=HdJ>hVpFn{GrOZFaSYtUo2HBC;Z`f z0j9jzqY5tz`}%Ud@sBqVN_3TcvV*A+s9eng(O5!ur+X&1V9|QywF!$danx1edWdK% z+lt8V-21^Dqo~TMEGu8A4<(bbbelj|!ZB9>SM)v>VesXNcukj+m8QpkOCV*^!`%6* zT|UMS7>sBtA=?siAl^Qr=tj6N{P)Ih$jQ8T`fF*&`kR`}Lu734M=(?Gda2pGg6+Cj zRz7(SD(gl>`Jh2MC(%?5N#0_U^Al=a6U0i25J|XTh_m~e>|{eOCB5d?n_GQpv**DE zr;Rq#IEOoaGsdF)($3s}b{vqdHP^~E$m7e|f9yH>`06U*6Y(#D^X}WywaygvSXaQ7 zM%P2=Ic`4ISprtR$Jwmq%{HtLk6Ns*fy0$oNO`8J1u4$_zHrZ{t;|oynfdvLH+5ZQN5dbU_mjR4H4blunT${7;RUB;AoVpbbteRY-7 z!xVT#btT^pgLGMczD)g1?$P!1bBg;3x7z7gU*Fn!3U{v+A~mPW=ACh+w^$noSLaW+ z2RSX3l3K2A5UmsPrE$^hFgq{xo;!a;APw%|{{X+xCKgHX9lp|gx{+Ro=|)P!Vv`1z zf2|rhqE^Kb+8wzLnIvaBrN@^Xxew)awnbMv_c_8gt9W#O`kPgOk#UhChv}X6{cg8k zx3@UF*kyB!FHD!ME7i=Oc7CJ5CTY7K%NyIhan7~lvsj;qPxm>(!{@Jbp2)AmJ&S^| z+@_wFKb#__y5PP&?LKpMyQ^~l$nuAycRAem{w?UUac9fH6gli(*LkGXgedoOZz!B1 zBC2q?Jw4ffQ@ei}magXGVce{?NnET4UUW?PVbH02l)*bnU)RfJ`K&=AQQMBqop_zb^TEnbvsdc=q?i>0Jot)j~W$t|`J%5%9WI?HB zKSj;Kyj?#>Wylt0aO#*FEL*XLKeFG-;%ocn2Xk6~(<|`5Ja>|I0UudwP~Ay*vP;{( zSnc2$L+3^XUGVqw?lO?xuGcURvSWkl@`b?@w$aAXe!Y0n^mYYC&NJAWf>()!;bPpc zyWE=I`;8*Mwqvy4>ZR25Q1$n*cFz}iB1a?7S%}; z#kJSPvf8HRzA9fA^G!bx2K8XF_52!`$-3IyjgQbnE8rf!yvne7CRmrFhgs(ah*xJhsu2qulM}#*4dSY&V8> zW4diZf`;H)ZPdl~qGry)e;Vt{P1j_IorT$3eLYT^2aN#Y4)XrIZIFLApWV{iTpqI8 z`+bW1?sSwvOXQcSN?<+SdV#pR9n}4QiQcEX^(lE6ETumWh_W}4ym+=7q+I)Go8?@w z*lYS{sfQoN*+HJ!tUzZ((P!>IZw$$6K zp{wQ;333R@%BO$LK-zJ-+>Inv)xMs#rnYO$TdtyXDBwBMVa@wRm)376^wq0lwS7Bh zY7-3*f6!d0>ONbsu!^JqS}QqE+5BDssBL-H>{Fr^x3hN|rgl4A_M5I*>Y|+9_BR*Q ziYlJtNuLbYT#>!|P-u4cngG@sOV`kcGHo>-~A} zwI$qo5(`jH1wNE&-c>$Os#lfa#v88>SfT)i3-u${n?`a` zg(nDV?Ztu))6b~{x%KdQFwe6V7{yA={N#}_CnGE5!n{B+A?`ar!rgp-iuDi6PucD- z?`-*eTI+)v!k_hmV~*6;fs8sBrDC)+^VJmxkEIPhT-W?3GAvN;HDRRRS5unnH)l;* z)Sg~lb3|aBY&>QAz)Z`BG;m!XO3@g`$7olGmz~i`;?;|8>ckc_*b=K^EW1oQKSdQT zvUI*1&SJ5hF2Q=)_( 
z9LWGuU_!N~&c>2hoHj=PLZw%QB5z9#G^r2{tC#kG1n4RSy{J}ygXukx-;?k;EtSUI z)kB|fske7->y4w);WM&g@lb{97PXWb96;AW$Yt0#E%~tF{{W$z9gd6hCoZ((oub}G zILc|^>ZF$zA^5v$Km)-~Tv9xNK=gaF1ypfzSK5m>P}r7yh8@m)J|;+!Jv8b?=;8zi zDlR`~Xd_~`lRQj2Z+v?@nT4MbupF-ie}W5ZEaB-m8K8l22q_h zjqa}`8LL1S-#RN6HL7XsH<3Os$Xy*pf7>j`PXUVNz@e$(a1$&>YxZ;=(_<-PT)q~W zwn=F##f$Fsg>^ctxGpK5)&%(+>~tc|TX9nrbb!=alQ)@vnM|$+No>6A9jw|b@=Tod zx;5|P^%(4f3mDT+=d#@tv=v8}S2{vludsLNUQ5?r7wtT*%}bKGwFC_|J~CMC>=(OV zAD1Q>F9l9oD5JeGduns~Oq*UUF5HqtP%D4FUTPmx51^B=2Cv;%4%bVqx}6TE-U?fy z`_R0oo-QeWvq~n1e+cd?Moe*s^3CBWO3(HKknBUwkN$aYc8A>I4=op`Pr^C6$4QYAjOFfTWXl(lD<3H7`5x%B5=3Y}E`gv=J35q} zFH~K+MnNt-AT`2|dQ4KlBnQJ;+5p+}KAro2s9aoAquBJoY%W2+R=`UNv>V+w*%%D0Q^Ih`+xwO(A32Uz)k zn&Qz}{?2tfO}zp!%D#8|N2C_M$`vZap;R48dd?Z#d5Kq{U8^h<76f2FcM#DAwNb^= zEJ#ulZdg?7?IGBn>gVN7#QQD;#Nu{;erAX0^4Y1IQJ{r~R~=2$MfvM(AyS)ommKWL zMAGZ}N_IK{|KaRJ%Z_w3FPpBpFgi1|dUbbs~)rq%vERJfG z8e0@U8~_Zh+xMi>v9e#?Z|~JK>`o!p6CLox>fcC|E6v*BVHNnz`~%h@?8}=>(#M%dTH*w!VGq;;>&IrsBAH z&`WytctLPynYXOmZWemPJjCNC7WA&dqQ+eoXOrkC2-3dxl%NWIc-(o?WoT7q3kz5T z?v5vp$}ui{@{`oJK|Nj$dZvqi&yTp8Tu9}wNdBaa{plT0wK8bRErF+a>!AF>b?LK|qYUh248GNfUd$bYgB@?4z0-QC69MR=ZXGRK%qf zN_FpNpK5|M~$l(Y5AEmtcYf|^! zjSaGC57gzcUr99nw(<8{+iw8npN2ECa%Rch0hU1&<|9A!bNuYP#Uge+fsNcCGwHp9 zI@2>DkxO24`o?Et53*teJO4 z101$S@`~MFJw=zM6Xh!}9AQOQvGZ*g0G$j9Og zfPZ9b!HF;9Yyb+oIHGX?<`E5AJZA z))E2XyDr;?vgv?pJ^?qedUxOafmn%rW529AvihEK#a->8h^|1$in7M_W2%H}Eah|= zXHImYjQ!#q-Pur<#o=#NE8mim|KWf9pZ>MbrN%>rua+;|N+50SZurcC#zb{sKm3?`j&)<^XAF9Uh@9kvu-lF;rrEqvy zb^>n>a|7;wP(k6#H3`L8=CGKlBpH5I0E(OsHDC3)Q|D-_ zqA+{1D1(hebgM_vhk4*nx7>d?*Wuy5kH=NKc~3mAW~#(86)b;9iZ`bwW6wuqN@Cc5ViMoc4EP!JV-{xiH*#Tl!aj%J{Q|K z>44CGfew@5ojknn{j9Gk88|ZvlI)jEkjo7vsSxO}PN!meb~|eSsBA>q?uWDq_Umx> z_PIB)x|7e_qTRfT&5$2(xz+us*iXhd6+KD2jB&#PFu9SoFQ@16v%eMPB1u<|{q?hc zy%t7szI6H}9hQCmu8#ViWU1=2UA+sS18MbtvwE((aRSbEnVn7YxkKByD*bfgJp|f@ zjZWPkbi}d})PC6MvOK@5cptHv3Q_IF@bQ_qqSXgzA%hkafCk}y^CEcH^(Ll{EJjaX zAt8xUiv_(w5swsq3T*Cc#1H_KBj6!itxqR`iLlo-^05PcCDH4GFB|UtrkuXzqB7hzFw`0lbi%g>jv+2JRRhh{z453(}bwgYftCf+5mg3VC4SU?x7jA*>#;2 zhlvj~kZ>)%@%VaeB|&}2J7Y;rG8HAhG^$ho(O7GpGeVP;r^8*kzpOyh?=4`Gtgr_c z^m7b`=XrO=c#KZ$LsVh#6yb$XsXc@5ESr(NGZ4LBU`{GAT&#d#v7)F3l) zY8Pa+TQ1Ji=B(F4o-E}ne^U2%Pd;-1me5;my{^-w*j?`kR*&_mmyhq(cXm7y z{f;I~rJ!@<*Vl5C*9+>kM{lFt&U}9MTK3$%f+f;#%&l++QKUCVy<$qe_h+$C9MR$uShGfQF34s zXT(h4S7odgNQ_b3?8FpW_s|`G-_U(34q(3L3;*QRS*2G;B+$5}wmWuqF88FFJgs|Q z9rVrOdfeO%KQQz9^*kv~^ zS}(iz{jAk_qVF<~u+v3fGoQ#_faV#$$*YrmTP6wpS zAI(W40JZT3vb_bU%St~~u(Ero{*#yRBDpJzTUXESHm)w!9b0UF@*O$DGUY6_*8r9X zb61I{rkyfTP&c$8g*Qsxy6c72?61cck1+CrRfCOjIIQ{0tyUY3T(sJ_ zxI&azS?m@I=JM{;%G%|H_y9$T`_yvjL=z`H=$`b-x|-m{vP%BZJ%Y;u;oJjFcOy+q zJ#}x=9YBJP=mv0qxoazSor4Dpxe44#qb^&g)sJcbtgoP1@yY`Q%-w8W!(Yzgvm@ z>3X`h*XoeGoe1?!s5{=X>(|!t4+Av>;dPPUl0BHnI0Lw>Jb#J$$S2L~=D4f9S@x9i z;_oQ(JKdu7eI=v-3MDyD#;4n@oi2xh5*`=heDdU5f!Z=`xK6b1jU#>B9d|oBJ>=^m z0y*z7n1*YAz}->c*WJuq1mXB;{XPUqbZV|Hxk(sjBK_u**f(q{?5zrV7fM@oooFaMw;(*djaRXY|{M1Y+^3ak~VoElOcV>~n(qm%P6R+#209CfzS5+S$Ka({*HFJKvmgCsdbg;IX@wpjuwxCFzxx|I6TJaNsa1=&enzQ%C1zwxL1el`k2UXrvY zliK5dZXMX=v5XGwxS%_f+<~yCE}}Kabq+5n-?z+LuFngzx)a(?V3%9B9}a7C|BCN^ zt^iIE--zmG__UeA4sm=u-Y}Glm#W77yirf_mB~q)+Z&Y|}R{#*cTVua!Gq)Pr+) z>+V^3Q?JQR8x#q;q;8yc+!HeF3n~Vl?YZ3?D(!wAl-&us0btZAN8K`NB2$G|1$?HO zSmNQp%O}Zgq|r!9#ZRr4cwDqypJ=xYggGWGkRb2mKFj6B=-p?Ppu)+G9Iiha$re_B zGPv;~Gn|g4*AdmnVQnRuBB^))sR@w^-bC8*<`4W$I6C*cmJu7lk33+7#PHrkA0?}0 z1!Tpxs|k@GY3Jp}UIM|gI+9$vW`YIsJ_iR{mcw-C>UHV+CPhC|is{5gMY z4CfP7(X9pSp$mtj6g{qV2C6@x+2wu}-@robX|8)uOYcQwC4GMsqOVNY7TgYh%YNKI zI?G3%+oj1p*twd9CEObjc-x22ypIG9d0g?l#tcFQ)u_fbE?ZL*z^^3g1V`-k|xrii&R^#je5rZO!g) 
zes`_l(_}%nkB(4)gbb?Mqi#L@@Y`mflj*!H`-^iRP8U|CHswtLT2t&qmowg#jgN(1MbY+ag(4Au>2wShkHbR}_s;ZS z-_hp-CW%w3BSN*|cSUvIuj^Wu4q6;H`7>y*EB6LE1gN0^%x%Tv){G{(GW3f~mOFq^ ziHl$4S8wwIn!+|s$;qSjVf5{>L4^NhEvipcM>eXonc;G19C+VbRO~-XuR*?}Dlw)u zXL6^c;U|iP@nt1{b-fa?0Y@>7?NUWjoc$(88G8_@SEcvc2&x!NDn`xTu-->Vi`g3z z<)3=g_vhjP&b1wcq@J6nScJIdl{nqc@En}a_*o(cDfw84MQ*DYMgG`Hd_hMM!LMK zKp_Copbw^hA_9aFUuxN6U*ZpxaS`@!ueQD|} zX{0lS+>F;D{CxJp?REys!gOeTR&y*=E2k$%-y|O>w6-_me7`+~5~V$3aahaKv?+HB z6INrftx*y~!j}Rd=kj{+R2N1UiV|Lr`ynH#&Gr<3%ByI%-{ggHK~^;Vbkx>5%Cs0J znshq9_|>8TuI_2Q9#4KscF1Pt`5|52rv8xE#7d^um-q9qa33_h{oT^rwcXC*&3&=U zctej9jN1#3z1`;dRJGKVmaytCbM?}nrW3yhwK=>)ZFdJN|I1Q+5z>fT&by@92W43C zNrrBJR)wjuLe=VceUs}Eqftz4!ChpQ`dq|sR7;vMicNIIlCyergLp%Us*sw7<79i` zNes1;f~rBH`9QoieuyeV^#gKK$9X4qhrr&zZ%+Qy1%cW2>4S$9$w zT7$5UPv=@E)UHFR#-kBEkMN|C?{0prY=;zJ>E{Ze4`EwhGg6)RliSO!kEnAyyr|o# z!f0%X^QqhQ@5L^knlgu%6B7FJBK0RPX7=0Y5kivvu8tRae{rSVGNO(#?UOZqGR@P; z0{vrHe-p2Cf9H?Ub0vb`q1hZ1K#H@~vOx*9?5=skd!m48k7~T23W-zg1}m+6k1M%9 z5{16<%4Ob^^;B=uDs~9gLv?wjX%12`$m{_*o#Qpz+e8;84e$iYK$Fv(+{S zZ$j5wu5=oAEf=Cj7RG3*Y7k(zbiIj!G=ATAe`7>B9UW&OIx$KmpXk8|8FAlC+{kqRv4bC-8dHD5AhTdUZf%h15F54wBba;H+H*AulS zf9hREY}F@rILGBnt{_r%>X&iy3irIRp+PD|1G^v{@7qY-2gQA$(^1UCU?N^9?J0O~ zH$B~g4(sW7M2^$i*Y$9E$pirFmQu@%T-|BHx(iBXUY_9_fe3KTFF^&vFRcEUM#qb~ zOUE+^tzEC~kng3SvKo$$d_~@ehjE(Re@+#`<@zqTDbhR?Wdp`8wWs^W112PND#Vf# zVnXe7`MIQKX8Q688AJkQjPxevlqUnikbV?TOX=MW}UTwj_d4JLPpfA-u( z^vPDz0?~Y2ElG?H4n5tO*(GAHyX{WTe9~Gj7Wckn7GwTgQ8B89-p)QbJ*GQNJ)#wm z%=Ca-reu;!I*y;-iQzA{WfjZU7D0~0A3sXT0vgu8H`9u!D-iu&eN?0x$b@?&t%N-a zD3;e37vIkJ3g+k|9;@kU^$6c&fBvaKjga=VmKpufn$jF=p&U11K<{;7xd7`wSfiSF zOM9U}WEWaC4%^2r0(k9qCO_>(7(Ft0+1sULD+p%Llzv%bf3u(N%F*Uv49RPMd^0Ev z&eC>atkXm`*5qzSy;5^gt%^>hpVD@(s(X~vC2I?0D$m=6ey@?HJSCZUf1?dn;jMG$ z+=~VkV#bA)UK6Jx<%!toBB!Zvr`TqbtS)r9G;eqHUED#-DvgSE@4J$7>vQCBCkVKz zkFYX6D6pjs^~!yq6|PUtjxv)h1fr}LP>C?sz!xQQET|ErDJ821MtPb->ozv8HSHy< zEL&tBhsKh5{ZQn@@5#&Uf3XKgOuyS=^9ou`rFP>3xJNZH>WUHBhjSC}wSE4oD()&8 zUORzGE4&!Ojg8Md2b$_*ci63^HQnf&KI5k{DmG7j|9+p$`)adorwU-QbSfz6HXX6j z?BY$12^l0QbUndnMiYIryBMUUlEBpz2<5Nu%j@~QSb>r1_KG~}e{J0sPnt_HGIg#5 z?Gyw^b>je>S+?_va6=W(wNC~e(pB|EMN*D7x)@|9;4+LIe`ynwb*AqFb`aBkecE^J z?DGI8{4O03s<~22WJ@ZO$IWe#?cVFD-xqyRVL+#EApB(y#bLlSVWqdz*v|)_EMoGkHpp zA-o(xDZ4m5V}C)*Hwgp#_(px`>v<~xK$^57eO*+Op5ky>e;nSg-TDZ`)^d81*L>HH zFDZ~}>A-9B_`IFjiEhOCqrYywVVTHY1N_r44ch7&G1B9{-Nia_77|DWHEw;I*wZP} zmXBtvD=I$?D=JH+Rn8;OKt1yOEen#F`#?Zn+lhhAC5zR0mvH@^C8pH^@e6Copki~1 zASt6}{$<~cf8JYsOkDw(1eF65i7cd%dbbqP%sao^4q8_Co1;ZzgB{$Bn4Kyq)ZQ)< zfNT2YKUYV}zn|}q?1oZ^`qOgvV#(#HgsO0;{nM(#XGU{fNh>8BPFfN^Vy90l$)uON zl_cl8ixY2#J$VLS)R{1E>+Mzcec;e4;OIGk8f0lXe^C*w$I{_>%C-mkxGYbv(A+OK zwY{J=`*6Q#@^RVA+GE;CL-Ywy;=hfiwm3{f`WfV49VO3r+KKyaCI*8<%)Y_)8oYDj z^76`{htw`BF)$=lzlr|J}aL3D`+;5vbubs84tzi)Zgc%5uQ#$t$R!-oO`!;%^YMBb+*?Xa_OC*EDxz0Zt zf519CPlx$<%%rwGZ}-vh%nWziIJ7~Np{nZz+YY>$Pw{@a-S|w#E23 zJ)yj}=raMmdy45>+{YRe_a@Z+(Tiu z{CJ)&-LNI;?X}d`Yr^EW**zSyO(tCcg_>6z`^|5GGum0k{%-Dw0{{=}#X)|aZ717Z z9pg?XVhiu3ITH!NjstMg<=@a^-X~#c%G_lTIabfRS$x;pi~)CxXt|I zGiH~&HeyjTK(3d9hKZ|0e|Pg(F8x_|P0#v1L{TZ|6L#h06+Ib1JU*^k{Fco7c78HM zvqu6-p?BGF)BxUdk&D`V%G+QRM0HJWR%1+RytwZ7AR4q+lsVRSZvE)m*ZjSoMd6yR(ni%MnD0T44=AQ0S6pA&*ZmwnT=D}@u+ORchNM zv0LAj7ttr0e9-9|qTDDnBDclv*{6g>5sO)Q-%~bf!z3~a(4QF6{=KhAJN5zUP+LwW z7*-4t3%xzZS5)p8SlDR==*;B2IG2m%c%jy%qZ=E^&O zj8-98E`@}CC1}?1XEe%>A}!yOP9pC~U|LawW8KyB`jw>R4lVna z5xkXxvi3;hI`+XTa*wlZyDvB0A}xsupkB{hvghWQSc0}L4wd0&5oM|h3K}nwn3@1u zBGPkkHJk_De<-7w8O}L2fqpW@`@QdFXTR_9mYI(|gRv-a$dr;m-Q@o!m!U zoq%l@psM4btey{FC&uh~?{7l9+V7llD}@8-JW;=HpHR*x6f8RfbVI^kWxXs6l0VFJ 
z`#d1tT%S?tuAfQbnO>yTOn>Xcj$)6eO%?{1U3`9Tf95O00UGAPm)6W9jSlSQO|0p~ ziuMlR`~a)xFN;nnz(d=>C&5o2TOP={4H{{YTHkeH_t|z9uXr)}7_a z=C_CA`^`&+q|38v1bcawc8+-lZcCz}>R^8ik|B+1!2H_WTPx>Yd-awt(Lo%F(R8gx zH_|e+e>TJlNX7cGTO7(lNq5}^%{FE&8d6*8)K2DQ`%1|G>8cI}Ll_HhF*Gmp&|bW! z?cGY?2&9<=(RJJ&Orj~Ow{>^DQx-_*Z5rYV42D@Frf)PzGtM5&U0oI2gzj;>o68tQ zW1tscHQH{;%%96baX>oT9j?;KqMgFt#>s=_e^F{0Y<0x(gm!I>7xsW2TMMb_w^d0eLVTlYi|rnuZ4 zf13?yVrH)GL%=7&?&>3po9BCwSDG+S3ToMVa>T~HQ-IRyL5O3j#oKMBb!d$M$2`@9 zIas&>!r(^r@_fHHFm~MF^jL!Qu6|C@*XMonIwj;yiA+hjO_5!e%Z>~I7Civ{`THtE z&*qhxA0qQoCy^QQW59VhPme045L6q-+oBRum}_jo*uYqIpy?(Qh_TYCcp$>KQx8wSYN zt87-CxFdDnlo|Va!b;cC-)e4Fgm3Io*6{{f(ItOeu|0vds5s3p#~V?Ht$?n+e>_{D zTd+NZ_gVB<&htx7F;)54pG2&2@=Ww;Tv!fpTRDaRs9vU$}VJjBZh2$lL) zab{?OU^QvH;%byC?VlFNlwLitiUuRA_e4^n0(d7y?|U8_svt=#KDUwFZj1c*yl3_L zwg4$4nhn4^EY&yuu-w;Iu+vpae?sK*&bnziDsI+`YkqwXSnJTlJUishO}#$cD|%Or z##o6^-1C>;Wa_yA{PjZI89|HkG$il#I*gy-xU4|mL3C0}7a*Na_U!94BK%s0bp-$W z4whKEJ?OIY+U$_jj)lh9N`;;Jp(S}LgiEkh85f7oI+I>#eh z`~*tC5}XzmwR>HK5Sc;SlgDDWa``Zhlnr33I9WWuHtwDv8{XJ!mRxRh>#|#1!we-z z2vQZaQ5qV!^@dUf(vXVSba#AI3kqe|>B0Dxe?EG>M zX!2i0gnzNWqZsPnvA^?RfA?0nyJKSA6QY^NDmyUp-6^)*uGPH0(>RJYte)8M2g6$T zGaHDj4voB=P}9CsfEL?V!e6-Axb<>g=ZlcGZ96~-`-fHfU;hKH!@P@yg`-dPc~j}$ zZ#HOr1=sr&`=J2yo0!@FyMe)16T$rz0QotvMFrF$Oe_FP8mR3Pf8#z#1SdfHu?E|? zlQAO5`X-?JMGL{P}Ka#bg&-(zw#N?WujnQOX0{DEi&AN2aM2;wz^ zd(n>tr=T9l-lJ-uH<{DWk6z-ps>(WGq?ElV-vWyh6aBd#pzhD=Sz%+gZioEb?tn^P zfFVIt{8lK+v&_!t+q@|)^d8QKIc?9^6nkqi-s?H4w|3PYe@HdE{q>L4Of3T~S6g$FRXLr&QZ@_3(ocBQ$Pj1@i_MU!UU%`y&S4w1^F($`{udYry zfyxg=3Wmt+lDm5c{vOvH;DNS8zd#d_ZukA#jE$}A_3ayFwWPnf^s9Tcb25L?^793) zbEHUb2zsw=YS2Yp(iFJGSR+gN) z?s$NMTrg?oI22SSsOF^wf6{O4etRq=5IHL-14m9Oy~!~hN^~3@_3blGU&ir%gxxI9 zta1RjD5~6Z&V-k)#DJY9K$GF9)zaa?eXZRTm9st7K&nq4-kWQTSoaNSc%iz+-m^Zd zH-PuBe+x80?4|u8ZRq!Db@Yht-4rzmW?CTVdLW8Cz@q^EA>uuGLrb73pv>FTe*=2o zI)~I5Ygj$Le8F zZUY%yV$p4V5{G}9)8D?L|56x|v@Ch>IOV zyR9~`5qZWoP zhu3ZGGeqO6^)Wa!i5mm?OqJ=!Mte4wV@NkJ-5=`iMaN@ZU(0|X>|93BD(k|n~ ze1u~sZh}|-q8H)Q&FF4y4OG_;e@sT8ZtjG-<+Ci0K%qEBvN~?wMPM#HeR+&-?#+w} zCLn_PJlAjU;yK>-M$gl^{otJRe6kF!4dY9}24O?x+g>~DZm6g}_rNt!78GiVdZims zKSTi)XD|7eI5@BEXwK9s4OIKrY}G44e;Tqg;-Y*r?3jFYE2Uts03ArTf6j06jg)(N z+T4Nj7>AEC2o4XUI1q`ctdUkbEE06!TpD`}mbY&27-!1!UA1ir<>`3_RgSLa4U&Qm z#ciU#Z!S9C-@@*)-c&2-6+`694T+5r%J#V4896$W(BVDm#%)LXKqbB&mQs@tFQAQ$ z)xtK?j&mOM+u5ovxhBDae@;f1<@)Kqn)RrP1i5*TPtRR$fE6FDk&r=)FqajF;Z7$v z53nkcIhQ1~%NUo8Lt>P3(drW}XqgPQ@gbW%12QFgptvaa9nZeXdr@?%Xsy#dmX<D_3;u;Tq$^48AEwvY5SIe_J!3tmnw+Xwo62#pDt3gCzlf3aVqD8@7*OXJLi zcNI=yxdZUX)Yn7#KbTlIRFQJWkX1W$^2u2*}sxu!GZ z;s#9$=icnL!7{Pa*(&c5Df zK)^l}`Drg69{buOfQ1N9wL9R>cy&?k07)n(+uOw$maVa0f7$+%E2^Dtj-`1i=USXE z)>T+64z3J?gtXf?N+g8pbg?cb-vMdUtyVJoYO`j`zcrGyWO3m6#X4sLD8Dd!aRPWUQY1hzNF2oI5*gqcb2uW1aReRV#hm<_-1c$ zV-QD!@@C5%?C-2no~XO(0zd!MXTn5hgxH#YyfKuwf9Q5@Z}1BB;9Qx}d`6K?rW}ov z*lr|ywoK1F+661n0c(jFM<&oQM8-5ojm$sK-!mzZo7~=suWSM0!u8=k=ZUf8L1Hwk z77ClQ){WVd-}Yh|OW@Gv3j9oR1c8LE$XFxx%@Op7D?yHZW}tb!%R&5j?RN|}o|6LW zGstx{f02?t*7s}&l$v38i;V%4_-AU)Vx-+xL^X}7q#Q6HcC#*c_{Q?QIw#rjup?2W zVehl$ig(^N80W7>RT@rxz0O?9(#t?IR#ldZVnwefYMI?iTj?wv_&RfE2e2u`Gqn&R zv&`I!=xv%8hBnw#N~~?Ss3d83`o2Z_K+0lPe;mehN9;C$_(8hjbklah3&s(3)C6H^ z8G~AS_siK5tCB#g-UA3q;eG>%U*M|4me6aWuF-Q-j_YNR_;a%W23~OHP-jYmVPhZg zhn6^^8n+`)%lm3WqyTIsLC!7!e}tN!%{5ur^COk%eey={wX^b$L)TNN8p->_%)D#+ zf4Ca-z8vge;>NQB#|FO6=p1-;3>3XZ^RIu#XcZaR1(Qn1%S$H$zGUhFRF;K{=O_H6Ex#tR5D5;Pb5%JIt${_c>Qe>&o( zhhKl(V7EE|@rOGH;3vcdW*@J!PcG~wjg;qXiBmaZI^W`@?~b^Sj|=Vk`LDR7a&u;! 
zicu$z6?DlAcdhrR=}9De?H~ll008pSk@wXB_vv_>?hiUC9CO=r=TebIiw>@;&AvL| zQfZ4K#$?}uH;myPpYH>@`P1<>e|5M`Oq^eBdF(R=DN&p~=2M%T{dp=+$)qhZJQuBd zqnQp>%F|!>^3>tRe>&Rmaf9RqZioTOCx3S$4Mca!#H)t8f%M~z=qCOdV{>*UmhNzV z>_#R9Pb!+@;4bqsMzI6}e|kjYi;C`g z{QfqI3@Fy|oT=iaiQ~pwi9{D1u6ZV#RkkqSip}H*u=Eti_L-!N#(Da?GAfAIWEVKZ zl~QJB(|7Rw$!h+5ALl6-#?ML|m)QZCl_6W2d2m)oMP~DCa-_lOJpFyT?{pvh95O=1 zJ$`>Dev9wvSYohmXX$|@f17dI$a?3GSPJHxGiD2b+H$$Tp__-}nFu7cfOGcRW5q~p9lcpf*Gei=_?&_VrpU~E)6Tm*i9A~cvN{A#t+ z-K%1idBEkyjs>^`f8ZGv>~j3H##a;bAV`2gs&Hd;Q10E2#9g!YLErO`9aj00m#=vO zHy!xt#?PF7e#SOaV7^>{QA2LTA_zWnSQ_O=&c5U*6dBCz&)oALStJ3J@FPRj=R1?E zP2WR8%;dl230&gpM_wSWKeF=q33(au-RYX&|L_by=X}2Ze|bLl`+0C&^_5B9zm8A$ zp1$M9Z?nkGklB(Qjp;h?&-Fc2=!-PV1t`A1WfoefHFk-@QimuJgp_mK7)NS@rmaDE@oezo z2F7K1pyWE>THyqdgE4@0%W;fg2~G@v2|ws+N#JzHb!e=vQ1U^%f-1J;481BEN-R7A*> z0&BG=GMNDkbGc1&(tWv1JJm&se2x9A5sn-yEOj@OFlH8$D~bThWxnbu1(B2^lNo5J z2L2X(6m#Rf6CKDh*k91*!DGEMPLDP8X@cI*5wkM@&N1;@BB-i7K#;LRk^LmMkR51R zrTUhSe*orJ0GV1$^7}1EDBw4uUotzL_pPHKOA*ff(NB<;M~&cDOD7@mFFw9+IR4M;yYy2$NU-U8ypZQnPGdy!1L_n5~!F? zf5+=R<+3BOGKgMbYFKLQLkkOrwF(N(yFf~wAPQl&PKtnK-r^bzEuhkr(YIs9lfDAK z>5@?t=&J*L+yOYPZzp}_f5v@U|6lrQ(oq=WNf$szLBBu_e)QG%JpJCwpi{oT^Ovrf z^uy2j^0yq}*dP1X*Z%iQA1!TCNPf3i0{!?$i?u@}G9={ZLHn|Fjg(#WeOSKXJu z3(>L|KNR+MGBb3L`+j3JhkED53$E`Q1GM&T--)d2-t~ge3sU7iJAYaNIqP^(0Sn5PQRe^ok1 z%mW!Zj0cS-Y50R?dRJb`hqnZk#b9sZ9O#Ln)spk-;Wa28a~vISdq_T z?a*?s(#(Lg6zghk9y=DWEv-`J-5ffqhg>;^j7A5mMl2H`y#bHh)^W69{1aB+Tq#G zN}uh-A{z}q1{rV73FubRddfJ149W9)?E5{+i|Qk)DpeqkfeaRZbMiuBe`8B=GFf1O zIOBk~&0{vO4|p}Nfi0T9WRL-2oONXs!GziiV$`kmfuOit;e>Y&od_6P25QD_aS0)i zN2g>5W#RTY*8Kh%)Q3u7GFiXfQU>s^0AJ={D#J#RP}&1Xdx2a_&T!$KpaYAhA{TAu zfOUptC=D_KUEPIBp=hvSe{>yf^*rnasTWk8djuM5h|FMsaMew-J_2vKICbVb?gzapFu>L zF_4(Jz0dTS{kGHbJ=^77$HV$jrLjKQLI(&CXKP9%q=eq0J;+BMoE5}hWJ&=V368ad#Dcw$vw zBtL$_jzxe&Xf-bDm>lS5c`s=tZSgE@sv=loALj+g2ZV%$E#xwNa1neI^sq$fh%Knu zy1T~a4s+{W9`-%U)G#4BnRR>Ezk-Tq&0)uS>KUX{UQb$Ff1WZ0?paJaCH6Fil5Aj- z`VJ*&h*hVMX9&hY%5z|KIW zGX!UE)yxfr3Y=BYM`P%BvK*)^yU#M%6*lxB=K{TP*PkaACYzxYY1l48t_C9mTs;Tj z6fa≥3+kkYQ)g7b9~DAgBTmfEp-Pp;Nw40+a`#$6rYylNbw7P8CiJ-+3p#2VKpb zi4Xh)xq7I5E*PB9tK%SAf`S)9_qF>tp`Fs>eS-}P=Pk2T)50F(h*|qV=Ru}?zU`+B zJ)DGz+#Pmq8a+ek!H=E$Px?Qij4i~xBUGn7t^(iFP~rerS~R#3&(zJrab#=FZ}+u{H;U(u!r$% zzt5fa@Grfn{+6>J9}Y1E>^Hvf6j`*EPxO(!!y7l$4VDa$8;%`{&0*{qx{k(S$Rpwh zf59FoEfR|C#SYhNm72G%CsPD;3;1iXf0w-tb_R4bt^nc#+ri!{*ah}B9^m+Vu54k< zk=8PBvES7@4mIu^E$)C8hP@wqbE|7E=$d;)1V>A%zVpaqYshkk9yfqHG_Wsu0=@y| zDIPufYqD78080?8wlNE?xFQr$Pq7owB_NKF10K^sl}D}^ut|J*5G=8%Q}`p8e~*VQ z=5;C+K`JbXhIPjhjOLSF$4Y?Mne?4CpTC%m0e#1!6C;Pd3v*CJ>XW`xK|KXIngOQ& zmk;$@hfOhtZ`lhT!%dl!bxVOmT4h38DQJ5Gx%7w-~^My z8-x+sVt6NEiUNt~HsV0KiMdsTf^ucI2T&6waPOp&;V&xaXVUm?noHplnq{? zNE+ILNziImwlvT%bL_P*k#Sf)2fAWSVVpj3+H9%-N+=@)!2 z<<1~Z^5x(D>_t;dkm4r&@!Lm+O^dbprGvg~Z@yP72ETvC7<2K5-!<)HKl{zMF8N$% zibs+kTkMw}nC2&2{@Q20fA=2vWizbdFQ5MVIezJp>7NKJFH|lwgBAtYN6P51Th+0e)(-2q2z>+-Q6rX>CJUA!)^de>4B(LnpAnZZsS! 
zgDu02jx+h&JRdDMl|YK95Cj(69_G;jZ2Due9S}Rhy6zL`A5@KqCEpt+CjgVVS3HfK z#s#T>0XoC3Vj&CT+g4df1_5h1b?+8-U2__knq3Q<$Te5B-0VhJ?9XGFUd!$9=`oA_%$CN z=xdKCvyZL&r|t2NxXNem`D+b-F zBR|ug^pW2@zTjTiOMdL7j~@An`Fz_$*^jOB%O3i%^_Bk1e@~p`3^K7S{?gzhg^9;;Z-unZ^E=EiSn->0?`kufs7-y2ptI;=l|FFdy##&Z71cl>SpeC=09W{M|54xu-ef5rqqdhla&L7pdnZTkId z-`Pzuf1r=u^|cOmh%a8I=YWp*`5ThbU$*uyUy`u~4GmyO7p&buVY!fT^*T41;JT9s zQrDdfbDQO{FTfUq;uYhBtG1%Z#RtIu@W6vaJR>`^1MFr8`HZQOw7^_C!4aKal$1Lz zs=W>{RVLR9`X)g3R4C-q*~tV0qyfN&jj`2S;^L6VMYkwIpV-Un4f@m>H@o3>*pti z^Q||&r)C2XemAS$&etHi!aYXiN7f52}<%soSF2kb7SG{BzoltojkX~4!Br*%9G zJl^IBOM5F@hwm|ub>$R92HtsOmSe1(_$ms@LM&V!&$&_^0$eUzW#v(@lYv&D+Q^T0=Pv#VIr;2v1X3Ms~#0Cv@P3tgdzyklX&?b#i1e<=XU zW8P5nBa$j&3HsS-Wt6{}qTrT?2ADv7E81*(p7u}ZYL4{<#CkF{Aix#92pR0XgCOo^ z^pNpPIqcD|5W+=7;*FFHK=S${7CH1aQ-DdLpa=nRE@|?sP@z2eRZj(}qb9V7?OT^*wDW}xJzIBkqe`Ej`2j;se zcbdnpM+!T&&JPiwGlVBYw?Lq`of_51N}&&NDIs<#BN!_*U=DGl2c8)?Mxy(P*GfF% zwMH|=YvrCr`FlwM(+Vl`kG=4RO*!R#f5mI3weg4jhw(=|;+LNO*!KTepP%^aCw}*p zJN(We8?ZtCnJ4_t-Td0qe}BbUzMkdV-c(Qg86}T+7Yjv)B73dOMU%p)GD7S%;SL?b z2MfwDj)mZ1MWjN3^xl{l7HshNBiC#x9N>-f&O*E#x+s9{iTu@@FgZ`$X;nsH14IsP z+Rq_d{x(QaL(e%hsIahSvw)(5ZP<@gsv-}h29P}`&P^gR46%i)e`$gTiIMkUSqVq> z=Cg(Y;M?mFs77G_mWk)db`Zq4IRz!P;kX?Ed+{^bf-flBdrihLuzB z@7iNRk{b_t9Z*mgQfBpPfAK@?G;2$>E$8VVS%wK!zx4rfgGyR-z{KOCb&L4l`q0@ci z>Av6b*vZ!X@+Zylvz~Xb<-U$Fz~6HaZyXRwM~qK`%@7q`&$Ra)=P=iXVPi^kC_dXHz@1NM=l;uo@o7$D*l95=MC1iDzA zNS=tbKC}Vdf9_(@Ax5jifzqHSkVkfocs~>+4y?zmP^X}{X2p7OMSbE30cs>rJI=8# zY}@+Eo7g;#nV^`lEmU zioO3~gZ;U7KY9f55%`(Eax;@HH^q^Hzx49o@%S%2fApzo_?LVyuKCFs{>1d*Z+xC> z%J-aMdnpkiv|R64Q`~r}6XP8U0Zst0j--e67g{lbI8F{^PJ}(2?no%D2pbCspsmT5 z2U}(`?2(4SA{OKFL@4B>iab47^b^J;O@M?bSLhW=&TpTHD+A7C#RXR_pKHv$Glc<# zB`n==f1{DgZ}cyq_5dn_3f}NbdvM@5nF!%T3=)VEE|rRxcog{9lezYm5#aA+1j2Bs zvpgOy%Nnul9OTIk!G6#4?-=PDxsosafg;yGaxb467RbUUp8h+| z{pACE>y3}D_|Erz^v;i;_#>C!GMNjIlk9tM!~K5h4sz7c{P+f6x^8Jq&y5%dngL6e ze_~o`+5@0_K%$Z=BunKHvveJrlX!RU3IKE*h&!8$9L6UGNOq+bQ%+8(CjEn6DCEyv z88s^plTqxTN$4Hvl5S2sSd;+4?pGAVMHMfUxWNe_dqBAc!Yqe;PmaC8L0?XZcnE9} zGhlX8-33!1aMFVjhYYSL{q%vvSqen}e>NJR?DpWTW(-(#UC$Q=>go)ZYYYNt;3nd$ z2z}Ov5RpdMF31TLC^mxKj%yx^U_brj*m#dm&10dUeu)q`H{0om?E5Z!M?bgFe`RW3 z+XChdYK?ZMBtTqtE__*5_mG1WRWGant_=Bxp9|ZyR$!lY$T>Zn2LF{%3yAf@Lz2fwd*QVV|@rH>{|2MfIR1@c(BP3*MUCXcgTS{rqK*0<`g-m z!7@&(E%phC8`13Yso5IL7_l*)Ik;v?%HzwzJIe;Xjq_B&7bS6$wu+o1bCIum(8F2;U0#qK75_j@0% zjg5vnk^Px&6fT2}yG5^LtTCu(I*?aSK$;lm!(JCqA;7V^kG5Zdne9=YDUaxjL4${e zS|i6RvVBguLIvl~rTYrhHZtJC;BWmI6A>ATfG(j!t#CrbreVvho(MVIf32ZEvFu^g zI0AG_h|QrPUULFd=i)c0tRP33KKXpA+ZDRg8TyDFiOUI+RXBYRc6vyFYBvzXB0($w zaAz`uVp7@>9m4&Ib2AmXC=u(^Ii+!HKnhl@E0I%Ks=(}~yxhz|z8EWz*}qN1WTyQH z>-on{{TkbU*8F_=^MB-$e=(N>>WzNS$3NoK{~;guyDsTlHl~`%X`lHIbq8Pj)ETxK zd^W@?eWwi0E>LFu=H$dKT6zZe-5ylbR){RzIfp(-=obY7AVPfIO|7j(fO;GYtOw9f z=1(me^6FC#T}9Q`Q6kZ~SQk9pGwijL7k9+L>HzC`;d%UIK&>XA31^;A_4M#q(*qyz}GoRlby^nCTbes zQ^3#5GXZP9;yFtO@@|Dz>=8K)>`#IXtOl0NSBO$5*kgOR`y$w6vnl5{`E}?opn0U> zOh%^R*O~A>K7JkXe^-shKlAiehS=e+Jm>GafWPMZ*Iw~iL*KT_A8~>y7W9#YX}$j5 zLwN9;eC<(Fp6%!Q)BKbFuAYD?onB*lbVsyH>;=#-#8n;WQuAvK_9G`VK;vLxPBelc zg02@U!F??#fV7E-p>zYQ6B2Aq_#9LndBBU7BMm+HFVy>Ce@unyvYYBnY0;_(4>2`y zV&LZ(`r;EnKY*VA3{J@q(9?_T9}w40%o&YOfDq2mpQA7t!Ud?;K$tk>LDb*~Q@(@9 z{B{F91olh@l-}B)x`Kp9~e1#e@~h7kQRIZAM{i=2Mm_LsO+i3nXl%X-bAkl!R$dT(veO(#chy}{`lvp zK`N*H_>X#^ul4yg=Ra%gUt+!Z`Q^9#=@e@^5WZmc3tXL=R245}J%v8X+8 z5cA}zjaCOQZEPDa63{_j>*$@H>Prz5MRRkmywrELhy9L*7aDM=Q@#%SFyn}74Lc4f zfTBy7$VUpOE!!gw@JLw#bXkVxSin%jb+LCEE=ageemf{mj3gHto;jD%7olXL&d_PH zDJfxKe<`D$^_&6|rbCma1a_dCIP_B)J3G~9ZF;;j=B0EnFGLGiSkyHPlbuF$AA5Y# zm%nOVe$B<7YwP>iKjUxTF|WUU`JXzywD-F 
ze{ib+v9(2AFozf`WVGib@?e0;_NA38K!gDhJP&lkZ^MVv*1?d)^>gBcLrfNn);(== z;GRHuh@Dg@?jQN!NXV4~e7^&%d+Y!w$b);I^-_QPkbmfg>2HWZ z{iAm4OD;Zp_Eb;tRSyQa`FFLTI4}6IxqjJZOh}(Khl&$I0NNc>;5}h>TBVui09FAQ z2WqNC)I*9cbub9h*0=!nkzq3~RC3vOTt4t@gJ3)nqAnCR$Q1W3u&&EK@;q9Ee=oPi zzBtw9+WxzJvPGdPU~9vgi?Fg?-Dwr%Xehx|0f*5qr3n%m94<_Wns+ufEkEmxMGZ)y zjx}-$n={znVCYS?FN_1V--BO_$=Tglm$D}^dzU`MV0u&l!s075^cB=oPoSO;NSAqw z<^`wsqJYHT4F-HilRps?HCTfKe_4*`seeM}N(!htsQYyc8ac8lAA!qJFNHzBK_SFa zB^ZdUXi#8?HJ|~R$qotcT1sC}F%FjFfa1W@PvnLcj?WvdMP%E5E>k((bZO=5ntbAX2I*n6I^>&C0)yU1E zxl&Yhrx$k@6g)6u3`X8hF-8{k{vvb$!kXPm zbkP^;=7=R3$_}ysJ~|+((i!%2jLQ>{8?-F5KuYKhRdJStIM7!a_5oxYu}LEZpPzAf zhx&_4fR|fy#x_60p@!@X@hhr28 z9yaBXto|!c`MZwze~-QFpE8U7sV^VqJBIe#hJio$@joU#_FL!W!k54CJI@XuH9Nz$ zLd}Grs$5O(r74c!JPE~hB-T_Yx-@B;AavBo*zmVadAQ~QUQ zqwNpZ95pH~V#kq-JqR&TP`kjNb~~f*cM?N9ts!v01B((Ee<4&C%EMw6!*1#ZH7$ra zqJ1ext{HWpeA&f3umv<$IiRm807gK$zupm4u0hs;l%t9)5ow4=$f(GhPPZFqDa*_8$5U`UqXYJ^EH+68SH5&a&uDL(Rv> zmKs;mM}KqBi(LG%4Mndrk&l5*J;fBjw*h~H8JsP2BVsD(^I-EUd}&D}h1@M*BaXOH z(E=F_jYPnRM|@-Q`A+By3I!hVK-m1~2RUMV9*6^~d=Z@a(|;22AM~T;lm11DezL{C z`ZrIEiT2MQ6rfKlWX@DmY=fn&4`bigf8WSO_-S=4oof$fK?t@JEYQ0q1Lpt1$%J|Y z>n7mh^s4VSyqlRs4>)!TB+>)2_9)f#2b05OhOooY|M*QU$nd9`$0xg8Auep0Z(KJOmy z^ED3t{0|6Ci19lTW@73()e!&v&^IEb-g2l5MGHA99HzDQkN>7yZHB4Y^n*b4JuK|m zjZjuQ9o86Lv1tlOOo)F7002aDDTx%-=RX~gNS~4Y5{b`qPwDgNA@K7f7aw`~^vB}n zKl6~|dVfr7&nHq#`~^!vQ2u9_?JDSSeVnKO;C9CTYO-M@6 zP?mkAc%Z*d8D2-Ev;&O3=zv#z1MN0W-B;ScAb;QO+=T`7qziKH;;3iK6D-@~)8 z+}JbYvljz1Rz{&h_9$`)MgNJ6dkpr0k~xd^h4`R8;JNm{PxfF0GI|372jj} z&-YMZ9dXk*-+&p-^<2XO1_^(kGSj#ocV<#WXBMAp@JTK$*dz+;v_oHN$AI)1C|yAV zD@%BOzKiP`&HzY2Abm{Vm+MFI#LVM7!+%*=6x^LM0#s5Jy1mHNVt!jhWE$wJO=}BgL|HXus~JQqX?P?kbfq1 z5n*f`(L_;^aF<32zt_L!4)P`II-@${6FLZH?t3J4j9N2Q`PWi&KRsW$WsYx8?Duo3{CSY z7EOdPRXwwCe?0^7Utbdmis?Gc=Q_W~Xp&!4mk2Jt7juiAFQ@g7ZRT#6P&~XF;yK)| zQo{PCcsM?es9jLSPI3zu=c96Rx_^%Q+76%#Cz+Xk$G9dPP_sNV`~*#&cYiFdImr)T z_;K8c&wG5&mbhgdMp@?gyMe!p zu}5OwzmBDNz8X>D@8eDfKi6}}Fzyg_hF)Rg0Fg(?HQpO?rZmtkHUqHo&V>%cF|dM7 z^1!377S}|;3!Q!#KMvp>d4E}&1)d-8k8y=({p+(k$uH(~(u31A%JA#&kbk60@eiurYeJMI(fy1>B(!tol%jn8qZ90!*D98Ymv{5sy_c>FmodpJIPjvsMc`8rPH zc>Ot!T+sCH-}4U{J+t5#$twj|gAAB0knx$;ZKtCS1LOCzE~8j@1QGs*U7gL4d%%1@ zI$%*Khztpm`We%XOMmi7USLUpD@`1wQGk9}z^dz+j-l^(N6rl)Pw1w{7>ON2P2*u= zO@Vg}8J=kkqyY-ULDn?wN9{F7hD*^lpu`)x@^dY`A2!$N{YtzavL1Lp&cORA`SgA& zhvTUG!0`shg|Fk&_y%9cF}`g1Io{&9{B?X9UpS7__+Wg||9>zY2V@FEPxFc2U)U!x z9tGC(v^QanH=n&1V+f>F=p3AbKo#@SHuwj)KcQA6JXqqc@dMpG=8b9+h{vm?{uyVvnuEaa4yzj_QxuF<=gi;#S>D)KY zh2Fqg`G0plfq(ZVzu%jj?(=g#_J)7|9+*G8_kVuRuj7n(|3(58oSE`22QXNK8u%>lfM`i?X0iOtt{qyaz@ z_}c@x@sXyNs4C}2?Q_3r5BkS<03$9dL{?3<1?m^M&gmJ9vF?npF%aFF>=;DD;;sir zg2$xzd1l>ff;8^5ezniujPK*#g+e5F=LC9H{dY2kW5WH%j$yGcCa~k!z$}t!d>Ldp zUD%i4Cx83wX(@y3=)a78fi*qZ9B16Cut3()fwUYEO%CTH{Q319Ko{xR8v63z=v!oc zzn){mNRw`dO#wRt`uby6{F~?Az}`cz#>dW)9Nf>qPXvWHAw^|N^4If-&_z6SAj3o* z$Wg#lJFPp!2ASvgd{#C;$qn(xIk1B!-Hoh)TYqA&n4S&m3o+k$C z@0ce8w&=H*lWerffB5W!@njQHU&pa_YvT!IhUvTNXT5kV*3e|PPIk6{*ni-3 zJig($_CGkTPxr=i_|WUqXEZA^z{XtkzBlR3M4apiMc7Yv^!NRz=c6#6Ew0C!lRWz4 z2p@x$rr!e?{kY@T?;d`4zW-iMzkmPz>wJ0oJ^VWV>-VqozkdHZ|Lga0e1CASM%-J( z*xVDy^QQAK@t^bXE)zb* z*Lf#>@>PV%hC?;<^gG&?@Oy#ZanH~1uzjX-p3m>q*ZKMMd;N9(=XcmY)A^s@&tK<% ze!qO4gJFf~IY8}(_Y#n-#&bxpgQorw;Ug2b&wDxF=Y73D`*j|4?ezY^=YPD%*ZYf~ z^U|;P|2}WBI~iIVf9s8Gvd{Sbjt;%)yTdg1ASi_1!TGZCwMM7!Y*_Ad{$vmPzs?6` z1?ZxGo&UY29pTsc2iR{6Pu%~|9m~%>u-`b-Gsz#h-r{`tX^Z|i*#bDf6O`%P_2>G( z&c*yWAWr)}_cT4b|LffC=YP6?t{EBg=rA*!&wS3Go}H&~zJT*N=IeZ1yTrK);xP^g zslLwzi9fFW>s;tYJjbtdKmOGG&v7h1&kdXUGcMeupBNi)l+U;*+1L5sV^V&e``5Wx zuekQFbKyHX!tZmjE}(rrekay^+D*B8BAj9~)1H+$cqUH5z6UC$BY*slAKOuPrdSmZ 
z?vfHdnPkD1V1M?!49~_A+9$3Fdvp5zjW{&RfoP0j4qP|j9!`71Gb_%$<*{TSPu=uc z#pe&e-SqkCAS?;F?K}iaCcP>k0j2|!4%*1og%*t4Zlvc5qZIMaxey*(|;rGlAt31i~a5+lQetn)&^WdKQ^;!DIag#ac?D6;YEQWV@#D8yjMxvPvz?uvQbN3G7 zv*=GgOmRyt2fh$(C{|2=b@rN_v)LuvOwZBO}D9Q^fT^k>lpplAE`&bYE=K5Gs4jW8=j~Mp!Jx(JdK`yW= zfb9qsJw1PhdU3X!o*&N}ea1IOw=T~6Jm2&@6B7W&1%DO*RBN z8%?x~jQ8yEJeV(x3!WRv2y|^Bxq$P2#-EunT>bxpy!^Z8>ZWllrg6mg{&_x*<9TvC z2Tx7VI)B)TT6}MkFR;vg;{JI4&-i@iZ}E|b&$#`TGd!0yT{m^UxU&HvxmgPhY$l4Csa9v`{GeeriZ7xF&LBv4M(DSyA8_rd#q96swJ zM)TJ9do8E)A^&K`na=xqF3c|?i=Xci439%Oj(@Ea$VbcQ6NCuv_t*NvXUN^xb585z zXMQ<)dT;Cl{q&qT&%ybV9-wiZ>HNR+3R;6c&yDrh|0%~WMxZCX0+Ia5&+&8geSGKV z{(t>{AMBGU4>sv-C~Yu+oEda9LI@!5?F5B_^+inM_?p%w#&w+5H{_p0J`T(w*hF|R zkWBj%c2;}pyI+RSJ-TX8Gx8r0ZXfsO@iAVH|I7dSfBzp7y6yjV0zvEl_5ItqY{vhl zfpB5>e|qKnNdEWF-@6AWC;!`Jxwmw*0P=EdW`Q3UbdyL4qV{-7xy`4oA2^=>J`woqKd3%5uA-_VKW6|~RJhwpXaODqSGop1#Ee@& zOgKhFF`k@XC?j|KltW>nkb-$YQRTpS`D)?Qq$ApHn9jg};!(Hj{VH84#U6^~41bMk zRbsx=*NPl$G&p<7JeC%-3h6%9O$MukH@^sr+i>Nf;owGsr-He^dw^zMud0{1u2hWw)<8rg;wg+33 z!t~Z00A$>WNM=80MO`HOrkc+-yW@smymtK|0mk$wIGf~oc)XH_60h?MaepECrO&nG zWdkl$1&n{~5#Ur#-B7!Ivq!AtR4dO-aC;n*Mk&mXxvr&atw~nx5HuGdteQgiEz>q0bg8 zt=_)t1Nh-Bwc{838m>J^Wq&<1h({VTibOD%3h`-y-omie`fNpN60aP8P+!8#yNDVY zY{GOK-*09b8kapE-Ap@lUtx7FD|1{mr?rzGu6yIc9NBi2lq_um>?>S)?s_|mIJ@26 z0_*-**(Z=%rki?v?#OYm@cS2apX>Hawdd7kd%LIFg5IR>$CF-c&41>Rcy;gFECQ?54k!KQq<+WvluwIc<;` z+3z3c$N-oEEvz5w846NP+39>jXTj~jU%{mRAl`c}T=U3MJ>KoiYWJB#w6Qi* z*y4(C!IE<*wSV3p@C*ZYRC@M4XL)-@pg&xAxw=lcL3mQsV7Tu2w2~L%we#C^AW_!1 z0UbN5S+|HkMk;}2Lptk1H+9KvtonU#K_3!ci{_D!BsQ)@$nmh11ue??|p$ zP{qm8ywIK(oLL2N@kJdW`>gh*CXP-6<^_+9XCCl+fhkE}(N7Elxbv&nCRhOM3fv#} z#^Pl+v)@aH-0R71JkWgC6qj)`E-OQjHG(7*@P8)WhiL7pjSmPTn1Y0`XRX>v;Er*z z5aq}-A5;z)0Sc)Dc2Zf2$@6htJl7%EDC9-@2!!nQo)JkOodM(BXt&7BmRDY$Z}ONb z&|S5*%^ z*6pC>=OdAT+IW8TyeL~SyX0ze(k3VK?tfZkaZdJLeN6V9uz#x? 
z5Wp;t@w_H|eH}lx7qZy5tHtZO)Tu|yb+5&<5T4aKo81mBY2L~J5yWFwzPHVzR(H3n zlQfpGj)5XjgUb!da6yT~vtOPbtQ9;hi$YKJP&^*N{k^oV&m27xL1%b7b?3OvWqG?Y zwa7iJC|BMoHYk#w>p4-QMrpEs94s>4XxQv>NP8aW>PpMl3mTW-P zWXI=K2icf@or&sFXZI~VoG!0mLs;Ub4bwEc5vT(^q&t|toA|;~ASgHY&f2HE9YR0P zaNiH-6xMBsk1m*>88X|-4sRmq9{3~J<`*1@$N18SOF85 zb8$iTV78pPm!6NjKzdKY262{JQZVA8{EMV-!mylm%eMj9H zNd|FzRHvM^xN(-rzil& zR;i89bML-<)qAas$AG)-xpQ(~2h};d%h#npWZ)addVR`Z#JS7yr5X!!q(QL7 z610(kp5$=17K_n{fq$Nr=GnaV&JYDofh1&LB24TbE(hg zoM79=VtsvMWqH{-Ev)lAJ`z-*AIo7|Ecc4HqbJ_v=rK?0&411~&5B+FegFD?Js!Gp zsUBBnY9Z>~+y&!uMrVil4d!St??`~J9MkM{vY&P&dkdvooGyz+ly%0w$$$$o*0nHi(lZCdS+sY#nR0*f*FjTOg0*intzm(2R)$r( z%q}|fxZ1Im9Dn)6&5vVdzZspy_IxmGoitJ+HMYUdCOo^pE@+0pu0x-1NOuMMWcKR0 zHT9~Uz(37b_EX9tf7w;`BTZ+!FUNlsxk!iy#QXent%|>bSTZ=j%-oo#?dp zUdw*wE-#>iOZ`PjXLIp{&P z8jNCRf4l(6<8m#5=15E+Nxk3S=(t`)!ReZVn5~j(aMFef7~4QJ;>V^Io!`EA(XRkY z&EpKT5WP7{K4?ygFy^PhSdokCs3fD3zFR>tGO1t%^%Yf2$ zFqS@1bAM0PbIa+k_sHIC(%G(jI_dnHCeQ3}J3Y7Y;$+VE%k%CwkJ{p-H(9IB!B<&& z&fKKHKy*~H8`9D4InE?BpDq?JD}1I$1Nuc|RwbQ-I<5+xcU4O}FQYkS2^Z}^F1h89 zzRP2CJZzeFbBad9 zEqX4STe090(zNFy|MuDSy}m7t-8os`+_W6LOkA9jG$B;rCDLyM`0tg(uH>BFt>@Pu zI1@q1tA3(|i?wvrmAia@Co?~LTRoW&11gvE`0Y}p%^qJ`mf06{NRIj;y*-=7lQ^vk zTYpb=<0?_hTx_h$ZOQJP+-=@CKW-ye3D$bs1#3-dS3z{$pEOrl-PPH`TQxN*!y6-; ztJ`sXuu0+Y>SnKm>+HN5L5hAxJpO(Ub&asQYq7gEZ-0&*FH1ZUUF251L@|cC9@gVZ zAoTK*ZExq3S{~0kIbrX))@=L35qUOJn19LST?_qg^>kHnTRkSO`j^k_Smd>fW*0Dp zw(N$v#Th}lzh~lF-epRhkvnv3oD|gUkz0Y^AIDW86KVW9oF32I7Yg@z$)2#S7J{3K zHsv4E0SQZcx$V0B#jdxm7PP15QPKyRC2e`HE9A|aSrNZJxf*PI5pnnJdu?4SWq)m~ zZ4`!awz%E#xGTA659DiB_4Ah-8R6^UC--&W&a%>Y3imwN@8+xLmLloeuja!OhIs{& zt;~oo;(JlNFna`$#BBE3>z#dYx9*_kYuhyDbhc=AD}S+b_1#;&rx$&5 zTQG|DzLdB8*_HOOv2?4Y=68FsrY&{1nn~QPZS4JvJHh}FlHMO)NN~+Vx@AvS`(nQi zFN@B%p%E@e$;(o2e90BU3DzJT_l-h}^Bgq4q`JD0^{a@{i@velG)dI8vIh*%HKy*_ z+H-D_Cb;*;c-lc&&wrNfD_nA~9vV}+2ic_-hvPoo+)~49bcJ_)D%id4jqLj4Y~#_| zOp{kkSKeofbfh-!PTEAYBhQuDsocuePTro#mps^A>76gi%d0iRdI`b`&;C3Y5-SB zG=SgRKf?Y5c+6xz6HHzRg3ZFn?nmnK0Gk);kQ}yPuj#7jF6^47%+qGM1`OZqJv(IX>HpeNx_Z zc9q+jo-Lst`hUe~K(~-^#c*%0g|&JdjQLPJPvZ$R7pGD_4&`dQuXg0VU-h;qEW77* zB-?o{q(kE_;8QL_uLs{62+c9+QX@sXhD0aJ=$faSJ(rC5IDb8c<$_yjPc10A`Tn&# zgsY>TD#I$7@88Qyv2>$uW6;{N@otN&mS&w(zSyfR_q=L;}Nhh}~3JTc73 z&XJFM)qj5mx2-MKJq~xPek<51eZPC_ zwRz@zgTEmLd$|%DET+f4oxMP0Timb9t6X-*O!OZ8v%mF+#ZJG!SEDB<-kku2BslX~ zYn0sfBFuJ13z{Z8Jo zp|72_P}e0@_N{zeqh8Vi==$N(6ATC<9fV&z=6E~y-D10O-u!##$2%ix;#pQt9QqyH=1YE*VKb~;4`TQr0 zP|fz{E0vNXHSk+6Wp@5HA$5!&&t!pG>?PMv&$p+2I4)0%b9sBIb-CxMB5-WIpIx=p zxV=B#sus;T&#w^>MuP1i%iGOq*v;+vZhxkU1b_;VZr~*FWh;R0i)j6kv{}$zQ|=P1 z<|s?vQzW^SkHQd`G`djD7v=_@C-mwi*2Cp{AAO_9eDg&Br+#-3VxXYFLlaCTVmyttSfjbA^zPEa5nR!qpj(MIg5;1a9T$v272hciAlblocbV z$uIgbPixJL*jIJG4)4M_8-KwDjed{!R?e3idA$%^p^v^C@vd8v8uWS;Xhuxyj!ScWNnG?LmHGZ-298Fs43f3V9!h z$I{*H)XWS^6NbvhTda=v&653O(S z+kElpx~rnQ?+uuzt$#dvf`XM&+xbB!bKvxbms!{mjn1C|4NGokdaZ2zD9cpMo(r6* z7ZFm2|Dg3;{0pz=^w&T0lr@qJmq(7`+vw#+QCRqiC*ja$>&br1HAA; z^3~{z2o!CJpRUAD!Jy+@grE-NWkO7;Vi8?{Q@PM5W&CM8WPi_@beM=c?c4O}(*Q{s zak*I?&)>wLZ1W-7{OW@Ie91``*pH`QK`YEKuZeDVCmP^`k4cb$0{s=dpr3p*636&& z5u;Rm5q*e4@p1i0!hRCcoFmn_9_=NPnAkgcbTQ(Op9aa-g$1}z z!IlREVvF+^aepX+-=A((GH|v5DJ{o=P=A3)pKz)?}rGT%M0QTTlE^3V+|CX2ZRwn5 z+?;25SO|eII#qaAui>pZi|dDBA^s5WPN+=}F>a`Mt7vr&3TDNjisn`t%qYZc#Y zK5tt0c~ksjwX4#@ z($w+g2Nk`2EV&15&o;|tsTgbLg~fKx)XQ$Kr+;p}K{fVL1m~f1%c^KrYWon21Hrsh4eC0g(& zzwgl;ggqlbTU`(h2QLSDeYbxcbN~H z)yp>C$fs_Oc6>jkbVTkDIbFkU1pp#oReyxXFjGt4UZ?5)WIpVBmfLgwIj=yZblMbN zUAk4BJZDMmy6z(d{rQYNIYz>E?%{kf;v5wIhw(((z(s9rBiAa+8ND=rY45xQeQvFG z1nB!gbD_(gOIRCIm{mzFUAwnxJ(iu6ZR_)4vLjCYs4#e#8 zC9v{+?!jtC9*HMOSOI9VB_m&Uv0ARtzF+|{xY 
zTw8AEYx9*%AU}(+ZK&YC@$)`%-_lz}!I_U5cq7cgXxP4tM;x2Q>~+81G-HhnQ`iWn z<+C>2@UUtpm#3P4Q`c8C>Yt;Tc*f&7PbB+-S^yQC*1xqCE4A)p2F)JNIzDF5KT4zb z%q?YDor(MLrM`;Wtr6}GS^@U8*zX>(n_mL->DH%(emN{B>h@+QuPTF*{*1mCY6e;S zbyr|;Z>IF?EV+9pTE`$qxs^`aI)C#%zpiU*>7hmrFSP+!9#)&7U1; z<5um^Cu1UipO=0bQ+14%f@942!$A*hK5>dF0u*ubAMak?P8rJxM@MR_Cbzhj`lfj~ zkyMBgT{ERw?N%C7Obee^VW=t050?hx;KEsKR@8iJ^`Wyf=d)IdEkM;99;M2?P2Ri7 z^Z6L_IY<##@$5MF8`V1g!$h^$zTAg-LA!grXYJ~LHSL>iI-xc6 z7wtz*8S)I!>{zDrU=g>)wm+@56@53E$6*AASa@+CB-rMKWoe&jZzV6GrcLMTJ)b}C zm$@fp&n+P4$(lVXpmj6A*Gqer!@4d>IwrC$;mQkA`|690ws5pomP9t6%*IE0=htyo z2LSkgHz^n`&D&ki)eG{fLqo0Z>P2~nvy76{bv4H*_^as(GEbAvln{eq%G*4b`4`%_z&w%7celC3L+n^7 z@?JQ#IixrD@eE#)*8L#E3g4JqdjDzM-Vf-1i57b*SuNR>*9gl;H;R@0rLGS_Q%xs* z1J1r17RrAA+V|n3k=GWMexqh^cy{9_mckNUp{uRo`ye_>%nv|z<-QSM=Q@jG4la6P9 z77ku7%iByq!#GUSyu^I=T~~T!A}hd6%5tn_J0-XD$SmqNxE&zUM@zGY3bKAm7c2H$ zUSxr8<|A=aloK5+fO}J2(@XV?B;LOyiKp+V=6zlk30pu|koL1gs*HC_kJyw07CSM5 z3I>@x8Vu3oLBEvC%FQCVcMRR`$Jp z-OD`2c*$n+V%r5jW1n!6$(_sDIrJmx6=yf7$Qny!x_Ro8e97d#$lSgAYm;4_`J$le z`u#pjJN`CHXKHzBS;!GQP{GLY>uN61ifxo=v0BWbp#?9CDYt?vt+^HT@{w#*Du`0! z*=~D!)MA>fjM(vJdzWl^KV4gY;cEsL%rl0y_a_gMD7z!%Gdhj70l+m+6TroOw5E^L zc(FVD+XMT-zt ziEgyqLTYw@fD-IGItqTZx^2Mzg}n6)~ye69VWm0KPB4!AOIZ=sMr?76X7-rnQITELL7 zMWPcNWY+I}VYtfZF*_utyN1`^{nd{bY<5l$sU_QIP~D>$W_1DS37Wj0f7L~h8Lwt^B&2h`81stwaUJO)= zYK!&yw3FIdcB0Rvc@}RMzHj6BVe0vbFW~DG1eg@bWnV0UdpGy&o}#1YQ)q(k^`&`PQ1J> ztut8eFur1i>tj|dV<(*|b9t3I6L|}@N_V0hE(+JfA)8;G!Y5Q-QMn%>4SlfAihtzh zLErX=Jzt17>wz3OPvf3iT-R4cxY)Pu*>(FSu|>FXWYd{_JkQdie*zg|A?;(>zZ|_6 zXPrjr8L1f;h;x&FqgJnkCulw6!)>xZEu!_?nrNQ2G#{(WY<1o4#zM1Dq6m!)Y3(!8 z?1?EV4dhtJRl|>9eBJS^4A0bbbs5#@2?w`B!J`B+Qg0$gdIc%t@R1!3do_J2Qg*3a zgAq$G6<~yFuB!Hpe+thdhBBJU8lcXB z!dHSRfcPyfU60{Y35iVmeu!lnaP~`=%&up)-5E6bF7>HO#8H z8R{86&aQP+e+_k2^Xz*BEJ@Xr=jHGc{T^>5yXD1%!Ax<>o`U!MU{`Z44C7GSj5|j= z>9Gb4F^J*l0*v5l?i5jnt&rV=?6|!LT?>synmfKFXjnlc047F@mr65Ry%|p-*f*Ir zn1qBY^fj%TvG6!9xH)AoPh$D%9$v$a z%C~Vgxdo5MB~Ldn1YT1wC?syGx4Y%zGjCkLO}cu=SjdmuOpiB9I-RmPq{y$M`?ijr zanFumf4I%qbH7yFyVH#gUw?Nep*e3GccNMBLVwUx`wEWO#!?m|0pcsGWc#3cf3nkG zu>(2kN@4Hyi(NcQW1ahbSyHFtb9XzR`%wi#_wt&)nwO&8wU^N)!VVU}E7^jFHh4QT zk1?AaGG$7U zxn<{zbj>wIdqR@~ql1eRB|$S%5t2ZF0)er+PG@_4epMzn_$)E(BA4#tuBxxto>t8; zkNdjk~XIR~{kYhgc_W8(+qxOm=P0HXhGYfRIpiW!{=UA| zmbOygPly0=2W`UdAf29x&=ceY>2clXqc?*c6pkDk@r9 z1vy#5?8{m&M3J87ixuWIQm89I6?sN`M{kst;{?WYJB$IKL#SAf$&Hb2ieth@aKg<; zMt-@&f@G7d9C0PJn^CnBU}9{am>oPACXA*(^{Do%M+VX^R1x-%U_85c$LzsykA>C0 zR!aP479)7OjbXNzSrA`L6dGaDW4AS;wLrh@U|Q^nR8iM@d8}6F%u%{y-Uq{KMb^_Mz84A z?p7zt)4}5OcZ;T|#2{r z3siC_~uWy%!p020;aUE**$CJ=&1=t#nrG?zS1MwPYfU982pN zZF>_iKM&JRS(zTRX_yy)wMvk0mda`~zJgwFk1>?m?C+zq;MlX{=yhaPhOk;Qbi*$M z>$Vhl%gKd#WBVk@EmwKx68Sw z%bmgrD_6jOQZ^;Mm)OfY+^(Zpj+kjjHkCF(6DSs)1#B2z|k^tF}eoLhE)b8 zSRP(h-l(+KP|2EHXM5WhD0t?dAPw4irJX~s5oO~<^!5(_VD57*hM~?b(ykDTV(V=k z%w|>_Eq%8sf2YukQ8^wyCr8aVYKK#CaN+KUd0c>o?PP4Bv|XNx^>HKRFDNQkp1Njs z9QAU>dT<`6kRnyR1+<`q*R|Pl8N?WN+nF)O$lC<+GMl77<7U5BFivh=>0~`0%Ns0v z!U!Gf(QO5OKKz4EQ;a$9C;)%{ym$QjO4jA1dmSKFf4CcOW`_d{0J+X){@Qe*Sg3ME z?mhUW_?#@}im0mf@)B^BA>THqX<>2x(O#F38flu+)cOR|fY6|KY41G^H&yR9dV1J~ zd^E|9b9qUliFT}~5YM%ye8nARVV~M}0>@h9|Ibu2l7MSNCHl z5AX8;&b`OFl|smW9`W>;n|RymEv~^>Jyb9$eBm5O>2%VJ{AUDs zpWfab<-#e@$zMlT{E8*|1W=#hSSN!U;!8Pu6q+2%`F4kcO?S<%xX#e0Sb$P*p&2)q zoDF97-mo#}=b&pGkHT^PxokDt%_rM8q=$DQ>QxQe28~a{6=@QP{qv3q z%BhpBAAO$N`&+R{W{??ODY+77tBt9QDsyMTi*&x7SNgb%qV;ifeNoUU4l3aj!T7ySMk)LoBzvA21sNFNvu&o+chEvW2eh&OR^jQqFo| zuIlZJXVr<)_xl6=4uws_!Us{@yrCa4UWw@w-fC5EoZIZ?YS(wk9AO_Nlw_tKe>wv! 
zCB&ry7wX-s=P@sG;T+Q*Gw zwWgcBpMpLf}`65sH2PlO9d1&b9)${>sEqq?Xg zu?uS{s-upJQ|auk!QC*VsfrDkyRs(6bEpf*TXQFL*IK?|wmFB|=fX_nynDg|J;fyd zK`Fw+C0}B&L_8k6@k!Lk2-pk|6QrWV99if=P3nBXtCm`dMT}VxfAxl50Io=N zZk7|@(>0v}_5dY`V{?w)`-e6YFU&hT>5^I>-oBBzQWN-p!=FSjqTDG-17Z=8qYyG&0m}+Q$B)~rE4@nD+4XJdPqy5w ze8~Q>=K0BXHlEKzcLW7Pe`B1$J+R)ylKGtNMfiVBRT|2jGu>E&%n?3I{kpzI>T5Sy zY~%BE5t&VhN#zIEob?V(H0?Bk8I4Z)5A`1D&aB@++{P@{x7ChI&%x;-JY0PNJ@BX& zn|Mch1P<)8Vc{4RBYcCO>uYGRL@DTi4m@8Q`0j(4$sj;MIeYXte?NpO3jNf|qWu;b zFO(ZPG>2M{3Z}O)t9xkdU?n(udbS`2!(|vCJe~_#FOJ9YX}o%!IUfYD0{b2A-0fB% z>lIJAfFhk$^+2lj$LU`(#sO~aD|x2 zipHk~6_xaeXT=Raf5TOn#GDvlKX~WbQ)Lb_4Bk5%`{egUVm*m#7{i;3V~Q$qHoe|w zF{T?=LArqfOCp_cue~bKqiu}uu<%&-XXn!j-q*)WFGq_d59X4aw3)?Z>?!H;_z<&h za5fYFlCp3yF;geSi%pdas+eN+yy<9_HRThWM$>Gh=ZV%Fe~h`(FYr=YO5@p4lRS@0 z;V_#RP%=FqZzuhc(3x?u&BrykNbsy!RL&6>XPZ(=2JoE?LCyl}*}b9HT> zub7U11ls&)P=QoKgt=Q=%@*MEQ`zySRCt_fs+|^Vf5ULvc45WviSgUIhpfybSYF!s zeZ4_QpVk79Sc9KP46Fin>grSGB z_X2r@xM3S1&8iu7u9aw=qSaA4m%VIrXLwC}i;6z9VLRVd+9=BphHS!}{j{E!_%UuU zp=q^V?e!gR8RkYq(GLG*5h~#j0lMXZI|sL6P_ZyoNa4szMSqT+&YjxE>v^9O=3+6)r75Rckj;aTbVA8C#~MMu9F-* z_ju9UyESh{)72ELvum|qB#WZCiCb$i1KC6aQND^HMbYqXatUYh!G0yjD-V-dOw9Ll zKDCsCMbTVEgW6(gJi3v6(%CB1Xe?LEf1!_osG%SBkN5=cy{LWZyGy4EhbZwo3x*3MW$k^d{^5?yoK_d@`-Cf>NJF)AiE^{u7Fwh9af5cUAy+DDa zWeiDOQ^zi&w1x{X`T4xSZ`ixU$!T%iF>Gj!uD1^USBw#|L7z+fSAoz~N9sUiogEK8i3*AK6t7pP@klW^L)F|O#L_w z(C@rjJ%|YOgWH-NMEadQ!~2+L^;eolEhO>YNIu@zGk|&18=i>NGMzwzemS2&*LeYs zp8HyGFxGqD8O#G#@G(kzQC{7T<|W~kITobU?vY(W&t~*qUi1BRf6+hly|nSxg~X4y z5%g<9<572$Bmy|4mgPCAl#hvZV_`N_uym?71Jzg#E^^=iwSSw& zT9iY({CtM6O+&$JO_-c7Eca}Tow#9U&bd-6THP(KGD2j*^#9O68&c1T$k7n9g03X&XhiJl9W~Qh3{e;m*;=zMRI_Eie_N+?sxsqOeMlV2v6M?-+Ls%v^ea#!rc;oM)+_U6Fv8|O63W?_g6lh^ zu@^WrtC33Ur{nf?em}4JtA_z|iYbOWWp4A^Jvm1>xsbt6`qz7WpXRF)p3sQP*MhN@ zT*3m~?%f0mBQg~n*D{>!cH4>qmtX3H(vdXQj&Jtbf7~>NmyeETa7N(cC7@1+A||hJ zwSQZ=%&zmY`EsPLe5SdMpVD#Rdd4Ko*kbxnp8-I_8rO%$J15~>(qpQOfb5>V)eNOUa zhZTG`f2;WOHrq_4hkRJ<6C7M>$#e1El| z<+fb#$Y#(cB0o=qP8C<_vX*VnxV>D|Wv$rd=1i?wsJ^DTDJZR(zqv-`E^NXo`WCY- zvE;MOsoGw+OsM5{swvXpni(>{$EiY_8sFJSfBiML@(MOPlo@R2yp`3bvy^ficyxH(>o}gRL`fmc%IbD8qfn!EQyv5VT(Vn!bCn&OTq!M9_uumIQh1a%PB?D*(C=>Ct)7+RU|)!3x!;f1))9tuvu56;-5K@e+<5OCJ(K$# z_`>sBcRyw^W6pxzq=iM=1WfO71ZwR;e_PL-z5(N5BIofHQepjcBQX>G ze9h9*c?$c*AuLu+{Dcd7I)67g^Q|EfeXCFdwX|+EG5B0QvmPWJl zst0K56vWwp4AmnBM)4iYIG&fe(>Q0ivW+2L;B=`0E52dB^c*!6D*{Bc$_aYee?T)P zTd?BW(`b%?=hM!&G>h?&)VwwdV!+59*vk4BPkeUn|i5*Spj^G8jT_Bu589*2K8;4Q}gr* z?0>R@e-RAb@|udRHKZlPQ*dpjfB5?mQY{cDyG`Q9dY|UaMD;9v3hlLJwKpz~rF+G$ z6FhqPu<1!M*G;$cbIFE)N*Ue58V$f{S4>Tm55=9|%+DcYpNgdm7CYN>kFDI=^_F_n zd$&ZQRisb_??Xxi_JmpPTrf`>1Witic(H>p*SBh^Z`o4YX1x<+?iG`Pe|4XXjsDD< zZ}VCSMQP@8-ae)qwP&cS5mu1s?aVTU{NosR*nmG^IvcpdKXf|(CDZwM{HSz(P$bqK zq$OJ}3nq|z!K#oPoRu)2ga4rc0M$)?P}#)Oj@IOZR#&DM7uvrt2}dEhbqEB)?Qrxj zurP&i240pF6BSBZ_*U5*e{d4+ei{CnLki<`UN`c^8eReUdWqS|QNEQ$D8xVoQ?dtK z#rmjf4l0`X{t(a#&@HdtZ#-I(Xd>##hNz|SUQCeR%Q!f8>IiR0(#gBxxQ~kFpqhz) zf*nb|pL@ob!QZAMf&7wfG=OAvBCq;Xr*j?v87p7N`{%9-QO)EAf0a!9vl?C$h~YqO zh)OB`pNj7`Sq^W(?8IL56Z~hsPiOaSW-6L5&70k0Ju?BPg`5%XNz_o~j8z|=>+qS% zn?>VWLflJv9C9syi9$VV?c?#WMcG>q*WU{yXIKO~LC0il|B)R2W!O=9c0? 
z&l&s-{F`$F9&en>65i=tNJ@qnkVnG+U(a=?eeBYq!Znewf7bRgY)0|Ax9cg{lPar` z#XVHuUU3grX`mY&P4^QQ%~f!+ZPwSe|^|=i7|%UL#3>2Va(8tzpxPj|rLjdH)3*@_@&wU1+lajMO)tOYcIY zPrYbGNRhAofBt^*S~ONw*tP}nUc}&YyOrAbWwfq{XRH}8mK1syNUN)}SslU60yt!o zaCx@F&&4oz1MoNDv2G%pi7{6&PyAbF+t1gEf=T`T?@Y$63}t1P69atCQ-GP(`285N zf(I~wWDIY|JFZoaS{O@WXPUy4lGvO%d07~UZR_mae-MucIRi`od=JN}5)098xSec) zP4jFhVZWJyz&&d6c{hw@^0VYAeouC^*vu3Pyv5=BaSrU)=*gxk&`r){rHqX;R)0+9 zNl;DT|6#s$C`AryTZWYN#y)yu-8(#wj_h^p{r3AgV2%3_>x{1{$uk>1kFQH6c;5K< z=b4jde+vRIo)bAg8o#n0{~D8Fd1%f~*LS()M^bz3VeEkcMvX5H&f z?_vlHi2@H!CiViy1DUqKs+DrufeV7bcU58vh!icws~jWnUW>h53B+5B!SgcOOtIi* zaTQpYv?rn6Dv}JdAnZAhdy58pnj~x&X_l+*lWMI~ZHQZ?r=@Tv&4%^fgF;fwvyT zEnIy{P=7=!iWOrva#=*_&hslceivdQWwgWv>Rs$zte-kHyBN(robk^-zi4H0R^LDC ze}^*^QY`jA&KuT`#lOw(n1I+f#E8$whUfSbKX6va&%t@hF(8A_<2m8Hy0FJI_i!XU zDX51KgW&A%Vn3<2(4w-)SsQbR9P7mx%yu2}_wloar$c?gfGB=gSOfV~1g;IyGqhPU zvxx7*-of#rc#~gQFizm5B2^43)7dVme^WN&+0F#r5acNQsk|mf$)&N^UA}F@;Pdy* z3gRe!9!NUAKhi3gr$Glu;-MgbGa_eIjjIQUFu5<;WIPVJMTU5e`(mBJIgGu{wh@|4 zC4N``SzGKktn1I-UQ3^NMAn6@5B6|8`+N=JQDK%?ONGOEz{f~z8}|L@nIxF?e;Dop zJey&^!zC{{BR*ZUH>{2DHHc}O;p3T!bA>XX#qZ^kHThU9v@sUL)o6NQaoaQSxMsinepN+{G8>r+i2px(J^t+J66@xx__tx2FX;eM`D z6gD@u6%{s52z3C-;%xYc~&?-+{2F@js?m-!6;kF?s*`RTBeF(+-%sKg5(4_@1@o#=rNofY&dC6ANqplgoV0%oJzl$rdJg=EK=k zG8IZj_*wyaqe6&}?<`rp2H$U#{?hOoVIVRMr- zkio&iYsYiY+#$ESSpKkAhdUKz%U4_vKj-F?BXPQ?dXh_Fe`b`GaVN|8lM}qq6HlCr zoF?Y*=dP=F;fZ+7x(a)OQcQlU@a*5nE3hy5W;_eI*u^LlhvM-LjS3h0@-2Rz)%Uai z&b!E6GTim?jCDa9cthJW@MO!aVD(7uP?{5bvfyF>Rwd&69NZ)E{jelZ zTb3v5)*{Yh-4KI6`Q>MC0&k7?Dqmwhhdp^YU2Dj}kuMHiV&q2$Wyp(DRX zWubR6We+O9ciE6UZyqyflG}15e-_DeCC~Ebt|s{`?h>2{2akaq*xR)4MFl1&rJbzLxx=PHRe`!*(${5H}hWPc#i^yxlc_Fdv>%RZo z$>a=?+UhqC!0Y~rN8fQ4F>1&Uu!nYDhKX>q(pn2PwxO-K|oZqkgL~6*Nd-wBk>_M^yL(Vy@!_iC7@eIdYmhrL@w>Sr;?KaqZps~2B zb%?w;Wh)*rE>hX5ilVyWLl@ptFd*?`E6v;1TrIkmx-w_B=zqpY> zidL~Q8x&rWj+zSZAA#+GVjY8cB!6$Al_MsB%tH|y@f}cD$h)zCyir%m7HUQ!_A|*< zRZa2$8DuQ>ipVWnt%Y+3YqhBJ1FS*Dk~)9($Nf+C1*u)hI$>|4E+K1GnxuaDf7;`p zoKPa)|Gn1l#3Of_1ZzY*e;Kdkj6pv9Ce(87@rb;^WKoCc*tiCdy9+s;UbHnZT`+dc z_2~_J-!j>P5DoCSAP@8W!t{sogEe6uEOn`Qfggs{~EGVi$O!X!1 zJnRq4L3~LwQlG;JW!J+8gzS$Qe=sSWMUv-yp5;(W{5il@RT<@TKTTQ$-ek2 zPyW;nzk8>R;Z2&XR}fw`{Sa`;g8i>jl3@EQj(5$D&qCA?wRpKwh)wXCe?)zZpf7W7 zCdygE-C4xh&O5n?G5&OWPy+U#aIug4j7QfR_X(y}j^F2o?O8D)ahI|9=8w4hjp=;i zEjbTEeMIsUU=YK;_{8R)dh7>tA?FFeHa!=&m`)fTuxuUx_fAuWhr2tswS*6Mz z4rog-jie;cEAT{|(-4=Wop70`;2v*`gip*YLwu}|hlbGHZ6C;6U3{U!w~Q{l+4wmY zW&9h}^HAqK#^acqwc3N_8`c8cLh3xk)!xXf|4`=*&r$j0)Iaf%++{!U?h98VwZI8y zr5I}LQk~*weZ@h%e}952p&o#p6o8dnjRmH`o)%ePN={oLmQ7%ChBeMa9XAZlz6&~F zdo?mRa-TI7;GuaCPROYqveBPx-Z!~koos&O}Nc+uOfD-;zS z;zgwfLrim3MeLbe6Wq5}WN-7e$lQX4hc={0d3r5UczzlCep5>ny!RI-CoptiLAB<mj!;0 zI=mg~@Y8n;)bK3H8Q1>GdqWUHl~BTPJJ0S%x7G!m>OyTV#C1)t1=bi%VR$7-wIi{n zFu~`Qf2_fu7>r{XFkf}%l@Vtw;K-REu!n@JK?5Y<7~(NZHDmr0kKGyK%8*A&zZmlH z8nm3^@*Mh*kVQt_6mx*9 z|F_-)=P7aS3y=-J*S?EI{72RwiSKrgx(OJ(e^`$nu_3m!ijQ-V>zV=^;JwF+)wu^A zleI4>tfLAQ;O&J37KNXc0Dl3lFOXJ+VzcLp)iV|?k^*Dyl>dwC|KrTzp83wvzV2mm zu2Acfxbw+D$zJ*DNszsR_b`8k8+EGti7rx1fY+;vIF~|JN$^R;SdGd{=dC2QNov4L ze+#{e81)aOWXi)4;YI|+S#>i*?^{jjXa`uotrTVmn!YNyq09M@ZA2qhfvep%ud=V+ z#iwt9djmsSIeCAGzevc7i6_)3X1?FwcmD9br~k-3hd7HkLSpPEhWs-Ief2Y3ALHXUn&utd2n_|NmW=z!f(kc22A5jhf5@b7gm6pZFUi+ouZa3dph!^(58{-y*qEIE(^pI-e>MAW zzKwbW=bB)R5r?%v9{*jl{NyEACz9v;Km8P}9qxLJXPWq4^|N-J`?h?nZ>9h@6n#!y z*}y?ikDCtHLRW&|v8{?d-Nv=M_EPkoxhX*2hL-Sf1DM4f%plw za#GkVgKVreObQe!%6y>zpa8-dIayK|d<=jGX&f;GkW@u<#8 z9M?%bcyLKxi@xrzN56Bxe>Fe2#rM5F#F0PG;#V($?6)Dd{9+5g_Ymr2al2Qe)dmOJ zDNP+sCX`~hZ*hm=!6Lq9n%FVGXD6Byuwx@=aF#QlQ(U8+ot6(^^r&wX-cxn4jdtr6 
zd0ks6eAk>Ef1(`Gih;$-6a5mxgSnG?VaP`hA;E`{q@y@PPhG{%e;E3Uzjwjs*?(&F zPk-sRPZfOCIVJjU7=#pp=m{ z5ZpvMuy}T^iaU^k*}Ur_AH_ayrSj|JJaU`&yM_@m;DFYYe*_lAtcyGWT{D5y<=FQh z+#7b5STFQvN|umQ?&k9U#q~O{pIW5+_Zr2YHS)1WUJ04^zt@PKK{ol{Ycxk}$2q91 z-iz@$IPK+ij6Of_cr>ObDTUi)c&{OH{KQh;`WSaFT2ojPz#^ZIR~w)rGRHC&Ffi#q ztjC$;>9{9`e|{CgTam{CZw8)KZByvr@B%wBaNIE~cLF-d*&af28%P!I~uC7^t~YRvb@HqJS15 zt!+M4#5C-eGd>SdVb^7E403ZAwgRan`(@0M^M&AcH_3S;`P(;EM`9e|c6`+i}FY>2ra%L4rN<#IRJyXj^Hx5 zmqMXg;&Hi^D0*bgV^L@+2YXav$1ss^fgNSgEkv&V^K+OUxysMShkL^(aTetW;w+M6 z^us=Q_YV&C&ln3_NE-Ir?|Kb;Y12~R&c-lce_~dsS0hjy&$ut~`b7SZkCXQW{8!uq z*5ls5u+W3Y`B8!K<6LDiKJFlu+PoI9kSN&SD0!jqn&!0Gfe!w4b0)3(%e#ocB z|1TIPi6L)T&t|TGYA`}BVsE)@^>!?U#ok-BRg2SbEYDC34~-C zU_F@+DTr;xe-6tmPh2CF(6Dz3ByLl=!<0YP3VeqHdEZ0a z>avsvuS+I(rbvswP~z^dj(nzw_5$EC!qee-X9dC+?$$ivHu*b|QWcB!Yu@g|i`I1Ow}ut&ztW zd{2|!3-Tex!PlThKx>j4{>`E8K5<#g2y6_&J>36Y?24JkyQEHlH45psqo0)`uSe0F zj!PVN;9M|D&rBY#3o#2lEr!Rsk*~wqCVf@#$y%eJvgEd>MGXf@%hsUEf36HvlbEw2 zJv&l2;QO%0$oV0GiW`x*FAi~kCX)02-{SrkJ}c(Q=Ii5MaUl7L6GOfLb>Tm-%nOU& z>Oby)`&W#|*%)FriSabYNF*q_L?vwbg3fJ~KzLT$o<59d*G5{Hv+&*NjAi*S? z#B3ftS=?{PJ6sayKQYile-jJ*0hv7+<16*zTLriQ;xw3JIA}x80DKo;?~K?S5K1db zvWH}NEq45vX8wxln5-gr=!69x3WOARgZl7AU^3zdZ^Bxk=KRh7zhWfkle6>7N%;fM zLyeT2w?%Xk5Z=J+c*^2|_n>^r2BxVOCjcqn0kz3q@v_(wfW5Tne>PXpAME1C@<2mk z0*RnMdoDd1?+4{CjH?a>1=?o}&^LP=E@qA*58ml$Rk*;PabAp!T@oC@DqXxO;0T|1 z`8gXM7ztl^EnXvP74$1|+ylS;uCM3&8wY_U4bOPU9f$g^LLS(lhXgWR8$$&WiSL`e ziunXw5%`_F6AbXCe>_)3JaS;fP8gfND;Y&HFAzoZYLc7O#@-*9-n;JuJM z3!=e<1ibsqMHfn}8FBX&#P( zNf4SdE@iO=ecHNn1wIE)aIG#%zF@Q#I0r|In1$DI7XqjweSGj1Fij%0G+K1vyg<3% z*u(~M7=Mm!S87%;M$Sd zHip!~NJE(0e{#w6;5H5TAUu92;A4_Lmdi>E;h9EZ4~*hd2mTue_XdM6+H%d;=u0ohxm5Aws8`qU&p{e!QZ z`cG`(e}}91{R{?9(0BaBo)f>G-ye9z4%-hhQG8X4m|f0XcMg~m)+HwJIR_KcI9|&Hp3B3+${0p9zOzoLR?-d&N^B#u_j|?m*3@Qv71-G zry=VKj>;)sMS^?8eTP1t8gjdVYpWrS>w;&we_GC{$X9@?HoBOLa4>yv-h|dzKPLPH zA2<;(#>m%saKy3-JS39W5d0ZvE8-gPcEYRL4ZM_r$Ax`MdTQUi&LOUm9N>Rd1B+jG z8R_Bf&>x*hd+?$Wun=TO9WScr`N^ow&Uzq%U_p9qq|SFq9jCD8Imx$CN6e1M$4I@! 
zf4$%85jFm&?-o!a?6-f{{4eASqz3T_ZiUc%9Iud{qrXb-XK+ow^V?SZuC2&f`h%N3 z+&7;(>_6nS_!Id!;@S|ah8p*a`}dR2Os!T~;CTR-$ks9|!h0q@s^aO5y!7)L_&!Z&=yb_!l2;G1)j`Rd;1qvsrSykweflEr&!3t_TByQ+0 z*Y|tm>m6u?Y=U1{EL z9MA8bdX}K?3xOv$nM_U#=`3ZtV)f=tTT4YT*9Iv zo{+jid69KplXyaKaS~5{d$|4Zo`?M5uX8r+FBMu;A1wF>AN#2{P+tKrcaOMVfA}q* z?;qBlaGZfF{q&vSdNkl+f8b5mc&{RyHlel8f#-)XKJ34oQf1@{d`oXpS7|~uF|n4u z+$C=_fI53O@S4S-f;#|mCSaHW{#}AAi+mP%yPKtm{W3dyw1R8ZJMhatcOQTS@R7+m zvAlXGW5x~Z=m{>dwZS@0^O{qx=ratp8+tYL!0imc)7IW8IG?~Je>m4ej2!MeQkt{J zVV(tC!d(wO&|f&n7Y0Pu;16!p5AINW_8;aKK5K>9lOZ=Dxx&|44Ye!boT2ydRkxD* zZmIs_Ki+mr@(r#7HX6?HaeNQvQn)IE_DT!x93^q>4Nb)wH9a_3lL|lMKh+g*b2PyM z{=%+@`1Mv1_iCf&f6#mAb~_nYA>O}*_m7`RdnEpuiP%D&;x{+Ix*#J*-YOD&2-9oe znf+s3&{O?zLjKGBg*s?>|KM{f|Ek@Gc$X@8)`ZiGk;xuBx#X8C0^vE_GlA@52CrMw zzz|TcW1i)>ME%;o28_0Fzwp`^vj(w+cTVUhBkZ$6X(vjOZwr2EjT7~E z1<4XTBB9ma_&fPMSMq+X;r9&pI8h%T-tT|L0a0JOvEl;f{(WDrw1%bod4}tKXzz_K1AL3t0Ls`KmD?=9s_Ec&)>1mBsclu?tb+Ye@Ogl;|+4LFPvbgqh{6xu{#fsQBbM& z1o@+1$v78?S8iqS8L^;3pK{{3lltz5RHLKhG<;K23lp3*V6b;e&4LO&`WQNshywwc zwW2}=&lbi#*i+=r&2+INJ?D&^4e*w{jl-K0%%XsE6+)p1JvuLjFs50tZ}eHgfx#V* zf0zSY0Vx^y3B!H^9s)83a0Q_^Jo&>|(6ZnGJ!ipVqj`;Tc(;>q zz{@!v`vCC*5~2JMw{V}4d>rrd7ccvoxBO#%)dvsekG|1|hw*2BNi5fOGO;D(fvpNI z7bXwExdKm9$E=(NJOJwZnEnDk4Om~mai(r$+YxRTgcpQ4w*{w*T7z2>4gK!yVcX@( zw|`g$HFOg4od=$T$|_@`FHd-@>=`0!1Q+Rb!dLptm44Nk126Z_XGAd4Kll+CTm71k z1vlsWy`YBp?TsljZsUL_1kS<+bDAT&#cYni;5pJ`;GddRi5E54@{t(!Ks*IJ1J7|m ztAi*K6c)BQgwz4tpic~*1MW=mNb`bMC4acfRS9@Hbhfs8FZh=hs4$yfbNYTsms>1Oap34SL}2oLDPzXv|@iR&aE z`08JN$DA*I(|0WS;I3cwqWA~?{lmro!TnHMYQh?G1QU8>c(JE=p^teh-EwK9K;pou zc0<_tV^$FL?da9gh(+d)0 zcdm`Q;VX_JZ-(La*ZuI& zU}OM2Kdo4CP?Ay=KaBMZ8h?jH)|%K4CYZ0tnJ~jeZ6sLV0wb2dF(64gN*^-c0gXt+ zaiHJWU*P3^=6gtw<3Gf0FmJ!-)JW_GhW{NW{*l)YSm}pbhMEw*bVHr=1`n{O{}IFC z%7ghWMRqrcV`C}bL8JgF29!AQ9Pt+0BY2speN@iDxuv}1j2S0jc7MOILSUnO^G0rQ zs-P}G1&0hI6w((%9|rvciJfs2=e4onsWGIt51+V#@QC>M>DSUc$eP9%F9yCLz_UJl z$-m~D*kN{<AmPm$}9vl0%(ZhiVWGXw8Y7%QwqLx+OA)ZyL*fIu(_f)9Q5 zWxro5dEMdWJI;N2wBHzi&7)obH|sZFL3|%H4lqxM8SJo-Ih?ON-l{ioqqtcvXYfPA z*ri{gM#2m{>dyc`p1CFODGPztQEwZ3AS6mWbp8*@8*X>wsehrM9wl5K+)LA|*>d1% zklGy2b908@6z^y1T5cH|{lTGb#K;lq#)j05!@QX9F{D1?e)BDG3+jm|r+@cX-2274KNynaVVznr7;ueqdi~Vf(5>|}Sqxg;g;x*qU0&L? zYv2?p9Ecb+iGM?4Mrv?Qh9U@uKMN4peMGSg{GHaFTZH?A{yd!TKwtS_8o0nUfvrd` z;<=sV3K3+q2_A$wN8~7>h&$bB=>kOGWBv#6 z@HL3mD*V;beaxwUcsi&-fQJ#h59du)KA0Ujn}Zg~7*0-j_AwM3zi<`s>_~k1>ZyJ| zuOZg__MgbJ7@o;rvmqpge6X#r`|FQ9dWhFV2jQ<+L^w@fcldynM>)7uQrvEkBzvL< z;bAYi>VGU|fdVs!0F_}*xS_>>?X$vmdV zH1MV^cW1cuPTkwm-Y{p>LA9E_`>5GMl-5g%QJ(=s! 
zCtJH;KLc}0Din&heq{-L1z(a0LKFCJBu-@+#D76psPN*sVkGGMxqj|;yx;%xxIC=a z@cc6ud0dOVi~T388mSG3xch>NAI`ZBU4u3DT%z>J30O923*>kY7zTghI?4!?|7{j= z{&g!()Kr|+NXpT38RoBCa2>5SWy3_~kpr;T!{!sK5zFy?B=!w)Zm2J?Z=l$pu^=%( zw|}K0*wv8F{dpgK`t{#93En3JUw-fNq2Ict;7{D?au}eO)9s+OU_;y-eInF$a558; zi7&>q62vxY(Yb@3REoa6UBJG$RSWoZ0RZC6Z2+xbAz8unX@lM>sSm|5=7N_j@-q;V z?O_fic@ZA--_M2AI)BVa{_w9&U|eL+et-7#u=hWAuJrFZ==+&evqGYM#7{W6btSky zm|=t@z~^4vU@qw>wQ}^28tChppeKiU=XE9SHTV|0{51ln`naC7pXbxj*sscjbq^tG z&Xe4xOGAhdg9#tq7^|%cZP$?0d!Ih?8#4mrc>tS3>_DD0@L|8_T1b5P&OiU0t$#*g z`a9qJsdtCHN$MrSBgQ?XJ%Sd!5D|9ooEoEtCJ(U`nC29{QskYPik!uUjP{<)V^7$L z&}Iz5I+aO`I^(`s&oHNeni-lVKA(+O)Q4|Eco1-vwvL-DzVAbD)XO<|{nSeIs0~i4 z04v#-DXan=9hOp$oSkbc2u zPS2CYvCPH^u*MyFDR;?FfEh`w13ry(Tw*2(zFh&%RErrGLZNp_PG8Al<=%y?jT8uq zqDD7FDtmBn!6!Ka?r5B$8i^d?fYZByFC&)Z9O>j7Vd@usvIgB0%)JNTx_<-L`r&b) zhDBaSa%Ix%{wJULPY&d_X2W+K|I2wUhPva2Bl8oBGEqG8;s$3d+F(vS0tbu_nmuOp zFyGDoa3PVC0&g$_6Z3O&M#ToExkKH?o z6GRwI?-&4d!2Jp52)sxz>VF6?638w1Be3(4z`qg+IS(qsz|>V}tvpT4E8tv!cM`5F z5B)y$T0{^_u@APS?g#7xsltIjy8F!Qhac|S7ryaF?`^n42dwrN+dv%to%1E^(H@d4GIhcEpXUliJgA zvTl(NP^hIbvqpO8geMySwf)CD=k82NF6h|6Q^@-h$KwXnD(o>~m_y&-nUMEK;?eNl z{_2l@<&Hn^tqcE{ng78LetZ15TX5diPKo}O8){~#k-LQd10NW2B3_sqC@w!A2X|q3 z-zBp~jKY0`o}zbEg@1Q?L%99U_es99Kf~~W@8GgB)Bz;E;XNZ?9`Z-7gfyH&u@ITH z2K*2}&VU~dF<4QF<`}`8-cX16@DtF(_&TTm?4N%1b$;eo%Y>VO^9;T+WkKgkVryWf zWR|5uPVMew4{<{exd8eKBo6?8gCX-D_ZEx?#t#9LOAM0IoddLn`DMn32JpDrOhq zJ`p=Yhq`EOdcet$qkTAp(BR3QIrvd-gUl0}Ni2=r8e&v1;u>N_W;VA1U-$j>ghS^c z)!wZ^Ai|suC?TSEDrD>n7D&`Kqri_AHGUGUuLM^c`hSBM0U`ZC?5B9(9pZi=I7~-$ zjlSoM{_0(jH7cZ>;eR!MFd^J#Pbj5f*!1 zl@VS3h!4Y>!;V5c$YoMsZHM{yTaNxs+cZL!9HFJBVBQXn`SXBp83I1q<#1 zIQ7GQihtY?c^#e)7gvDuT@!O$Wc_|)2|kA-E%G`K+rpv0Mdsy^{}XoO@Q9&L_&@N_u|K}J zNgc_xNU!i4=fR!*Jzq`ulRta;kNI1lJS*TuG}UTGZ)ZaqJI% zIqCIhP@TM21@s8!Z?uw7KO{uQ<*z;#CJM)=7+#1K{0Cyp=r+)2Bb4OF!&N->0{)N}uEcCAi9Xxe2 z_VzCL`2?c}u8BT83_+nsu~Xr_B<)-5D_p&V*NfhXdZZ1yt-w?Lo(Um6MsS?n1pCe< z>$zDXPLuug;mu%ma2?|Bgwuf8xEh*(;D0O}X!LhTuZrM5c-G(ypch8&HMp))+?4bg zg#oWeem3CsKQq9jRsgU2TUYN-y@oG+J>;<@UiO3s`OTBYxqxQVzkdw69Kx{)23Jw$3Ju#2Y^KUZL2p&vE@P7~n zI1ABXa$OG=V%sb{;yxyEOUM0yK3h%x&hZ{2Z`a#BY5>f2Lu1BUo#Hlr$`~g>O{K~Uv9e{h)d_-lPXbq(UwaF-5uX=XKA#i!6=SY_q}0 z^i_N}F6JZPU7nyWQz8C_et-NfBzPu@7mDDSIG1?ezj)>k$45o1{HooEwhgIU$ZINIz>xl&&;Q}Z6r=|==rInxT#EdP8!5lElr%dnz19M+!@3CiC4G;BPF~PlueDa_3;a^wfhtqbiRuA6%G(_CzXqNI8m{baP!$ICc>HcTD9^r$ zZU(6rzWV*&T;o5qkALv7AKu3BT8_}L`R#=kHu~UzNN}%e0)5o{aK~p5A4hyan-E+n z^njspVuKgY=GzzSk5Su)=y4hs z!08eZ;3U-ijepG(u4`t4Pl-DgPL`5ebIAC0krTl$iaFFv9JD(X%oPzHhRjx^pMlqv zDcF%UT2W-*llikh;}Y`K&wl%-PRld_$Ad%t

y-aG|#VeUGfuaVIk-cndl>`>5M8 zLCI?HgxrULnLH^&k9EKqDsBaNQc$Z%i<$5|K01e_hkr`+34eH9DH6$=gX8=8ia3f` zg6~6q=wnnPOAvz_CX8rW_xOQIwk9T;`z>yC? z7Z9~0b%n)n2(lOQJ2Ib>L-@4Ad?@ZLvQ8ug@29+-5f|52+*dRgByyWKVS%*37jwib`EYbetys@G`pui^Va14{p4YsNouDy zzO#eo^(PPg;%|TLbAkPV%9A)cHCKTdrryci?jKnz9v>7bR4 zNF}3+{oJRDPjp1FUjN_Sr#M^x{XYG_dC>nV50VK3<1p|`5Y2ZzxY^(9^FQ+J|H!HT z%YS^~2D|eg9Q#Le!ny)8*q$S3_JR+G`>o1k4}?w^G)|OEF?8~{7BgjJUOvkd!D9-{ z%YFK1sFi_zAqPjTpOEiIL&g2f#Xv`B&|lxTh?~%5MBY?lebBEZc@yCQkQip-j{cg9 zB|1KVOY}lEAN&K{c=BF*qD8!~$$bnw2Y=~3;U?J^C=8A^K(4Q?N!+kVzVh~5cnm{vF!4Jjz;0BG^x04Y}^3xmQh@QnaKJ+vDgMWYYW`93C z@;`KE{^J}Q@U4_faHa^ht3H7>Kk)p>vlPX92yJ$N3D|?!N5J>d^8zM;4Tk&-nr0l< z&DBiRZPvzN_B6Z@4>^1+pD_RH5=bAjqad%xc#W`FfzV1dHx+^IuLSqR9{JM6#?PEr z0%p+2To(2PIA9sHU`3YjZP^3#>VF{fHpH24{_PLW0&E=J+W`|F^3vg)!#^`5*zGsJ z{`VdtJ;u+v!aItr%YYsJexU)Vceb8&-4JJ~y#NPmU7vIG|f zyuQsyj8tgple)YPZpM{b94$nrHDu3W?VJ@HSIp|x_^+qH7WU}Y{cvDFX$EEo4;`gL>=f1V^m}D2A&4cCrS`IFuP<*z&Z@P z7!({K2tiH2?Mx25U}NY%xkSGLd}$XvXre2{x)wN3f9o%x-|&B;zkl#w^%c-d{Kx12 zgTBJ%I2&|0zVsD_XG`#?%DO=wip=5x^N64>_w(^#HlxE`@};?h`KaT7S^qIp3%!9K z&ih|;c7N+Ad~|jOKK!7Uup;NtMD6&wKmO(`eC?fI{;yABG3hUTao~S>9G^Sq!)y4~ zBs#-!6FkWn0K9oI@_#+B_`h{ZR&QiZ(OL=q5r)RTw;p@p70QFOv3AN2!(0SBHm2BY$J(q;vx6dpg(~af;jRA z#|Q6~Y5_Qr1YT%w_$3Z>9x>!EVr#-oKd&%j-0D26CfS)Ppl=8q z2rlROr#^v}BAy5GrIv$!FZxK{r~Vo4dQ|dXv(32Q{^SOK$DaW|{?h&+I05N>{_s%0 z_5Hs!Vt@X_Uj6n482nI3yqj7TRS|AIk^UrjuiVvyGnZ{LGItp7$?<|#F~Q-%2XHsY zYY}gdt9A%nBsP`*+{d97yfFzt@DPvKfHT~*qruE-bR$q*zFv@eF8!THM9m0FD{AS^ z$Z9xNt62X`lc3#z`!;lnEx>pScfjXsaVHNwIe%#0haMk~WXxqiIU0E}#?c85Kg6|` z=t*O%1Bd_kG5pp~KfL!}&FsIuGe}+Y-T(Q~jQ*3)G}Il#{NtZr|Eag(vjCjtlhY0B ztShU%>^(H(@2cK4*avQahQ{YEbQ?&VRyey*3qpOyXD1fg|1gk)mq=Ry1i;71z54k& z^na#-U3*mt4HW#GrXxMflLUWOch!eD(w3}_l!Das4Q$sF8v|8CtVB!%2X>_xMJK@% z1!l30hF)(At^R|?raQndNx#aZ`?{%zC zCg^p6a^ONUDaTx>sDgiijP3tNzYVIDY&IC`6zDJG_aeppM_vA)s_CC-BaY&ymmJ<9N?QFPUH)(272=Dr_oe$PotL2qM`mxQ#Yw7lim-*oPR( zBRK~LD@4phEN)S!g6Gkqr$lN}^gCQPQ2F3IM~skQlR~tRP&43r$j1EWuY#@wrySsz zgE;NSaGvOsmY+!hXr=!A&nJ2w$bbKRl7D3_lHfhIE9}Ba&gp=IhlU*nCA<@-G~$W% zmz>i`cDoJ;H+j?URn&S)+8#@Y3ad#AV_b_yZc|}XWE#_6PyC}YwDxi1A+Yae?dh=>|4(h_+MGzPq~Y)9 zS9m+(M9l1rZLS8p&*_Nb+JE?hjWMR-%LZT!Wo~Y&Xp9~E-}gyL1r){w<=#WgwCj>e zNvX_NDk)RSmP@7N)l;;3dvSgd8Q$3S9;Y~t?iX$MJf0t?@oB#(lqTJUjenv({Z6&V z`OoC(&G}>Q^_6{E(>a_}xBPO)r#ERZH!tf|y3y2M_3lLWsZu|VWPjpYT^*tK+RL0+ zv=)AMPcm;koo7c@p(B3mBiU^Gnq^b*+qvj*-U5pN`0PEXzZEt=W1jg^TyQVavVQ<^ zF#}O?Chg~PWDCwdlpK_ib0qM*EXjNHge8@^#I!<706yVEHjfm3%TX)#D7t>*R)L>My6%;;~jpo-~3x7?U%&$k*33EzSYr$Fb$H&4SKJU`^F3&BumyKK3cN^)4)=96&M$S6@ z@$u7f`sVo8&0o>ulhkqfSK{Tkb#prT6{{aNf20#PuMa#-#~xoB$K`f9`uJK)6x+Q! z_l3E2RnJfV&^@O_neXrWg*qdrY6_L)*bB#M-qnXT6Mu)Z`9)#aYuee1`+IwC+4ub! 
zips`8jKa${>+7uOWt#k-D@CfrRWUml%Va^w4AR3pqT#Xww^Lf+PhXAAtp$O4m&^fV zYPhIf6OnKQO(yG9O0`Pl;^mz1Xorw>4xH;b*8EAXIe}&15ZKw3lPMVLHu4wj!dB~) z+E{nwT7S-Z$yEAt=-Ejn!f849=Svw%%+9hY>agA>-jgA5BVVE{6%G}=nmQSdSj6*& zLuuy)hKWmB&V_8U5zL$!#Z0!@a_n9vS1S{FTA#BoQG+~l=l8>EJ&Mo$BHy-$lv55| z%CeJjVbDgF2{!5OJGp+dUS$Tm63yM#Nnfub$A1jz^M+rI-MMwTE0vBej!jZT&i06C zG3&X)0;=$1HVA&!bFa8yXSbp7uwtBu)vMnRo5hrsJ#RAj>To8$P; zYKj#ONuNnxHrlKS7O#0;ewxvtHipAyvsj?*^3(eL09jL>;yvZg44h08S)Mm(#{z*{ z1%JwsKd%=C{QX{uX2_@WRGC_(Ql!)_RfR{P2W|1v$kbXO$IJZp#-K!9dO};gz3;W1 zO1@l)+&`(ml)aJX+N%9z7G$2yU-fM2FXeGKlA3zu&xeHu9eJD67n=2Kxmmg&uxm`_ zu1@?fW7Iy^&1Ku6Uu>u{&(>&PQBk}sDt~qi=MtV(IiV8ySy{gH(VNDo1$rYhPUOlg zCg{b$Ve~5XyhfG`q?paZF4%076QeTCCVrNUSSD}kd2xA3d=9E{TPyuU0nG3cJ@AJrJWJr zB|)~u>x)v1ueBOpFs0`nw72Nd6MwW-jsULJIdrm_zio0rXt`M}CwP9x^V@pfrQP+r zBIV=#oq1ooWTibnDBttGng>^`dVv#0&}+V|M9 zFPSq%S44i5$_Ycy4h~09S0lI4 zIhZ@`I=Hytv;ijUW^;Stv`a(bm8X8p>`}iG^OM}JbeQt!{J$S14b zk9c3u??t{1_5K3+W;pwg_s_g1@Jrr{d+zSCB;3uvwMbYJ>1PU&UF z3sUd0(se^$yZ@jb$f7z6sHc9<@!Rq-AcMdBjeV7H?kX8#sek*(KL&QDOZiT4{juQ+ z@`65onN6_wL2Ql#1``9wIk2CgJo&P;6<^#v+GZdaY8C8S@6zT=ksZ~sT`-9d+(GB* zb^%@&`i!MiU(CBwg707aoTOdR4|bizP9;rV^@%l|#sRN+c3D}tL)%VZD^}WmlKi9( z+DKEiW~BK}`+rl<2G9ARWgL}_Tun=ta=HlP7`(CTmsbSl)q2I$Ri3$(R-LW(#D22_ z>6=L(>BtEPMPyS{{r5H|J{8`8pT9IbEl5IES^u zj;p17YVmyGOFuSy-WvnUBSMNl&HSQ}*+6=PE zRPXuXlYf4RH7P!WSgC#Tbn9a~XAf|&;HESI8x+nS`^){DD`k_W?TNl%Wegc_ks*0` zI*6l=VEnDpbLti;6X@ku>);Lt4$K(eW$2-y1GhHm`flYtoqR)l$d7!JY;J>WD03y+ zuNI?>ohVDadh&vz*92eLB!7NwJDarZo*-jOKY#5l*m}K~T}XYDy-Y+lyXrGT*RE#l z)C|UF3Py(7{=9alopOvpJw0(OH^*K9V2U*>z1pk%r7!I<9>K$A$>XFHp;qoU)ff^RzNK!AN7VRntNln_km*SJ7xeWNo8^1*7R*q*rQf+9 zt$*srSb{I%>E)DJl-r8Pt0ZHR>jizd=py(cW5R1rN|E^4;zwk ztBkWV%xT{7K6b%)sjBQL;JxHcIr1qhF86oR)>W`J&sYZByv5eR>7tCIz5FTPHSKvJ z+6M-gSv|y6J$ocqaWT#0g|1~wy^w{XqYsn%*A$(j>t9fGZC(G2rc3DhYr0>eLpNDxg&RzcGY&~# zq?pPhZHFH6W=ZAqlP}SqNmL&*6GD!g&hw{ zw04s#P@(C;PH3~t$H3d8Euov0{EAk}e7B_KfLi!2*5p`1Pb=23CzrmI zJI5GL$tC+;(;J!4Wx#t$%f=`0UefaIOgWacv@__VzB{q`VhVSPUb!H1Gk?-&)oUee zzfV7RHK={JS7XO&gxPi+sb$WCwWX&aV+pU%i!<+HJ!GAQnj`*6xxZJYvKPAeKTf>+ zA?ss&L96?75C-2pnQ$yw*~L%NyMpF=dW~7oi>Y!|=VO+QqZ@rNmZPu>c2d@@oB8U? z1#nfnw^;zrmg<*=mU$hgN`Jh2q0pH1Y%yk*aQjd9CLP;4G-VE_-X;brBYr4}pEGu2 z>Vx%9ZPqRF)z?g&W3%(bSnc;?J{ad(`786Fm6Dt>o^8vy$9=V2Q?m;4fyiXEdXHZT zaa7i0nv;{{{rN!lm2Fx@_A{3_E0x~-23CiuL8${%>P!-=d0X$Eb$=k{KUeR$ORz+; zyeEuaX0n&}Oz+Ah_VT{KzO58)d0*UkU$(}~c(3?fGp~-k@XsoyWl}w9afEuSk{Me? zb=WMkKj^{SdX|zf)yuiFO!8JNSJnxrIl3z?%|~C6llSMDXRFwe5NI|NxFVh0*SKe0 zfWMeLp``pIN#hpFC6LnW0sT;OZjpna7uV zHaGY|&ue{0{&N4lLpxADCjFZgz9tJL`8&40Q(lV|QOa}M%lx7VWuCvN%Gs0h)nvVy zxPye014&LyIrhwl^^jUm%GbfAFylx%-v4g;x{`k{^J*25?|)p&gWZhrdtodfe)FWK z9wVVA9nQvS)-|Mkq<&i74?W~cy;yG{o%oUbo?htoR^?XZYMOG@=F(2wOS=`Zd$mr) zBB8lw{ZsM3VY4c!|HGW}(4Mq2+?l7QXEYd__(Y zj|~2hjtw-4-&FOrDB&dAkOFbIW9_Tkt#uau6;;*U~?WX0U+FMTelX9`6s`!yeIu17*NM#=9u-LTk%S+X_eDVHR9_{%n!4x_;W`x_?_H{Wbk3FT9*yW zWq%!vapL!fdh(g*={QFMN86(TC&J=S7r&pi?%2KgwPW6&=dLT}{h*xj-s5^*xQ!Lu zc0F(Cu99WW+-eo?BaKS)s!@;H&0;=VFLKO$aZsqmhGU_<|B-oZ)n9(Kdu28}802c- z%g=`AYvpcGY}bdghadP&sAr`Je!*(*&3|LIF38RQ{qH{y9w*aL%l+evJDqji#dG(| zpZ|L_Xt~{&x5vrYJv)dV#iHHO!A0w?`{RGETK4$wJA2f14$9s6!R=(!8vpHdr}pF5 zp*`*%_Pchk@1Bw7D;(TId;HLaNkrdGyWCFgaqp}(b0;R7ga3Xz965)55*~KQaevx! 
z+{4zx)68+t#*=aPzwFWTWa_rY?$`EYN_wb)WWOY^UK!>nLG|B z^RvGFKpnq6w;mp(W|6PY6UTN{_J3_>GMu^HuXFps?MteSocr!*`0Z%ior~<=9&G1% z*jk*mhm+1=N!e~W-M1ynX?NIi?O(9$+iv&Jd2V&ONNGN8J)2w(=iTeoSMIye}wyR$|?tfy~J+s}` z(C+vMrXx-8*c#cx#o0>Vd{mCf#Ya_bjh(6}p^wxxIr>QN(D3;b*{yU&+J7uYFx*-I zxBvCyf=gPjX738VBb`@aO`uEQBk&~p0pl%#Zcl~}&SJ!$y{qyeuhz(!j+RQG%F|xn zss?Xw$;7&<`s&cxD%C7G_kYmY{)R{%r>%i64|%X9eSydB%-@7k_jl>NhadC!u>Qg` zj0!oOsV{5px2bApYCo?GO(lHvr`u1%y(({{){lZX1XkE4dtaM7kGAW}Kqz*z|3K;O z@rPEvo^HbyehrzxICS4u@IP!lJKZzy;_C)BxvloF`&IpW$m7Y(J%4+&U%C$)45v#> z)1=eM+)uIbzSA8JSKz)xV)_BsM$HdJr^jdQiR(^Aen#KG>9HT*y{=@fyYM(}N$Y2@ z6@u4@kDfJTZ+1}{?^g|OnU%RS1lwL&hC;4`n znEZY0+mn|!MRSNfjendo>3F1an@##S?7n=Je-0mP>7zwnH2-Y0>K(9#mp9YXMk4?8 zJzr^X-yRFy9_-iE!Amx3y_i}?-br)h%$}WD`%R5>biIe&$2C(2hdOJmxklHa?_Ea| zp*%Q}e-G8jz$m^Jqp=Z&7WTuYn6zqb6T(Bw=?lqH68^i*EPt0TM2FdOCqIvcKT zW-`3f)XPg<15uCF;Au@}GJpwI><%`*%s~u(l{YurhDI9=BMiPB+2e6{1<=B0^O>+S z&sJQ$G28|>@MMG6`V-BGC&4vaE5w_&z2HKx7f@7dJGs^_W73N3Fd6yhSD%gZV3|}} z&}i6M15*i#Ca4;nP0j)^2_z` zDCzx9x~on=pi+?Xuse2U)9&B-e&s9TJ%Jf2D!2{9Ikt;)Y@PE_ zEDZ1XF5Yn;??f2hiCw%CKHj0Eo!rGc>Elf+@6Y>W7k}@QHQu45o!Z4Ywaz)5w9~tI zr+vKBp+s`Ji}~pq^Y~FBr0GO<+D>GRf8r>b2vz#%ZrfZp&cxAaYR~e=cA35*SmS<_ zj(fi_c?5ufAqWpII$;0=7y_RuMNiBx6(*AaEHDJVOqL=FC8GdPFa*9#l3{oUfPx|L zgW0E~6n}sZh9Erh=!602U2+iQT zpAlCOns^kxY)(%;qhCR2;!*fOE13+}E{R=eVhGkP{Uo+$ho{p9z=$F6K~*T;0q|i6 zd=M3mc>sbKg7Da+69&MBA@E~UIDrS?g(2{RQYhvDIAI7rGc?jk1K`0BeCAtBCk;Ra zLx12i*XW*TV9X-`4Lk}T`{ZdjsR!YJN8!VP=$`tR+-3iUVBL(uPHSiu0We?)d>9jQ z*sECtz=0v~LCxCK&Hi}=pn@UrL5}y^6ZSQW0DLe6;gLrt48R6M;76Y2FH9x@7-0y0 z>l;n;2mlL1@R>;=oiqR}41tgL>7HLu#($Oqz`~>Ofkix$FuzdQ>^ACktBxI|PQqn# zvI{y4fe$*uuns^BL*PSz%T^9Jklqj#?6{V0ALt`-}wyF zECK+-5PWK?N2d)y3`5{!ej0ATsaXUdh9USJRMIR0u)`39MDe?fhf%)06q-C??9Ah5db8HAUs4Fy9$62kHQa9kw`3#Cq;D6 zzQQosZQywlWmHOjvG;r6VWNJ6^YTpG%!z0KW%TR0p7XS~;hf}d2zbr$s0bTKEB!9X05Y+Gpa?mmq z8{id9#ZIF8f|qAfnPAELWmHQ)Pc3Q(S&VDpiE zef@U3ZNB&06awbS{ctlp zg23gy*QXkLr9=ZlWfFuf@4Y_-!IE@gClYcR#wdfZWs4W6VAx3{iUUYEvkXL;E#9E( zwRj+~Wn@yV!Gu4GN}hk1!kGNK6tNFe81sFXBK~0tV~X!mB!9N17|n*Rd7@>YgAc+~ zY?DPIh{dWD(T^v^W+Uiun^nBR^P!2E{5Zm4Nps{7nKnF}JnyXJ zD#?%#f$qmiR?XMyNYTD|fm!P%R5f*$uL7cnG!ySH0T2(1fQoIQCj8F*!+Y?Eoc}ao zRjY6l71+^Qe18zKJW^3&M0a^nU{3eBHEp?*4;pF}JLZcGGF)2IX=_2O7fgb~X}ko` zYRMdS_L0~7WW;1|N#+mcdY5RrHIWJnSp}5*>gnen@rB^+(H;(c!R=p(=9>%%cU*ra zREH9}BQf%?-RVR>nGjE9yNk1^N#x1ZyA`o@TF+QL0$-D7wi+!LVYHtk*bu|%zM-c|d zCMuA}(nSl8Wpv#NuRE+)-j zdqaH=l2*ufL#2%o_Rpyd{Q2dpdCqn!ooRO@)GLaWmW+mnd+>%VkhAxGt*@IN_J*^r83n8x z4!SIzywSjrQKKXtw%Vc)0tLZ|H;j)#MduF+a`2bZyU!0djr#WuaP-+#=GJAM}LQf=AwWjiDDM{O{(!Sti{2;lJT z$ZhrBheUfoxWV9Ph!RddWMd#-r(N3yXqlWa{GaS$pao*gNCSny#AZXQ;jO2{>SJup zG!B-!{I6q$7~lSJaPY6=ez*0Yt|YNsaQR^WI

From 9e1330c15dc305d894104843c61bf6d7a7cf31fb Mon Sep 17 00:00:00 2001 From: Xinpeng Dou <15529241576@163.com> Date: Mon, 9 Jun 2025 19:47:39 +0800 Subject: [PATCH 006/192] CANN: Simplify the environment variable setting(#13104) * Simplify the environment variable setting to specify the memory pool type. * Adjust the GGML_CANN_ASYNC_MODE setting to accept yes, enable, 1, or on (case-insensitive) as valid options. * update * fix CI * update * delete whitespace * fix according to review * update CANN.md * update CANN.md --- docs/backend/CANN.md | 20 +++++++++++++++ ggml/src/ggml-cann/common.h | 7 +++++- ggml/src/ggml-cann/ggml-cann.cpp | 42 +++++++++++++++++++++++++------- 3 files changed, 59 insertions(+), 10 deletions(-) diff --git a/docs/backend/CANN.md b/docs/backend/CANN.md index a5ba617ca7bab..2b001f09abe45 100755 --- a/docs/backend/CANN.md +++ b/docs/backend/CANN.md @@ -8,6 +8,7 @@ - [DataType Supports](#datatype-supports) - [Docker](#docker) - [Linux](#linux) + - [Environment variable setup](#environment-variable-setup) - [TODO](#todo) @@ -290,5 +291,24 @@ Authors from Peking University: Bizhao Shi (bshi@pku.edu.cn), Yuxin Yang (yxyang We would like to thank Tuo Dai, Shanni Li, and all of the project maintainers from Huawei Technologies Co., Ltd for their help during the code development and pull request. +## Environment variable setup + +### GGML_CANN_ASYNC_MODE + +Enables asynchronous operator submission. Disabled by default. + +### GGML_CANN_MEM_POOL + +Specifies the memory pool management strategy: + +- vmm: Utilizes a virtual memory manager pool. If hardware support for VMM is unavailable, falls back to the legacy (leg) memory pool. + +- prio: Employs a priority queue-based memory pool management. +- leg: Uses a fixed-size buffer pool. + +### GGML_CANN_DISABLE_BUF_POOL_CLEAN + +Controls automatic cleanup of the memory pool. This option is only effective when using the prio or leg memory pool strategies. + ## TODO - Support more models and data types. diff --git a/ggml/src/ggml-cann/common.h b/ggml/src/ggml-cann/common.h index 7ef80a4793314..ba2cef0c25fb2 100755 --- a/ggml/src/ggml-cann/common.h +++ b/ggml/src/ggml-cann/common.h @@ -37,6 +37,7 @@ #include #include #include +#include #include "../include/ggml-cann.h" #include "../include/ggml.h" @@ -103,6 +104,9 @@ const ggml_cann_device_info& ggml_cann_info(); void ggml_cann_set_device(int32_t device); int32_t ggml_cann_get_device(); +std::optional get_env(const std::string& name); +bool parse_bool(const std::string& value); + /** * @brief Abstract base class for memory pools used by CANN. */ @@ -354,7 +358,8 @@ struct ggml_backend_cann_context { : device(device), name("CANN" + std::to_string(device)), task_queue(1024, device) { ggml_cann_set_device(device); description = aclrtGetSocName(); - async_mode = (getenv("GGML_CANN_ASYNC_MODE") != nullptr); + + bool async_mode = parse_bool(get_env("GGML_CANN_ASYNC_MODE").value_or("")); GGML_LOG_INFO("%s: device %d async operator submission is %s\n", __func__, device, async_mode ? "ON" : "OFF"); } diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp index c0ea26002196f..d1a0ad374d691 100755 --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp @@ -31,6 +31,8 @@ #include #include #include +#include +#include #include "ggml-impl.h" #include "ggml-backend-impl.h" @@ -93,6 +95,26 @@ int32_t ggml_cann_get_device() { return id; } +/** + * @brief Get the value of the specified environment variable (name). 
+ * if not empty, return a std::string object + */ +std::optional get_env(const std::string& name) { + const char* val = std::getenv(name.c_str()); + if (!val) return std::nullopt; + std::string res = std::string(val); + std::transform(res.begin(), res.end(), res.begin(), ::tolower); + return res; +} + +/** + * @brief Verify whether the environment variable is a valid value. + */ +bool parse_bool(const std::string& value) { + std::unordered_set valid_values = {"on", "1", "yes", "y", "enable", "true"}; + return valid_values.find(value) != valid_values.end(); +} + /** * @brief Initialize the CANN device information. * @@ -214,7 +236,7 @@ struct ggml_cann_pool_buf_prio : public ggml_cann_pool { * @param device The device ID to associate with this buffer pool. */ explicit ggml_cann_pool_buf_prio(int device) : device(device) { - disable_clean = getenv("GGML_CANN_DISABLE_BUF_POOL_CLEAN") != nullptr; + disable_clean = parse_bool(get_env("GGML_CANN_DISABLE_BUF_POOL_CLEAN").value_or("")); } /** @@ -410,7 +432,7 @@ struct ggml_cann_pool_buf : public ggml_cann_pool { * @param device The device ID to associate with this buffer pool. */ explicit ggml_cann_pool_buf(int device) : device(device) { - disable_clean = getenv("GGML_CANN_DISABLE_BUF_POOL_CLEAN") != nullptr; + disable_clean = parse_bool(get_env("GGML_CANN_DISABLE_BUF_POOL_CLEAN").value_or("")); } /** @@ -731,16 +753,18 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { */ std::unique_ptr ggml_backend_cann_context::new_pool_for_device( int device) { - bool disable_vmm = (getenv("GGML_CANN_DISABLE_VMM_POOL") != nullptr); - if (!disable_vmm && ggml_cann_info().devices[device].vmm) { - GGML_LOG_INFO("%s: device %d use vmm pool\n", __func__, device); - return std::unique_ptr(new ggml_cann_pool_vmm(device)); - } - bool enable_buf_prio = (getenv("GGML_CANN_ENABLE_BUF_PRIO_POOL") != nullptr); - if (enable_buf_prio) { + std::string mem_pool_type = get_env("GGML_CANN_MEM_POOL").value_or(""); + + if (mem_pool_type == "prio") { GGML_LOG_INFO("%s: device %d use buffer pool with priority queue\n", __func__, device); return std::unique_ptr(new ggml_cann_pool_buf_prio(device)); } + + if (ggml_cann_info().devices[device].vmm && mem_pool_type != "leg") { + GGML_LOG_INFO("%s: device %d use vmm pool\n", __func__, device); + return std::unique_ptr(new ggml_cann_pool_vmm(device)); + } + GGML_LOG_INFO("%s: device %d use buffer pool\n", __func__, device); return std::unique_ptr(new ggml_cann_pool_buf(device)); } From 652d6104ff6760424b83fe9278a3d42d316deb19 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 9 Jun 2025 17:17:31 +0300 Subject: [PATCH 007/192] graph : fix geglu (#14077) ggml-ci --- src/llama-graph.cpp | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 55390d42e72ca..27c9ab74be112 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -663,22 +663,14 @@ ggml_tensor * llm_graph_context::build_ffn( { // Split into two equal parts int64_t split_point = cur->ne[0] / 2; - ggml_tensor * output_ffn_up = ggml_cont(ctx0, ggml_view_2d( - ctx0, cur, split_point, - cur->ne[1], cur->nb[1], 0 - )); - ggml_tensor * output_ffn_gate = ggml_cont(ctx0, ggml_view_2d( - ctx0, cur, split_point, - cur->ne[1], cur->nb[1], - split_point * ggml_element_size(cur) - )); - - // Apply GELU activation function to the first part - output_ffn_up = ggml_gelu(ctx0, output_ffn_up); - cb(output_ffn_up, "ffn_gelu", il); - - // Element-wise multiplication between the activated part and 
the gate part - cur = ggml_mul(ctx0, output_ffn_up, output_ffn_gate); + // TODO: these conts should not be needed + ggml_tensor * x0 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], 0)); + ggml_tensor * x1 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur))); + + x0 = ggml_gelu(ctx0, x0); + cb(x0, "ffn_gelu", il); + + cur = ggml_mul(ctx0, x0, x1); cb(cur, "ffn_geglu", il); } break; } From 4eebf7cef4b5b5da73155383d9db561b921d5866 Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Mon, 9 Jun 2025 07:36:26 -0700 Subject: [PATCH 008/192] cuda : fix device sync on buffer clear (#14033) --- ggml/src/ggml-cuda/ggml-cuda.cu | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 3d2a0a36dd56b..0bd2904e1c9d1 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -615,9 +615,8 @@ static void ggml_backend_cuda_buffer_clear(ggml_backend_buffer_t buffer, uint8_t ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; ggml_cuda_set_device(ctx->device); - CUDA_CHECK(cudaDeviceSynchronize()); - CUDA_CHECK(cudaMemset(ctx->dev_ptr, value, buffer->size)); - CUDA_CHECK(cudaDeviceSynchronize()); + CUDA_CHECK(cudaMemsetAsync(ctx->dev_ptr, value, buffer->size, cudaStreamPerThread)); + CUDA_CHECK(cudaStreamSynchronize(cudaStreamPerThread)); } static const ggml_backend_buffer_i ggml_backend_cuda_buffer_interface = { From 55adae4753586141d9f77a6290ef82fae91b2eb1 Mon Sep 17 00:00:00 2001 From: xctan Date: Mon, 9 Jun 2025 22:47:13 +0800 Subject: [PATCH 009/192] ggml-cpu : split arch-specific implementations (#13892) * move ggml-cpu-aarch64 to repack * split quantize_row_q8_0/1 * split helper functions * split ggml_vec_dot_q4_0_q8_0 * split ggml_vec_dot_q4_1_q8_1 * split ggml_vec_dot_q5_0_q8_0 * split ggml_vec_dot_q5_1_q8_1 * split ggml_vec_dot_q8_0_q8_0 * split ggml_vec_dot_tq1_0_q8_K * split ggml_vec_dot_tq2_0_q8_K * split ggml_vec_dot_q2_K_q8_K * split ggml_vec_dot_q3_K_q8_K * split ggml_vec_dot_q4_K_q8_K * split ggml_vec_dot_q5_K_q8_K * split ggml_vec_dot_q6_K_q8_K * split ggml_vec_dot_iq2_xxs_q8_K * split ggml_vec_dot_iq2_xs_q8_K * split ggml_vec_dot_iq2_s_q8_K * split ggml_vec_dot_iq3_xxs_q8_K * split ggml_vec_dot_iq3_s_q8_K * split ggml_vec_dot_iq1_s_q8_K * split ggml_vec_dot_iq1_m_q8_K * split ggml_vec_dot_iq4_nl_q8_0 * split ggml_vec_dot_iq4_xs_q8_K * fix typos * fix missing prototypes * rename ggml-cpu-quants.c * rename ggml-cpu-traits * rename arm folder * move cpu-feats-x86.cpp * rename ggml-cpu-hbm * update arm detection macro in quants.c * move iq quant tables * split ggml_quantize_mat_q8_0/K * split ggml_gemv_* * split ggml_gemm_* * rename namespace aarch64 to repack * use weak aliases to replace test macros * rename GGML_CPU_AARCH64 to GGML_CPU_REPACK * rename more aarch64 to repack * clean up rebase leftover * fix compilation errors * remove trailing spaces * try to fix clang compilation errors * try to fix clang compilation errors again * try to fix clang compilation errors, 3rd attempt * try to fix clang compilation errors, 4th attempt * try to fix clang compilation errors, 5th attempt * try to fix clang compilation errors, 6th attempt * try to fix clang compilation errors, 7th attempt * try to fix clang compilation errors, 8th attempt * try to fix clang compilation errors, 9th attempt * more cleanup * fix compilation errors * fix apple targets * fix a 
typo in arm version of ggml_vec_dot_q4_K_q8_K Co-authored-by: Georgi Gerganov --------- Co-authored-by: Georgi Gerganov --- Makefile | 1264 +- ggml/CMakeLists.txt | 2 +- ggml/src/ggml-common.h | 4 + ggml/src/ggml-cpu/CMakeLists.txt | 46 +- ggml/src/ggml-cpu/amx/amx.cpp | 2 +- ggml/src/ggml-cpu/amx/mmq.cpp | 2 +- ggml/src/ggml-cpu/arch/arm/quants.c | 4113 +++++ ggml/src/ggml-cpu/arch/arm/repack.cpp | 2174 +++ ggml/src/ggml-cpu/arch/loongarch/quants.c | 2638 +++ ggml/src/ggml-cpu/arch/powerpc/quants.c | 2731 +++ ggml/src/ggml-cpu/arch/riscv/quants.c | 2068 +++ ggml/src/ggml-cpu/arch/riscv/repack.cpp | 396 + ggml/src/ggml-cpu/arch/s390/quants.c | 1299 ++ ggml/src/ggml-cpu/arch/wasm/quants.c | 1480 ++ .../x86/cpu-feats.cpp} | 0 ggml/src/ggml-cpu/arch/x86/quants.c | 4310 +++++ .../x86/repack.cpp} | 3265 +--- ggml/src/ggml-cpu/common.h | 2 +- ggml/src/ggml-cpu/ggml-cpu-aarch64.h | 8 - ggml/src/ggml-cpu/ggml-cpu-impl.h | 22 + ggml/src/ggml-cpu/ggml-cpu-quants.c | 13891 ---------------- ggml/src/ggml-cpu/ggml-cpu-quants.h | 63 - ggml/src/ggml-cpu/ggml-cpu.c | 4 +- ggml/src/ggml-cpu/ggml-cpu.cpp | 16 +- .../ggml-cpu/{ggml-cpu-hbm.cpp => hbm.cpp} | 2 +- ggml/src/ggml-cpu/{ggml-cpu-hbm.h => hbm.h} | 0 ggml/src/ggml-cpu/kleidiai/kleidiai.cpp | 2 +- ggml/src/ggml-cpu/quants.c | 1179 ++ ggml/src/ggml-cpu/quants.h | 116 + ggml/src/ggml-cpu/repack.cpp | 1566 ++ ggml/src/ggml-cpu/repack.h | 119 + .../{ggml-cpu-traits.cpp => traits.cpp} | 2 +- .../ggml-cpu/{ggml-cpu-traits.h => traits.h} | 0 ggml/src/ggml-cuda/common.cuh | 3 - ggml/src/ggml-quants.c | 2 - ggml/src/ggml-sycl/common.hpp | 2 - 36 files changed, 25588 insertions(+), 17205 deletions(-) create mode 100644 ggml/src/ggml-cpu/arch/arm/quants.c create mode 100644 ggml/src/ggml-cpu/arch/arm/repack.cpp create mode 100644 ggml/src/ggml-cpu/arch/loongarch/quants.c create mode 100644 ggml/src/ggml-cpu/arch/powerpc/quants.c create mode 100644 ggml/src/ggml-cpu/arch/riscv/quants.c create mode 100644 ggml/src/ggml-cpu/arch/riscv/repack.cpp create mode 100644 ggml/src/ggml-cpu/arch/s390/quants.c create mode 100644 ggml/src/ggml-cpu/arch/wasm/quants.c rename ggml/src/ggml-cpu/{cpu-feats-x86.cpp => arch/x86/cpu-feats.cpp} (100%) create mode 100644 ggml/src/ggml-cpu/arch/x86/quants.c rename ggml/src/ggml-cpu/{ggml-cpu-aarch64.cpp => arch/x86/repack.cpp} (68%) delete mode 100644 ggml/src/ggml-cpu/ggml-cpu-aarch64.h delete mode 100644 ggml/src/ggml-cpu/ggml-cpu-quants.c delete mode 100644 ggml/src/ggml-cpu/ggml-cpu-quants.h rename ggml/src/ggml-cpu/{ggml-cpu-hbm.cpp => hbm.cpp} (98%) rename ggml/src/ggml-cpu/{ggml-cpu-hbm.h => hbm.h} (100%) create mode 100644 ggml/src/ggml-cpu/quants.c create mode 100644 ggml/src/ggml-cpu/quants.h create mode 100644 ggml/src/ggml-cpu/repack.cpp create mode 100644 ggml/src/ggml-cpu/repack.h rename ggml/src/ggml-cpu/{ggml-cpu-traits.cpp => traits.cpp} (97%) rename ggml/src/ggml-cpu/{ggml-cpu-traits.h => traits.h} (100%) diff --git a/Makefile b/Makefile index ae29ef2c60d34..4fb4acfd7eb8e 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ DEVELOPER_ID ?= xxxx all: @echo "Specify a target to run" -# Build the Cortex engine +# Build the llama.cpp server build-lib: ifeq ($(OS),Windows_NT) @powershell -Command "cmake -B build $(CMAKE_EXTRA_FLAGS) -DLLAMA_BUILD_TESTS=OFF;" @@ -52,4 +52,1266 @@ else ifeq ($(shell uname -s),Linux) @tar -czvf llama.tar.gz build/bin; else @tar -czvf llama.tar.gz build/bin; +ifdef LLAMA_SANITIZE_THREAD + MK_CFLAGS += -fsanitize=thread -g + MK_CXXFLAGS += -fsanitize=thread -g + MK_LDFLAGS += 
-fsanitize=thread -g +endif + +ifdef LLAMA_SANITIZE_ADDRESS + MK_CFLAGS += -fsanitize=address -fno-omit-frame-pointer -g + MK_CXXFLAGS += -fsanitize=address -fno-omit-frame-pointer -g + MK_LDFLAGS += -fsanitize=address -fno-omit-frame-pointer -g +endif + +ifdef LLAMA_SANITIZE_UNDEFINED + MK_CFLAGS += -fsanitize=undefined -g + MK_CXXFLAGS += -fsanitize=undefined -g + MK_LDFLAGS += -fsanitize=undefined -g +endif + +ifdef LLAMA_SERVER_SSL + MK_CPPFLAGS += -DCPPHTTPLIB_OPENSSL_SUPPORT + MK_LDFLAGS += -lssl -lcrypto +endif + +ifndef GGML_NO_CPU_AARCH64 + MK_CPPFLAGS += -DGGML_USE_CPU_REPACK +endif + +# warnings +WARN_FLAGS = \ + -Wall \ + -Wextra \ + -Wpedantic \ + -Wcast-qual \ + -Wno-unused-function + +MK_CFLAGS += \ + $(WARN_FLAGS) \ + -Wshadow \ + -Wstrict-prototypes \ + -Wpointer-arith \ + -Wmissing-prototypes \ + -Werror=implicit-int \ + -Werror=implicit-function-declaration + +MK_CXXFLAGS += \ + $(WARN_FLAGS) \ + -Wmissing-declarations \ + -Wmissing-noreturn + +ifeq ($(LLAMA_FATAL_WARNINGS),1) + MK_CFLAGS += -Werror + MK_CXXFLAGS += -Werror +endif + +# this version of Apple ld64 is buggy +ifneq '' '$(findstring dyld-1015.7,$(shell $(CC) $(LDFLAGS) -Wl,-v 2>&1))' + MK_CPPFLAGS += -DHAVE_BUGGY_APPLE_LINKER +endif + +# OS specific +# TODO: support Windows +ifneq '' '$(filter $(UNAME_S),Linux Darwin FreeBSD NetBSD OpenBSD Haiku)' + MK_CFLAGS += -pthread + MK_CXXFLAGS += -pthread +endif + +# detect Windows +ifneq ($(findstring _NT,$(UNAME_S)),) + _WIN32 := 1 +endif + +# library name prefix +ifneq ($(_WIN32),1) + LIB_PRE := lib +endif + +# Dynamic Shared Object extension +ifneq ($(_WIN32),1) + DSO_EXT := .so +else + DSO_EXT := .dll +endif + +# Windows Sockets 2 (Winsock) for network-capable apps +ifeq ($(_WIN32),1) + LWINSOCK2 := -lws2_32 +endif + +ifdef LLAMA_GPROF + MK_CFLAGS += -pg + MK_CXXFLAGS += -pg +endif + +# Architecture specific +# TODO: probably these flags need to be tweaked on some architectures +# feel free to update the Makefile for your architecture and send a pull request or issue + +ifndef RISCV_CROSS_COMPILE + +ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64)) + # Use all CPU extensions that are available: + MK_CFLAGS += -march=native -mtune=native + HOST_CXXFLAGS += -march=native -mtune=native + + # Usage AMX build test + #MK_CFLAGS += -march=graniterapids -mtune=graniterapids + #HOST_CXXFLAGS += -march=graniterapids -mtune=graniterapids + + # Usage AVX-only + #MK_CFLAGS += -mfma -mf16c -mavx + #MK_CXXFLAGS += -mfma -mf16c -mavx + + # Usage SSSE3-only (Not is SSE3!) + #MK_CFLAGS += -mssse3 + #MK_CXXFLAGS += -mssse3 +endif + +ifneq '' '$(findstring mingw,$(shell $(CC) -dumpmachine))' + # The stack is only 16-byte aligned on Windows, so don't let gcc emit aligned moves. + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412 + # https://github.com/ggml-org/llama.cpp/issues/2922 + MK_CFLAGS += -Xassembler -muse-unaligned-vector-move + MK_CXXFLAGS += -Xassembler -muse-unaligned-vector-move + + # Target Windows 8 for PrefetchVirtualMemory + MK_CPPFLAGS += -D_WIN32_WINNT=0x602 +endif + +ifneq ($(filter aarch64%,$(UNAME_M)),) + # Apple M1, M2, etc. 
+ # Raspberry Pi 3, 4, Zero 2 (64-bit) + # Nvidia Jetson + MK_CFLAGS += -mcpu=native + MK_CXXFLAGS += -mcpu=native + JETSON_RELEASE_INFO = $(shell jetson_release) + ifdef JETSON_RELEASE_INFO + ifneq ($(filter TX2%,$(JETSON_RELEASE_INFO)),) + JETSON_EOL_MODULE_DETECT = 1 + CC = aarch64-unknown-linux-gnu-gcc + cxx = aarch64-unknown-linux-gnu-g++ + endif + endif +endif + +ifneq ($(filter armv6%,$(UNAME_M)),) + # Raspberry Pi 1, Zero + MK_CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access + MK_CXXFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access +endif + +ifneq ($(filter armv7%,$(UNAME_M)),) + # Raspberry Pi 2 + MK_CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations + MK_CXXFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations +endif + +ifneq ($(filter armv8%,$(UNAME_M)),) + # Raspberry Pi 3, 4, Zero 2 (32-bit) + MK_CFLAGS += -mfp16-format=ieee -mno-unaligned-access + MK_CXXFLAGS += -mfp16-format=ieee -mno-unaligned-access +endif + +ifneq ($(filter ppc64%,$(UNAME_M)),) + POWER9_M := $(shell grep "POWER9" /proc/cpuinfo) + ifneq (,$(findstring POWER9,$(POWER9_M))) + MK_CFLAGS += -mcpu=power9 + MK_CXXFLAGS += -mcpu=power9 + endif +endif + +ifneq ($(filter ppc64le%,$(UNAME_M)),) + MK_CFLAGS += -mcpu=powerpc64le + MK_CXXFLAGS += -mcpu=powerpc64le + CUDA_POWER_ARCH = 1 +endif + +ifneq ($(filter loongarch64%,$(UNAME_M)),) + MK_CFLAGS += -mlasx + MK_CXXFLAGS += -mlasx +endif + +ifneq ($(filter riscv64%,$(UNAME_M)),) + MK_CFLAGS += -march=rv64gcv -mabi=lp64d + MK_CXXFLAGS += -march=rv64gcv -mabi=lp64d +endif + +else # RISC-V CROSS COMPILATION + MK_CFLAGS += -march=rv64gcv -mabi=lp64d + MK_CXXFLAGS += -march=rv64gcv -mabi=lp64d +endif + +ifndef GGML_NO_ACCELERATE + # Mac OS - include Accelerate framework. 
+ # `-framework Accelerate` works both with Apple Silicon and Mac Intel + ifeq ($(UNAME_S),Darwin) + MK_CPPFLAGS += -DGGML_USE_ACCELERATE -DGGML_USE_BLAS -DGGML_BLAS_USE_ACCELERATE + MK_CPPFLAGS += -DACCELERATE_NEW_LAPACK + MK_CPPFLAGS += -DACCELERATE_LAPACK_ILP64 + MK_LDFLAGS += -framework Accelerate + OBJ_GGML_EXT += ggml/src/ggml-blas/ggml-blas.o + endif +endif # GGML_NO_ACCELERATE + +ifndef GGML_NO_OPENMP + MK_CPPFLAGS += -DGGML_USE_OPENMP + MK_CFLAGS += -fopenmp + MK_CXXFLAGS += -fopenmp +endif # GGML_NO_OPENMP + +ifdef GGML_OPENBLAS + MK_CPPFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas) + MK_CFLAGS += $(shell pkg-config --cflags-only-other openblas) + MK_LDFLAGS += $(shell pkg-config --libs openblas) + OBJ_GGML_EXT += ggml/src/ggml-blas/ggml-blas.o +endif # GGML_OPENBLAS + +ifdef GGML_OPENBLAS64 + MK_CPPFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas64) + MK_CFLAGS += $(shell pkg-config --cflags-only-other openblas64) + MK_LDFLAGS += $(shell pkg-config --libs openblas64) + OBJ_GGML_EXT += ggml/src/ggml-blas/ggml-blas.o +endif # GGML_OPENBLAS64 + +ifdef GGML_BLIS + MK_CPPFLAGS += -DGGML_USE_BLAS -DGGML_BLAS_USE_BLIS -I/usr/local/include/blis -I/usr/include/blis + MK_LDFLAGS += -lblis -L/usr/local/lib + OBJ_GGML_EXT += ggml/src/ggml-blas/ggml-blas.o +endif # GGML_BLIS + +ifdef GGML_NVPL + MK_CPPFLAGS += -DGGML_USE_BLAS -DGGML_BLAS_USE_NVPL -DNVPL_ILP64 -I/usr/local/include/nvpl_blas -I/usr/include/nvpl_blas + MK_LDFLAGS += -L/usr/local/lib -lnvpl_blas_core -lnvpl_blas_ilp64_gomp + OBJ_GGML_EXT += ggml/src/ggml-blas/ggml-blas.o +endif # GGML_NVPL + +ifndef GGML_NO_LLAMAFILE + MK_CPPFLAGS += -DGGML_USE_LLAMAFILE + OBJ_GGML_EXT += ggml/src/ggml-cpu/llamafile/sgemm.o +endif + +ifndef GGML_NO_AMX + MK_CPPFLAGS += -DGGML_USE_AMX + OBJ_GGML_EXT += ggml/src/ggml-cpu/amx/amx.o ggml/src/ggml-cpu/amx/mmq.o +endif + +# only necessary for the CPU backend files +MK_CPPFLAGS += -Iggml/src/ggml-cpu + +ifdef GGML_RPC + MK_CPPFLAGS += -DGGML_USE_RPC + OBJ_GGML_EXT += ggml/src/ggml-rpc.o +endif # GGML_RPC + +OBJ_CUDA_TMPL = $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/template-instances/fattn-mma*.cu)) +OBJ_CUDA_TMPL += $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/template-instances/mmq*.cu)) + +ifdef GGML_CUDA_FA_ALL_QUANTS + OBJ_CUDA_TMPL += $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/template-instances/fattn-vec*.cu)) +else + OBJ_CUDA_TMPL += $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu)) + OBJ_CUDA_TMPL += $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu)) + OBJ_CUDA_TMPL += $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/template-instances/fattn-vec*f16-f16.cu)) +endif # GGML_CUDA_FA_ALL_QUANTS + +ifdef GGML_CUDA + ifneq ('', '$(wildcard /opt/cuda)') + CUDA_PATH ?= /opt/cuda + else + CUDA_PATH ?= /usr/local/cuda + endif + + MK_CPPFLAGS += -DGGML_USE_CUDA -DGGML_CUDA_USE_GRAPHS -I$(CUDA_PATH)/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include + MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L$(CUDA_PATH)/lib64 -L/usr/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L$(CUDA_PATH)/lib64/stubs -L/usr/lib/wsl/lib + MK_NVCCFLAGS += -use_fast_math + + OBJ_GGML_EXT += ggml/src/ggml-cuda/ggml-cuda.o + OBJ_GGML_EXT += $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/*.cu)) + OBJ_GGML_EXT += $(OBJ_CUDA_TMPL) + +ifdef LLAMA_FATAL_WARNINGS + MK_NVCCFLAGS += -Werror all-warnings +endif # LLAMA_FATAL_WARNINGS + +ifndef 
JETSON_EOL_MODULE_DETECT + MK_NVCCFLAGS += --forward-unknown-to-host-compiler +endif # JETSON_EOL_MODULE_DETECT + +ifdef LLAMA_DEBUG + MK_NVCCFLAGS += -lineinfo +endif # LLAMA_DEBUG + +ifdef GGML_CUDA_DEBUG + MK_NVCCFLAGS += --device-debug +endif # GGML_CUDA_DEBUG + +ifdef GGML_CUDA_NVCC + NVCC = $(CCACHE) $(GGML_CUDA_NVCC) +else + NVCC = $(CCACHE) nvcc +endif # GGML_CUDA_NVCC + +ifdef CUDA_DOCKER_ARCH + MK_NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH) +else ifndef CUDA_POWER_ARCH + MK_NVCCFLAGS += -arch=native +endif # CUDA_DOCKER_ARCH + +ifdef GGML_CUDA_FORCE_MMQ + MK_NVCCFLAGS += -DGGML_CUDA_FORCE_MMQ +endif # GGML_CUDA_FORCE_MMQ + +ifdef GGML_CUDA_FORCE_CUBLAS + MK_NVCCFLAGS += -DGGML_CUDA_FORCE_CUBLAS +endif # GGML_CUDA_FORCE_CUBLAS + +ifdef GGML_CUDA_F16 + MK_NVCCFLAGS += -DGGML_CUDA_F16 +endif # GGML_CUDA_F16 + +ifdef GGML_CUDA_DMMV_F16 + MK_NVCCFLAGS += -DGGML_CUDA_F16 +endif # GGML_CUDA_DMMV_F16 + +ifdef GGML_CUDA_PEER_MAX_BATCH_SIZE + MK_NVCCFLAGS += -DGGML_CUDA_PEER_MAX_BATCH_SIZE=$(GGML_CUDA_PEER_MAX_BATCH_SIZE) +else + MK_NVCCFLAGS += -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 +endif # GGML_CUDA_PEER_MAX_BATCH_SIZE + +ifdef GGML_CUDA_NO_PEER_COPY + MK_NVCCFLAGS += -DGGML_CUDA_NO_PEER_COPY +endif # GGML_CUDA_NO_PEER_COPY + +ifdef GGML_CUDA_CCBIN + MK_NVCCFLAGS += -ccbin $(GGML_CUDA_CCBIN) +endif # GGML_CUDA_CCBIN + +ifdef GGML_CUDA_NO_FA + MK_NVCCFLAGS += -DGGML_CUDA_NO_FA +endif # GGML_CUDA_NO_FA + +ifdef GGML_CUDA_FA_ALL_QUANTS + MK_NVCCFLAGS += -DGGML_CUDA_FA_ALL_QUANTS +endif # GGML_CUDA_FA_ALL_QUANTS + +ifdef JETSON_EOL_MODULE_DETECT +define NVCC_COMPILE + $(NVCC) -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUDA -I/usr/local/cuda/include -I/opt/cuda/include -I/usr/local/cuda/targets/aarch64-linux/include -std=c++11 -O3 $(NVCCFLAGS) $(CPPFLAGS) -Xcompiler "$(CUDA_CXXFLAGS)" -c $< -o $@ +endef # NVCC_COMPILE +else +define NVCC_COMPILE + $(NVCC) $(NVCCFLAGS) $(CPPFLAGS) -Xcompiler "$(CUDA_CXXFLAGS)" -c $< -o $@ +endef # NVCC_COMPILE +endif # JETSON_EOL_MODULE_DETECT + +ggml/src/ggml-cuda/%.o: \ + ggml/src/ggml-cuda/%.cu \ + ggml/include/ggml.h \ + ggml/src/ggml-common.h \ + ggml/src/ggml-cuda/common.cuh + $(NVCC_COMPILE) + +ggml/src/ggml-cuda/ggml-cuda.o: \ + ggml/src/ggml-cuda/ggml-cuda.cu \ + ggml/include/ggml-cuda.h \ + ggml/include/ggml.h \ + ggml/include/ggml-backend.h \ + ggml/src/ggml-backend-impl.h \ + ggml/src/ggml-common.h \ + $(wildcard ggml/src/ggml-cuda/*.cuh) + $(NVCC_COMPILE) +endif # GGML_CUDA + +ifdef GGML_VULKAN + MK_CPPFLAGS += -DGGML_USE_VULKAN + MK_LDFLAGS += $(shell pkg-config --libs vulkan) + OBJ_GGML_EXT += ggml/src/ggml-vulkan.o ggml/src/ggml-vulkan-shaders.o + +ifdef GGML_VULKAN_CHECK_RESULTS + MK_CPPFLAGS += -DGGML_VULKAN_CHECK_RESULTS +endif + +ifdef GGML_VULKAN_DEBUG + MK_CPPFLAGS += -DGGML_VULKAN_DEBUG +endif + +ifdef GGML_VULKAN_MEMORY_DEBUG + MK_CPPFLAGS += -DGGML_VULKAN_MEMORY_DEBUG +endif + +ifdef GGML_VULKAN_PERF + MK_CPPFLAGS += -DGGML_VULKAN_PERF +endif + +ifdef GGML_VULKAN_VALIDATE + MK_CPPFLAGS += -DGGML_VULKAN_VALIDATE +endif + +ifdef GGML_VULKAN_RUN_TESTS + MK_CPPFLAGS += -DGGML_VULKAN_RUN_TESTS +endif + +GLSLC_CMD = glslc +_ggml_vk_genshaders_cmd = $(shell pwd)/vulkan-shaders-gen +_ggml_vk_header = ggml/src/ggml-vulkan-shaders.hpp +_ggml_vk_source = ggml/src/ggml-vulkan-shaders.cpp +_ggml_vk_input_dir = ggml/src/ggml-vulkan/vulkan-shaders +_ggml_vk_shader_deps = $(echo $(_ggml_vk_input_dir)/*.comp) + +ggml/src/ggml-vulkan.o: ggml/src/ggml-vulkan/ggml-vulkan.cpp ggml/include/ggml-vulkan.h 
$(_ggml_vk_header) $(_ggml_vk_source) + $(CXX) $(CXXFLAGS) $(shell pkg-config --cflags vulkan) -c $< -o $@ + +$(_ggml_vk_header): $(_ggml_vk_source) + +$(_ggml_vk_source): $(_ggml_vk_shader_deps) vulkan-shaders-gen + $(_ggml_vk_genshaders_cmd) \ + --glslc $(GLSLC_CMD) \ + --input-dir $(_ggml_vk_input_dir) \ + --target-hpp $(_ggml_vk_header) \ + --target-cpp $(_ggml_vk_source) + +vulkan-shaders-gen: ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp + $(CXX) $(CXXFLAGS) -o $@ $(LDFLAGS) ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp + +endif # GGML_VULKAN + +ifdef GGML_HIP + ifeq ($(wildcard /opt/rocm),) + ROCM_PATH ?= /usr + AMDGPU_TARGETS ?= $(shell $(shell which amdgpu-arch)) + else + ROCM_PATH ?= /opt/rocm + AMDGPU_TARGETS ?= $(shell $(ROCM_PATH)/llvm/bin/amdgpu-arch) + endif + + MK_CPPFLAGS += -DGGML_USE_HIP -DGGML_USE_CUDA + + MK_LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib + MK_LDFLAGS += -L$(ROCM_PATH)/lib64 -Wl,-rpath=$(ROCM_PATH)/lib64 + MK_LDFLAGS += -lhipblas -lamdhip64 -lrocblas + + HIPCC ?= $(CCACHE) $(ROCM_PATH)/bin/hipcc + + HIPFLAGS += $(addprefix --offload-arch=,$(AMDGPU_TARGETS)) + +ifdef GGML_CUDA_FORCE_MMQ + HIPFLAGS += -DGGML_CUDA_FORCE_MMQ +endif # GGML_CUDA_FORCE_MMQ + +ifdef GGML_CUDA_FORCE_CUBLAS + HIPFLAGS += -DGGML_CUDA_FORCE_CUBLAS +endif # GGML_CUDA_FORCE_CUBLAS + +ifdef GGML_CUDA_NO_PEER_COPY + HIPFLAGS += -DGGML_CUDA_NO_PEER_COPY +endif # GGML_CUDA_NO_PEER_COPY + +ifdef GGML_CUDA_NO_FA + HIPFLAGS += -DGGML_CUDA_NO_FA +endif # GGML_CUDA_NO_FA + + OBJ_GGML_EXT += ggml/src/ggml-cuda/ggml-cuda.o + OBJ_GGML_EXT += $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/*.cu)) + OBJ_GGML_EXT += $(OBJ_CUDA_TMPL) + +ggml/src/ggml-cuda/ggml-cuda.o: \ + ggml/src/ggml-cuda/ggml-cuda.cu \ + ggml/include/ggml-cuda.h \ + ggml/include/ggml.h \ + ggml/include/ggml-backend.h \ + ggml/src/ggml-backend-impl.h \ + ggml/src/ggml-common.h \ + $(wildcard ggml/src/ggml-cuda/*.cuh) + $(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $< + +ggml/src/ggml-cuda/%.o: \ + ggml/src/ggml-cuda/%.cu \ + ggml/include/ggml.h \ + ggml/src/ggml-common.h \ + ggml/src/ggml-cuda/common.cuh + $(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $< +endif # GGML_HIP + +ifdef GGML_MUSA + ifeq ($(wildcard /opt/musa),) + MUSA_PATH ?= /usr/local/musa + else + MUSA_PATH ?= /opt/musa + endif + MUSA_ARCHITECTURES ?= 21;22;31 + + MK_CPPFLAGS += -DGGML_USE_MUSA -DGGML_USE_CUDA + MK_LDFLAGS += -L$(MUSA_PATH)/lib -Wl,-rpath=$(MUSA_PATH)/lib + MK_LDFLAGS += -lmusa -lmusart -lmublas + + ifndef GGML_NO_OPENMP + # For Ubuntu Focal + MK_CPPFLAGS += -I/usr/lib/llvm-10/include/openmp + MK_LDFLAGS += -L/usr/lib/llvm-10/lib + # For Ubuntu Jammy + MK_CPPFLAGS += -I/usr/lib/llvm-14/lib/clang/14.0.0/include + MK_LDFLAGS += -L/usr/lib/llvm-14/lib + endif # GGML_NO_OPENMP + + CC := $(MUSA_PATH)/bin/clang + CXX := $(MUSA_PATH)/bin/clang++ + MCC := $(CCACHE) $(MUSA_PATH)/bin/mcc + + MUSAFLAGS = -fsigned-char -x musa -mtgpu + MUSAFLAGS += $(foreach arch,$(subst ;, ,$(MUSA_ARCHITECTURES)),--cuda-gpu-arch=mp_$(arch)) + +ifdef GGML_CUDA_FORCE_MMQ + MUSAFLAGS += -DGGML_CUDA_FORCE_MMQ +endif # GGML_CUDA_FORCE_MMQ + +ifdef GGML_CUDA_FORCE_CUBLAS + MUSAFLAGS += -DGGML_CUDA_FORCE_CUBLAS +endif # GGML_CUDA_FORCE_CUBLAS + +ifdef GGML_CUDA_F16 + MUSAFLAGS += -DGGML_CUDA_F16 +endif # GGML_CUDA_F16 + +ifdef GGML_CUDA_DMMV_F16 + MUSAFLAGS += -DGGML_CUDA_F16 +endif # GGML_CUDA_DMMV_F16 + +ifdef GGML_CUDA_PEER_MAX_BATCH_SIZE + MUSAFLAGS += -DGGML_CUDA_PEER_MAX_BATCH_SIZE=$(GGML_CUDA_PEER_MAX_BATCH_SIZE) +else + MUSAFLAGS += 
-DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 +endif # GGML_CUDA_PEER_MAX_BATCH_SIZE + +ifdef GGML_CUDA_NO_PEER_COPY + MUSAFLAGS += -DGGML_CUDA_NO_PEER_COPY +endif # GGML_CUDA_NO_PEER_COPY + +ifdef GGML_CUDA_NO_FA + MUSAFLAGS += -DGGML_CUDA_NO_FA +endif # GGML_CUDA_NO_FA + +ifdef GGML_CUDA_FA_ALL_QUANTS + MUSAFLAGS += -DGGML_CUDA_FA_ALL_QUANTS +endif # GGML_CUDA_FA_ALL_QUANTS + + OBJ_GGML_EXT += ggml/src/ggml-cuda/ggml-cuda.o + OBJ_GGML_EXT += $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/*.cu)) + OBJ_GGML_EXT += $(OBJ_CUDA_TMPL) + +ggml/src/ggml-cuda/ggml-cuda.o: \ + ggml/src/ggml-cuda/ggml-cuda.cu \ + ggml/include/ggml-cuda.h \ + ggml/include/ggml.h \ + ggml/include/ggml-backend.h \ + ggml/src/ggml-backend-impl.h \ + ggml/src/ggml-common.h \ + $(wildcard ggml/src/ggml-cuda/*.cuh) + $(MCC) $(CXXFLAGS) $(MUSAFLAGS) -c -o $@ $< + +ggml/src/ggml-cuda/%.o: \ + ggml/src/ggml-cuda/%.cu \ + ggml/include/ggml.h \ + ggml/src/ggml-common.h \ + ggml/src/ggml-cuda/common.cuh + $(MCC) $(CXXFLAGS) $(MUSAFLAGS) -c -o $@ $< +endif # GGML_MUSA + +ifdef GGML_METAL + MK_CPPFLAGS += -DGGML_USE_METAL + MK_LDFLAGS += -framework Foundation -framework Metal -framework MetalKit + OBJ_GGML_EXT += ggml/src/ggml-metal/ggml-metal.o + +ifdef GGML_METAL_USE_BF16 + MK_CPPFLAGS += -DGGML_METAL_USE_BF16 +endif # GGML_METAL_USE_BF16 +ifdef GGML_METAL_NDEBUG + MK_CPPFLAGS += -DGGML_METAL_NDEBUG +endif +ifdef GGML_METAL_EMBED_LIBRARY + MK_CPPFLAGS += -DGGML_METAL_EMBED_LIBRARY + OBJ_GGML_EXT += ggml/src/ggml-metal-embed.o +endif +endif # GGML_METAL + +ifdef GGML_METAL +ggml/src/ggml-metal/ggml-metal.o: \ + ggml/src/ggml-metal/ggml-metal.m \ + ggml/src/ggml-metal/ggml-metal-impl.h \ + ggml/include/ggml-metal.h \ + ggml/include/ggml.h + $(CC) $(CFLAGS) -c $< -o $@ + +ifdef GGML_METAL_EMBED_LIBRARY +ggml/src/ggml-metal-embed.o: \ + ggml/src/ggml-metal/ggml-metal.metal \ + ggml/src/ggml-metal/ggml-metal-impl.h \ + ggml/src/ggml-common.h + @echo "Embedding Metal library" + @sed -e '/__embed_ggml-common.h__/r ggml/src/ggml-common.h' -e '/__embed_ggml-common.h__/d' < ggml/src/ggml-metal/ggml-metal.metal > ggml/src/ggml-metal/ggml-metal-embed.metal.tmp + @sed -e '/#include "ggml-metal-impl.h"/r ggml/src/ggml-metal/ggml-metal-impl.h' -e '/#include "ggml-metal-impl.h"/d' < ggml/src/ggml-metal/ggml-metal-embed.metal.tmp > ggml/src/ggml-metal/ggml-metal-embed.metal + $(eval TEMP_ASSEMBLY=$(shell mktemp -d)) + @echo ".section __DATA, __ggml_metallib" > $(TEMP_ASSEMBLY)/ggml-metal-embed.s + @echo ".globl _ggml_metallib_start" >> $(TEMP_ASSEMBLY)/ggml-metal-embed.s + @echo "_ggml_metallib_start:" >> $(TEMP_ASSEMBLY)/ggml-metal-embed.s + @echo ".incbin \"ggml/src/ggml-metal/ggml-metal-embed.metal\"" >> $(TEMP_ASSEMBLY)/ggml-metal-embed.s + @echo ".globl _ggml_metallib_end" >> $(TEMP_ASSEMBLY)/ggml-metal-embed.s + @echo "_ggml_metallib_end:" >> $(TEMP_ASSEMBLY)/ggml-metal-embed.s + $(CC) $(CFLAGS) -c $(TEMP_ASSEMBLY)/ggml-metal-embed.s -o $@ + @rm -f ${TEMP_ASSEMBLY}/ggml-metal-embed.s + @rmdir ${TEMP_ASSEMBLY} +endif +endif # GGML_METAL + +DIR_GGML = ggml +DIR_LLAMA = src +DIR_COMMON = common + +OBJ_GGML = \ + $(DIR_GGML)/src/ggml.o \ + $(DIR_GGML)/src/ggml-alloc.o \ + $(DIR_GGML)/src/ggml-backend.o \ + $(DIR_GGML)/src/ggml-backend-reg.o \ + $(DIR_GGML)/src/ggml-opt.o \ + $(DIR_GGML)/src/ggml-quants.o \ + $(DIR_GGML)/src/ggml-threading.o \ + $(DIR_GGML)/src/ggml-cpu/ggml-cpu.o \ + $(DIR_GGML)/src/ggml-cpu/ggml-cpu_cpp.o \ + $(DIR_GGML)/src/ggml-cpu/repack.o \ + $(DIR_GGML)/src/ggml-cpu/ggml-cpu-hbm.o \ + $(DIR_GGML)/src/ggml-cpu/ggml-cpu-quants.o 
\ + $(DIR_GGML)/src/ggml-cpu/ggml-cpu-traits.o \ + $(OBJ_GGML_EXT) + +OBJ_LLAMA = \ + $(DIR_LLAMA)/llama.o \ + $(DIR_LLAMA)/llama-vocab.o \ + $(DIR_LLAMA)/llama-grammar.o \ + $(DIR_LLAMA)/llama-sampling.o \ + $(DIR_LLAMA)/unicode.o \ + $(DIR_LLAMA)/unicode-data.o + +OBJ_COMMON = \ + $(DIR_COMMON)/common.o \ + $(DIR_COMMON)/arg.o \ + $(DIR_COMMON)/log.o \ + $(DIR_COMMON)/console.o \ + $(DIR_COMMON)/ngram-cache.o \ + $(DIR_COMMON)/sampling.o \ + $(DIR_COMMON)/speculative.o \ + $(DIR_COMMON)/chat.o \ + $(DIR_COMMON)/build-info.o \ + $(DIR_COMMON)/json-schema-to-grammar.o + +OBJ_ALL = $(OBJ_GGML) $(OBJ_LLAMA) $(OBJ_COMMON) + +LIB_GGML = $(LIB_PRE)ggml$(DSO_EXT) +LIB_GGML_S = $(LIB_PRE)ggml.a + +LIB_LLAMA = $(LIB_PRE)llama$(DSO_EXT) +LIB_LLAMA_S = $(LIB_PRE)llama.a + +LIB_COMMON = $(LIB_PRE)common$(DSO_EXT) +LIB_COMMON_S = $(LIB_PRE)common.a + +LIB_ALL = $(LIB_GGML) $(LIB_LLAMA) $(LIB_COMMON) +LIB_ALL_S = $(LIB_GGML_S) $(LIB_LLAMA_S) $(LIB_COMMON_S) + +GF_CC := $(CC) +include scripts/get-flags.mk + +# combine build flags with cmdline overrides +override CPPFLAGS := $(MK_CPPFLAGS) $(CPPFLAGS) +override CFLAGS := $(CPPFLAGS) $(MK_CFLAGS) $(GF_CFLAGS) $(CFLAGS) +BASE_CXXFLAGS := $(MK_CXXFLAGS) $(CXXFLAGS) +override CXXFLAGS := $(BASE_CXXFLAGS) $(HOST_CXXFLAGS) $(GF_CXXFLAGS) $(CPPFLAGS) +override NVCCFLAGS := $(MK_NVCCFLAGS) $(NVCCFLAGS) +override LDFLAGS := $(MK_LDFLAGS) $(LDFLAGS) + +# identify CUDA host compiler +ifdef GGML_CUDA +GF_CC := $(NVCC) $(NVCCFLAGS) 2>/dev/null .c -Xcompiler +include scripts/get-flags.mk +CUDA_CXXFLAGS := $(BASE_CXXFLAGS) $(GF_CXXFLAGS) -Wno-pedantic +endif + +ifdef LLAMA_CURL +override CXXFLAGS := $(CXXFLAGS) -DLLAMA_USE_CURL +override LDFLAGS := $(LDFLAGS) -lcurl +endif + +# +# Print build information +# + +$(info I llama.cpp build info: ) +$(info I UNAME_S: $(UNAME_S)) +$(info I UNAME_P: $(UNAME_P)) +$(info I UNAME_M: $(UNAME_M)) +$(info I CFLAGS: $(CFLAGS)) +$(info I CXXFLAGS: $(CXXFLAGS)) +$(info I NVCCFLAGS: $(NVCCFLAGS)) +$(info I LDFLAGS: $(LDFLAGS)) +$(info I CC: $(shell $(CC) --version | head -n 1)) +$(info I CXX: $(shell $(CXX) --version | head -n 1)) +ifdef GGML_CUDA +$(info I NVCC: $(shell $(NVCC) --version | tail -n 1)) +CUDA_VERSION := $(shell $(NVCC) --version | grep -oP 'release (\K[0-9]+\.[0-9])') +ifeq ($(shell awk -v "v=$(CUDA_VERSION)" 'BEGIN { print (v < 11.7) }'),1) + +ifndef CUDA_DOCKER_ARCH +ifndef CUDA_POWER_ARCH +$(error I ERROR: For CUDA versions < 11.7 a target CUDA architecture must be explicitly provided via environment variable CUDA_DOCKER_ARCH, e.g. by running "export CUDA_DOCKER_ARCH=compute_XX" on Unix-like systems, where XX is the minimum compute capability that the code needs to run on. A list with compute capabilities can be found here: https://developer.nvidia.com/cuda-gpus ) +endif # CUDA_POWER_ARCH +endif # CUDA_DOCKER_ARCH + +endif # eq ($(shell echo "$(CUDA_VERSION) < 11.7" | bc),1) +endif # GGML_CUDA +$(info ) + +ifdef DEPRECATE_WARNING +$(info !!! DEPRECATION WARNING !!!) +$(info The following LLAMA_ options are deprecated and will be removed in the future. Use the GGML_ prefix instead) +$(info - LLAMA_CUDA) +$(info - LLAMA_METAL) +$(info - LLAMA_METAL_EMBED_LIBRARY) +$(info - LLAMA_OPENMP) +$(info - LLAMA_RPC) +$(info - LLAMA_SYCL) +$(info - LLAMA_SYCL_F16) +$(info - LLAMA_OPENBLAS) +$(info - LLAMA_OPENBLAS64) +$(info - LLAMA_BLIS) +$(info - LLAMA_NO_LLAMAFILE) +$(info - LLAMA_NO_ACCELERATE) +$(info - LLAMA_NO_OPENMP) +$(info - LLAMA_NO_METAL) +$(info - LLAMA_NO_CCACHE) +$(info ) +endif + +ifdef REMOVE_WARNING +$(info !!! 
REMOVAL WARNING !!!) +$(info The following LLAMA_ options have been removed and are no longer supported) +$(info - LLAMA_DISABLE_LOGS (https://github.com/ggml-org/llama.cpp/pull/9418)) +$(info - LLAMA_SERVER_VERBOSE (https://github.com/ggml-org/llama.cpp/pull/9418)) +$(info ) +endif + +# +# Build libraries +# + +# Libraries +LIB_GGML = libggml.so +LIB_GGML_S = libggml.a + +LIB_LLAMA = libllama.so +LIB_LLAMA_S = libllama.a + +LIB_COMMON = libcommon.so +LIB_COMMON_S = libcommon.a + +# Targets +BUILD_TARGETS += $(LIB_GGML) $(LIB_GGML_S) $(LIB_LLAMA) $(LIB_LLAMA_S) $(LIB_COMMON) $(LIB_COMMON_S) + +# Dependency files +DEP_FILES = $(OBJ_GGML:.o=.d) $(OBJ_LLAMA:.o=.d) $(OBJ_COMMON:.o=.d) + +# Default target +all: $(BUILD_TARGETS) + +# force c++ build for source file that have same name as c file +# Note: need this exception because `ggml-cpu.c` and `ggml-cpu.cpp` both produce the same obj/dep files +$(DIR_GGML)/%_cpp.o: $(DIR_GGML)/%.cpp + $(CXX) $(CXXFLAGS) -MMD -c $< -o $@ + +# Rules for building object files +$(DIR_GGML)/%.o: $(DIR_GGML)/%.c + $(CC) $(CFLAGS) -MMD -c $< -o $@ + +$(DIR_GGML)/%.o: $(DIR_GGML)/%.cpp + $(CXX) $(CXXFLAGS) -MMD -c $< -o $@ + +$(DIR_LLAMA)/%.o: $(DIR_LLAMA)/%.cpp + $(CXX) $(CXXFLAGS) -MMD -c $< -o $@ + +$(DIR_COMMON)/%.o: $(DIR_COMMON)/%.cpp + $(CXX) $(CXXFLAGS) -MMD -c $< -o $@ + +# Rules for building libraries +$(LIB_GGML): $(OBJ_GGML) + $(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS) + +$(LIB_GGML_S): $(OBJ_GGML) + ar rcs $(LIB_GGML_S) $^ + +$(LIB_LLAMA): $(OBJ_LLAMA) $(LIB_GGML) + $(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS) + +$(LIB_LLAMA_S): $(OBJ_LLAMA) + ar rcs $(LIB_LLAMA_S) $^ + +$(LIB_COMMON): $(OBJ_COMMON) $(LIB_LLAMA) $(LIB_GGML) + $(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS) + +$(LIB_COMMON_S): $(OBJ_COMMON) + ar rcs $(LIB_COMMON_S) $^ + +# Include dependency files +-include $(DEP_FILES) + +# Clean generated server assets +clean-server-assets: + find tools/server -type f -name "*.js.hpp" -delete + find tools/server -type f -name "*.mjs.hpp" -delete + find tools/server -type f -name "*.css.hpp" -delete + find tools/server -type f -name "*.html.hpp" -delete + +# Clean rule +clean: clean-server-assets + rm -vrf $(BUILD_TARGETS) $(TEST_TARGETS) + rm -rvf *.a *.dll *.so *.dot + find ggml src common tests examples pocs -type f -name "*.o" -delete + find ggml src common tests examples pocs -type f -name "*.d" -delete + +# +# Examples +# + +# $< is the first prerequisite, i.e. the source file. +# Explicitly compile this to an object file so that it can be cached with ccache. +# The source file is then filtered out from $^ (the list of all prerequisites) and the object file is added instead. + +# Helper function that replaces .c, .cpp, and .cu file endings with .o: +GET_OBJ_FILE = $(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(patsubst %.cu,%.o,$(1)))) + +llama-cli: tools/main/main.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + @echo + @echo '==== Run ./llama-cli -h for help. 
====' + @echo + +llama-run: tools/run/run.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-simple: examples/simple/simple.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-simple-chat: examples/simple-chat/simple-chat.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-tokenize: tools/tokenize/tokenize.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-batched: examples/batched/batched.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-batched-bench: tools/batched-bench/batched-bench.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-quantize: tools/quantize/quantize.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-quantize-stats: tools/quantize-stats/quantize-stats.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-perplexity: tools/perplexity/perplexity.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-imatrix: tools/imatrix/imatrix.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-embedding: examples/embedding/embedding.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-gritlm: examples/gritlm/gritlm.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-save-load-state: examples/save-load-state/save-load-state.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-gguf: examples/gguf/gguf.cpp \ + $(OBJ_GGML) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +examples/gguf-hash/deps/sha1/sha1.o: \ + examples/gguf-hash/deps/sha1/sha1.c + $(CC) $(CFLAGS) -Iexamples/gguf-hash/deps -c $< -o $@ + +examples/gguf-hash/deps/xxhash/xxhash.o: \ + examples/gguf-hash/deps/xxhash/xxhash.c + $(CC) $(CFLAGS) -Iexamples/gguf-hash/deps -c $< -o $@ + +examples/gguf-hash/deps/sha256/sha256.o: \ + examples/gguf-hash/deps/sha256/sha256.c + $(CC) $(CFLAGS) -Iexamples/gguf-hash/deps -c $< -o $@ + +llama-gguf-hash: examples/gguf-hash/gguf-hash.cpp examples/gguf-hash/deps/sha1/sha1.o examples/gguf-hash/deps/xxhash/xxhash.o examples/gguf-hash/deps/sha256/sha256.o\ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) 
-Iexamples/gguf-hash/deps -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-gguf-split: tools/gguf-split/gguf-split.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-eval-callback: examples/eval-callback/eval-callback.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-cvector-generator: tools/cvector-generator/cvector-generator.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-bench: tools/llama-bench/llama-bench.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-export-lora: tools/export-lora/export-lora.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-retrieval: examples/retrieval/retrieval.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-speculative: examples/speculative/speculative.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-parallel: examples/parallel/parallel.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-lookahead: examples/lookahead/lookahead.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-lookup: examples/lookup/lookup.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-lookup-create: examples/lookup/lookup-create.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-lookup-merge: examples/lookup/lookup-merge.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-lookup-stats: examples/lookup/lookup-stats.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-passkey: examples/passkey/passkey.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-gbnf-validator: examples/gbnf-validator/gbnf-validator.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) 
$(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +ifdef GGML_RPC +rpc-server: tools/rpc/rpc-server.cpp \ + $(OBJ_GGML) + $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) +endif # GGML_RPC + +llama-server: \ + tools/server/server.cpp \ + tools/server/utils.hpp \ + tools/server/httplib.h \ + tools/server/index.html.hpp \ + tools/server/loading.html.hpp \ + common/chat.cpp \ + common/chat.h \ + common/chat-template.hpp \ + common/json.hpp \ + common/minja.hpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h %.hpp $<,$^) -Itools/server $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) $(LWINSOCK2) + +# Portable equivalent of `cd tools/server/public && xxd -i $(notdir $<) ../$(notdir $<).hpp`: +tools/server/%.hpp: tools/server/public/% FORCE Makefile + @( export NAME=$(subst .,_,$(subst -,_,$(notdir $<))) && \ + echo "unsigned char $${NAME}[] = {" && \ + cat $< | od -v -t x1 -An | sed -E 's/([0-9a-fA-F]+)/0x\1, /g' && \ + echo "};" && \ + echo "unsigned int $${NAME}_len = $(shell cat $< | wc -c );" \ + ) > $@ + +llama-gen-docs: examples/gen-docs/gen-docs.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +libllava.a: tools/mtmd/llava.cpp \ + tools/mtmd/llava.h \ + tools/mtmd/clip.cpp \ + tools/mtmd/clip.h \ + common/stb_image.h \ + common/base64.hpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ -Wno-cast-qual + +llama-llava-cli: tools/mtmd/llava-cli.cpp \ + tools/mtmd/llava.cpp \ + tools/mtmd/llava.h \ + tools/mtmd/clip.cpp \ + tools/mtmd/clip.h \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual + +llama-minicpmv-cli: tools/mtmd/minicpmv-cli.cpp \ + tools/mtmd/llava.cpp \ + tools/mtmd/llava.h \ + tools/mtmd/clip.cpp \ + tools/mtmd/clip.h \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual + +llama-qwen2vl-cli: tools/mtmd/qwen2vl-cli.cpp \ + tools/mtmd/llava.cpp \ + tools/mtmd/llava.h \ + tools/mtmd/clip.cpp \ + tools/mtmd/clip.h \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual + +ifeq ($(UNAME_S),Darwin) +swift: examples/batched.swift + (cd examples/batched.swift; make build) +endif + +common/build-info.cpp: $(wildcard .git/index) scripts/build-info.sh + @sh scripts/build-info.sh "$(CC)" > $@.tmp + @if ! 
cmp -s $@.tmp $@; then \ + mv $@.tmp $@; \ + else \ + rm $@.tmp; \ + fi + +common/build-info.o: common/build-info.cpp + $(CXX) $(CXXFLAGS) -c $(filter-out %.h,$^) -o $@ + +# +# Tests +# + +tests: $(TEST_TARGETS) + +tests/test-arg-parser: tests/test-arg-parser.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-llama-grammar: tests/test-llama-grammar.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-log: tests/test-log.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-grammar-parser: tests/test-grammar-parser.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-grammar-integration: tests/test-grammar-integration.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-double-float: tests/test-double-float.cpp + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-json-schema-to-grammar: tests/test-json-schema-to-grammar.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -Itools/server -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-chat: tests/test-chat.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -Itools/server -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-opt: tests/test-opt.cpp \ + $(OBJ_GGML) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-quantize-fns: tests/test-quantize-fns.cpp \ + $(OBJ_GGML) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-quantize-perf: tests/test-quantize-perf.cpp \ + $(OBJ_GGML) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-sampling: tests/test-sampling.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-tokenizer-0: tests/test-tokenizer-0.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-tokenizer-1-bpe: tests/test-tokenizer-1-bpe.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-tokenizer-1-spm: tests/test-tokenizer-1-spm.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-rope: tests/test-rope.cpp ggml/src/ggml.o \ + $(OBJ_GGML) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) 
$(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-c.o: tests/test-c.c include/llama.h + $(CC) $(CFLAGS) -c $(filter-out %.h,$^) -o $@ + +tests/test-backend-ops: tests/test-backend-ops.cpp \ + $(OBJ_GGML) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-model-load-cancel: tests/test-model-load-cancel.cpp tests/get-model.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-autorelease: tests/test-autorelease.cpp tests/get-model.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-chat-template: tests/test-chat-template.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# +# PoCs +# + +llama-vdot: pocs/vdot/vdot.cpp ggml/src/ggml.o \ + $(OBJ_GGML) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +llama-q8dot: pocs/vdot/q8dot.cpp ggml/src/ggml.o \ + $(OBJ_GGML) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# +# Deprecated binaries that we want to keep around long enough for people to migrate to the new filenames, then these can be removed. +# +# Mark legacy binary targets as .PHONY so that they are always checked. +.PHONY: FORCE main quantize perplexity embedding server + +# Define the object file target +examples/deprecation-warning/deprecation-warning.o: examples/deprecation-warning/deprecation-warning.cpp + $(CXX) $(CXXFLAGS) -c $< -o $@ + +# NOTE: We currently will always build the deprecation-warning `main` and `server` binaries to help users migrate. +# Eventually we will want to remove these target from building all the time. +main: examples/deprecation-warning/deprecation-warning.o + $(CXX) $(CXXFLAGS) $< -o $@ $(LDFLAGS) + @echo "NOTICE: The 'main' binary is deprecated. Please use 'llama-cli' instead." + +server: examples/deprecation-warning/deprecation-warning.o + $(CXX) $(CXXFLAGS) $< -o $@ $(LDFLAGS) + @echo "NOTICE: The 'server' binary is deprecated. Please use 'llama-server' instead." + +quantize: examples/deprecation-warning/deprecation-warning.o +ifneq (,$(wildcard quantize)) + $(CXX) $(CXXFLAGS) $< -o $@ $(LDFLAGS) + @echo "#########" + @echo "WARNING: The 'quantize' binary is deprecated. Please use 'llama-quantize' instead." + @echo " Remove the 'quantize' binary to remove this warning." + @echo "#########" +endif + +perplexity: examples/deprecation-warning/deprecation-warning.o +ifneq (,$(wildcard perplexity)) + $(CXX) $(CXXFLAGS) $< -o $@ $(LDFLAGS) + @echo "#########" + @echo "WARNING: The 'perplexity' binary is deprecated. Please use 'llama-perplexity' instead." + @echo " Remove the 'perplexity' binary to remove this warning." + @echo "#########" +endif + +embedding: examples/deprecation-warning/deprecation-warning.o +ifneq (,$(wildcard embedding)) + $(CXX) $(CXXFLAGS) $< -o $@ $(LDFLAGS) + @echo "#########" + @echo "WARNING: The 'embedding' binary is deprecated. Please use 'llama-embedding' instead." + @echo " Remove the 'embedding' binary to remove this warning." 
+ @echo "#########" endif diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index e186fdf3c03f7..727139cf385b7 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -105,7 +105,7 @@ message(DEBUG "GGML_NATIVE_DEFAULT : ${GGML_NATIVE_DEFAULT}") message(DEBUG "INS_ENB : ${INS_ENB}") option(GGML_CPU_HBM "ggml: use memkind for CPU HBM" OFF) -option(GGML_CPU_AARCH64 "ggml: use runtime weight conversion of Q4_0 to Q4_X_X" ON) +option(GGML_CPU_REPACK "ggml: use runtime weight conversion of Q4_0 to Q4_X_X" ON) option(GGML_CPU_KLEIDIAI "ggml: use KleidiAI optimized kernels if applicable" OFF) option(GGML_SSE42 "ggml: enable SSE 4.2" ${INS_ENB}) option(GGML_AVX "ggml: enable AVX" ${INS_ENB}) diff --git a/ggml/src/ggml-common.h b/ggml/src/ggml-common.h index 086c822d73a89..fbb04426abe7e 100644 --- a/ggml/src/ggml-common.h +++ b/ggml/src/ggml-common.h @@ -1074,6 +1074,10 @@ GGML_TABLE_BEGIN(uint32_t, iq3s_grid, 512) 0x0f090307, 0x0f090501, 0x0f090b01, 0x0f0b0505, 0x0f0b0905, 0x0f0d0105, 0x0f0d0703, 0x0f0f0101, GGML_TABLE_END() +GGML_TABLE_BEGIN(int8_t, kvalues_iq4nl, 16) + -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113, +GGML_TABLE_END() + #define NGRID_IQ1S 2048 #define IQ1S_DELTA 0.125f #define IQ1M_DELTA 0.125f diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index 33f66af8d031b..77dfc10df2057 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -10,14 +10,14 @@ function(ggml_add_cpu_backend_variant_impl tag_name) list (APPEND GGML_CPU_SOURCES ggml-cpu/ggml-cpu.c ggml-cpu/ggml-cpu.cpp - ggml-cpu/ggml-cpu-aarch64.cpp - ggml-cpu/ggml-cpu-aarch64.h - ggml-cpu/ggml-cpu-hbm.cpp - ggml-cpu/ggml-cpu-hbm.h - ggml-cpu/ggml-cpu-quants.c - ggml-cpu/ggml-cpu-quants.h - ggml-cpu/ggml-cpu-traits.cpp - ggml-cpu/ggml-cpu-traits.h + ggml-cpu/repack.cpp + ggml-cpu/repack.h + ggml-cpu/hbm.cpp + ggml-cpu/hbm.h + ggml-cpu/quants.c + ggml-cpu/quants.h + ggml-cpu/traits.cpp + ggml-cpu/traits.h ggml-cpu/amx/amx.cpp ggml-cpu/amx/amx.h ggml-cpu/amx/mmq.cpp @@ -84,6 +84,11 @@ function(ggml_add_cpu_backend_variant_impl tag_name) if (GGML_SYSTEM_ARCH STREQUAL "ARM") message(STATUS "ARM detected") + list(APPEND GGML_CPU_SOURCES + ggml-cpu/arch/arm/quants.c + ggml-cpu/arch/arm/repack.cpp + ) + if (MSVC AND NOT CMAKE_C_COMPILER_ID STREQUAL "Clang") message(FATAL_ERROR "MSVC is not supported for ARM, use clang") else() @@ -167,6 +172,11 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endif() elseif (GGML_SYSTEM_ARCH STREQUAL "x86") message(STATUS "x86 detected") + list(APPEND GGML_CPU_SOURCES + ggml-cpu/arch/x86/quants.c + ggml-cpu/arch/x86/repack.cpp + ) + if (MSVC) # instruction set detection for MSVC only if (GGML_NATIVE) @@ -302,7 +312,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) # Since multiple variants of the CPU backend may be included in the same # build, using set_source_files_properties() to set the arch flags is not possible set(GGML_CPU_FEATS_NAME ${GGML_CPU_NAME}-feats) - add_library(${GGML_CPU_FEATS_NAME} OBJECT ggml-cpu/cpu-feats-x86.cpp) + add_library(${GGML_CPU_FEATS_NAME} OBJECT ggml-cpu/arch/x86/cpu-feats.cpp) target_include_directories(${GGML_CPU_FEATS_NAME} PRIVATE . .. 
../include) target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE ${ARCH_DEFINITIONS}) target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE GGML_BACKEND_DL GGML_BACKEND_BUILD GGML_BACKEND_SHARED) @@ -311,6 +321,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endif() elseif (GGML_SYSTEM_ARCH STREQUAL "PowerPC") message(STATUS "PowerPC detected") + list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/powerpc/quants.c) if (GGML_NATIVE) if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") file(READ "/proc/cpuinfo" POWER10_M) @@ -338,6 +349,8 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endif() elseif (GGML_SYSTEM_ARCH STREQUAL "loongarch64") message(STATUS "loongarch64 detected") + list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/loongarch/quants.c) + list(APPEND ARCH_FLAGS -march=loongarch64) if (GGML_LASX) list(APPEND ARCH_FLAGS -mlasx) @@ -347,6 +360,10 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endif() elseif (GGML_SYSTEM_ARCH STREQUAL "riscv64") message(STATUS "riscv64 detected") + list(APPEND GGML_CPU_SOURCES + ggml-cpu/arch/riscv/quants.c + ggml-cpu/arch/riscv/repack.cpp + ) if (GGML_RVV) if (GGML_XTHEADVECTOR) list(APPEND ARCH_FLAGS -march=rv64gc_xtheadvector -mabi=lp64d) @@ -358,6 +375,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endif() elseif (GGML_SYSTEM_ARCH STREQUAL "s390x") message(STATUS "s390x detected") + list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/s390/quants.c) file(READ "/proc/cpuinfo" CPUINFO_CONTENTS) string(REGEX REPLACE "machine[ \t\r\n]*=[ \t\r\n]*([0-9]+)" "\\1" S390X_M ${CPUINFO_CONTENTS}) @@ -381,12 +399,16 @@ function(ggml_add_cpu_backend_variant_impl tag_name) if (GGML_VXE) list(APPEND ARCH_FLAGS -mvx -mzvector) endif() + elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "wasm") + message(STATUS "Wasm detected") + list (APPEND GGML_CPU_SOURCES ggml-cpu/arch/wasm/quants.c) else() - message(STATUS "Unknown architecture") + message(WARNING "Unknown CPU architecture. 
Falling back to generic implementations.") + list(APPEND ARCH_FLAGS -DGGML_CPU_GENERIC) endif() - if (GGML_CPU_AARCH64) - target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_AARCH64) + if (GGML_CPU_REPACK) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_REPACK) endif() if (GGML_CPU_KLEIDIAI) diff --git a/ggml/src/ggml-cpu/amx/amx.cpp b/ggml/src/ggml-cpu/amx/amx.cpp index 0f067137df006..258857b00754a 100644 --- a/ggml/src/ggml-cpu/amx/amx.cpp +++ b/ggml/src/ggml-cpu/amx/amx.cpp @@ -5,7 +5,7 @@ #include "ggml-backend.h" #include "ggml-impl.h" #include "ggml-cpu.h" -#include "ggml-cpu-traits.h" +#include "traits.h" #if defined(__gnu_linux__) #include diff --git a/ggml/src/ggml-cpu/amx/mmq.cpp b/ggml/src/ggml-cpu/amx/mmq.cpp index 0ea91596bc7e2..cec34eb6416ac 100644 --- a/ggml/src/ggml-cpu/amx/mmq.cpp +++ b/ggml/src/ggml-cpu/amx/mmq.cpp @@ -8,7 +8,7 @@ #include "mmq.h" #include "ggml-impl.h" #include "ggml-cpu-impl.h" -#include "ggml-cpu-quants.h" +#include "quants.h" #include "ggml-quants.h" #include #include diff --git a/ggml/src/ggml-cpu/arch/arm/quants.c b/ggml/src/ggml-cpu/arch/arm/quants.c new file mode 100644 index 0000000000000..b0909dac08765 --- /dev/null +++ b/ggml/src/ggml-cpu/arch/arm/quants.c @@ -0,0 +1,4113 @@ +#define GGML_COMMON_IMPL_C +#include "ggml-common.h" +#include "ggml-quants.h" +#include "ggml-impl.h" +#include "ggml-cpu.h" + +#include "../../quants.h" +#include "../../ggml-cpu-impl.h" + +#include +#include +#include +#include +#include // for qsort +#include // for GGML_ASSERT + +#define GROUP_MAX_EPS 1e-15f +#define GROUP_MAX_EPS_IQ3_XXS 1e-8f +#define GROUP_MAX_EPS_IQ2_S 1e-8f +#define GROUP_MAX_EPS_IQ1_M 1e-7f +#define GROUP_MAX_EPS_IQ1_S 1e-12f + +#define UNUSED GGML_UNUSED + +#if defined(__ARM_NEON) +#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s +#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s) +#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s) +#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s) +#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s) +#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s) +#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s) +#define B8(c,s ) B7(c,s, c), B7(c,s, s) + +// precomputed tables for expanding 8bits to 8 bytes: +static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4 +static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 +#endif + +void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__ARM_NEON) + for (int i = 0; i < nb; i++) { + float32x4_t srcv [8]; + float32x4_t asrcv[8]; + float32x4_t amaxv[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); + + const float amax = vmaxvq_f32(amaxv[0]); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 
1.0f/d : 0.0f; + + y[i].d = GGML_FP32_TO_FP16(d); + + for (int j = 0; j < 8; j++) { + const float32x4_t v = vmulq_n_f32(srcv[j], id); + const int32x4_t vi = vcvtnq_s32_f32(v); + + y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); + y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); + y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); + y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); + } + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_0_ref(x, y, k); +#endif +} + +void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK8_1 == 0); + const int nb = k / QK8_1; + + block_q8_1 * GGML_RESTRICT y = vy; +#if defined(__ARM_NEON) + for (int i = 0; i < nb; i++) { + float32x4_t srcv [8]; + float32x4_t asrcv[8]; + float32x4_t amaxv[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); + + const float amax = vmaxvq_f32(amaxv[0]); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = GGML_FP32_TO_FP16(d); + + int32x4_t accv = vdupq_n_s32(0); + + for (int j = 0; j < 8; j++) { + const float32x4_t v = vmulq_n_f32(srcv[j], id); + const int32x4_t vi = vcvtnq_s32_f32(v); + + y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); + y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); + y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); + y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); + + accv = vaddq_s32(accv, vi); + } + + y[i].s = GGML_FP32_TO_FP16(d * vaddvq_s32(accv)); + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_1_ref(x, y, k); +#endif +} + +// placeholder implementation for Apple targets +void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q8_K_ref(x, y, k); +} + +//===================================== Dot products ================================= + +void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); +#if defined(__ARM_FEATURE_MATMUL_INT8) + assert((nrc == 2) || (nrc == 1)); +#else + assert(nrc == 1); +#endif + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + const block_q4_0 * GGML_RESTRICT vx0 = vx; + const block_q4_0 * GGML_RESTRICT vx1 = (const block_q4_0 *) ((const uint8_t*)vx + bx); + const block_q8_0 * GGML_RESTRICT vy0 = vy; + const block_q8_0 * GGML_RESTRICT vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); + + float32x4_t sumv0 = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; i++) { + const block_q4_0 * GGML_RESTRICT b_x0 = &vx0[i]; + const block_q4_0 * GGML_RESTRICT b_x1 = &vx1[i]; + const block_q8_0 * GGML_RESTRICT b_y0 = &vy0[i]; + const block_q8_0 * GGML_RESTRICT b_y1 = &vy1[i]; + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + const int8x16_t s8b = vdupq_n_s8(0x8); + + const uint8x16_t v0_0 = vld1q_u8(b_x0->qs); + const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); + + // 4-bit -> 8-bit + const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t 
v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // sub 8 + const int8x16_t x0_l = vsubq_s8(v0_0l, s8b); + const int8x16_t x0_h = vsubq_s8(v0_0h, s8b); + const int8x16_t x1_l = vsubq_s8(v0_1l, s8b); + const int8x16_t x1_h = vsubq_s8(v0_1h, s8b); + + // load y + const int8x16_t y0_l = vld1q_s8(b_y0->qs); + const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); + const int8x16_t y1_l = vld1q_s8(b_y1->qs); + const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); + + float32_t _scale[4] = { + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) + }; + float32x4_t scale = vld1q_f32(_scale); + + int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + + int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + + int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + + int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + + sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), + l1, r1)), l2, r2)), l3, r3))), scale); + } + + float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); + float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); + + vst1_f32(s, vget_low_f32 (sumv2)); + vst1_f32(s + bs, vget_high_f32(sumv2)); + + return; + } +#endif + + int ib = 0; + float sumf = 0; + +#if defined(__ARM_FEATURE_SVE) + svfloat32_t sumv0 = svdup_n_f32(0.0f); + svfloat32_t sumv1 = svdup_n_f32(0.0f); + + const int vector_length = ggml_cpu_get_sve_cnt()*8; + + // VLA Implementation using switch case + switch (vector_length) { + case 128: + { + // predicate for activating higher lanes for 4 float32 elements + const svbool_t ph4 = svptrue_pat_b32(SV_VL4); + + for (; ib + 1 < nb; ib += 2) { + const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + // load x + const svuint8_t qx0r = svld1rq_u8(svptrue_b8(), x0->qs); + const svuint8_t qx1r = svld1rq_u8(svptrue_b8(), x1->qs); + + // 4-bit -> 8-bit + const svint8_t qx0l = svreinterpret_s8_u8(svand_n_u8_m(svptrue_b8(), qx0r, 0x0F)); + const svint8_t qx0h = svreinterpret_s8_u8(svlsr_n_u8_m(svptrue_b8(), qx0r, 0x04)); + const svint8_t qx1l = svreinterpret_s8_u8(svand_n_u8_m(svptrue_b8(), qx1r, 0x0F)); + const svint8_t qx1h = svreinterpret_s8_u8(svlsr_n_u8_m(svptrue_b8(), qx1r, 0x04)); + + // sub 8 + const svint8_t qx0ls = svsub_n_s8_x(svptrue_b8(), qx0h, 8); + const svint8_t qx0hs = svsub_n_s8_x(svptrue_b8(), qx0l, 8); + const svint8_t qx1ls = svsub_n_s8_x(svptrue_b8(), qx1h, 8); + const svint8_t qx1hs = svsub_n_s8_x(svptrue_b8(), qx1l, 8); + + // load y + const svint8_t qy0h = svld1_s8(svptrue_b8(), y0->qs); + const svint8_t qy0l = 
svld1_s8(svptrue_b8(), y0->qs + 16); + const svint8_t qy1h = svld1_s8(svptrue_b8(), y1->qs); + const svint8_t qy1l = svld1_s8(svptrue_b8(), y1->qs + 16); + + // dot product + sumv0 = svmla_n_f32_x(ph4, sumv0, svcvt_f32_s32_x(ph4, svadd_x(ph4, + svdot_s32(svdup_n_s32(0), qx0ls, qy0l), + svdot_s32(svdup_n_s32(0), qx0hs, qy0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = svmla_n_f32_x(ph4, sumv1, svcvt_f32_s32_x(ph4, svadd_x(ph4, + svdot_s32(svdup_n_s32(0), qx1ls, qy1l), + svdot_s32(svdup_n_s32(0), qx1hs, qy1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + } + + sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); + } break; + case 256: + { + // predicate for activating higher lanes for 16 int8 elements + const svbool_t ph16 = svptrue_pat_b8(SV_VL16); + // predicate for activating lower lanes for 16 int8 elements + const svbool_t pl16 = svnot_b_z(svptrue_b8(), ph16); + + for (; ib + 1 < nb; ib += 2) { + const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + // load x + const svuint8_t qx0r = svld1rq_u8(svptrue_b8(), x0->qs); + const svuint8_t qx1r = svld1rq_u8(svptrue_b8(), x1->qs); + + // 4-bit -> 8-bit + const svint8_t qx0 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx0r, 0x0F), 0x04)); + const svint8_t qx1 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx1r, 0x0F), 0x04)); + + // sub 8 + const svint8_t qx0s = svsub_n_s8_x(svptrue_b8(), qx0, 8); + const svint8_t qx1s = svsub_n_s8_x(svptrue_b8(), qx1, 8); + + // load y + const svint8_t qy0 = svld1_s8(svptrue_b8(), y0->qs); + const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs); + + // dot product + sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(), + svdot_s32(svdup_n_s32(0), qx0s, qy0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(), + svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + } + + sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); + } break; + case 512: + { + // predicate for activating higher lanes for 32 int8 elements + const svbool_t ph32 = svptrue_pat_b8(SV_VL32); + + // predicate for activating higher lanes for 16 int8 elements + const svbool_t ph16 = svptrue_pat_b8(SV_VL16); + // predicate for activating lower lanes for 16 int8 elements from first 32 int8 activated lanes + const svbool_t pl16 = svnot_b_z(ph32, ph16); + + for (; ib + 1 < nb; ib += 2) { + const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + // load x + const svuint8_t qx0r = svld1rq_u8(ph32, x0->qs); + const svuint8_t qx1r = svld1rq_u8(ph32, x1->qs); + + // 4-bit -> 8-bit + const svint8_t qx0 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx0r, 0x0F), 0x04)); + const svint8_t qx1 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx1r, 0x0F), 0x04)); + + // sub 8 + const svint8_t qx0s = svsub_n_s8_x(ph32, qx0, 8); + const svint8_t qx1s = svsub_n_s8_x(ph32, qx1, 8); + + // load y + const svint8_t qy0 = svld1_s8(ph32, y0->qs); + const svint8_t qy1 = svld1_s8(ph32, y1->qs); + + // dot product + sumv0 = svmla_n_f32_x(ph32, sumv0, svcvt_f32_s32_x(ph32, + svdot_s32(svdup_n_s32(0), qx0s, 
qy0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = svmla_n_f32_x(ph32, sumv1, svcvt_f32_s32_x(ph32, + svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + } + + sumf = svaddv_f32(ph32, svadd_f32_x(ph32, sumv0, sumv1)); + } break; + default: + assert(false && "Unsupported vector length"); + break; + } + +#elif defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); + + for (; ib + 1 < nb; ib += 2) { + const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + const int8x16_t s8b = vdupq_n_s8(0x8); + + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v0_1 = vld1q_u8(x1->qs); + + // 4-bit -> 8-bit + const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // sub 8 + const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); + const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); + const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); + const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); + + // load y + const int8x16_t v1_0l = vld1q_s8(y0->qs); + const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); + const int8x16_t v1_1l = vld1q_s8(y1->qs); + const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); + + // dot product into int32x4_t + const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); + const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + } + + sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F) - 8; + const int v1 = (x[ib].qs[j] >> 4) - 8; + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + } + + *s = sumf; +} + +void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + assert(n % qk == 0); +#if defined(__ARM_FEATURE_MATMUL_INT8) + assert((nrc == 2) || (nrc == 1)); +#else + assert(nrc == 1); +#endif + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + +#if defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + const block_q4_1 * GGML_RESTRICT vx0 = vx; + const block_q4_1 * GGML_RESTRICT vx1 = (const block_q4_1 *) ((const uint8_t*)vx + bx); + const block_q8_1 * GGML_RESTRICT vy0 = vy; + const block_q8_1 * GGML_RESTRICT vy1 = (const block_q8_1 *) ((const uint8_t*)vy + by); + + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t summs0 = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; i++) { + const block_q4_1 * GGML_RESTRICT b_x0 = &vx0[i]; + const block_q4_1 * GGML_RESTRICT b_x1 = &vx1[i]; + const block_q8_1 * 
GGML_RESTRICT b_y0 = &vy0[i]; + const block_q8_1 * GGML_RESTRICT b_y1 = &vy1[i]; + + float32_t summs_t[4] = { + GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y0->s), + GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y0->s), + GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y1->s), + GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y1->s) + }; + summs0 = vaddq_f32(summs0, vld1q_f32(summs_t)); + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + + const uint8x16_t v0_0 = vld1q_u8(b_x0->qs); + const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); + + // 4-bit -> 8-bit + const int8x16_t x0_l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + const int8x16_t x0_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t x1_l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + const int8x16_t x1_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // load y + const int8x16_t y0_l = vld1q_s8(b_y0->qs); + const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); + const int8x16_t y1_l = vld1q_s8(b_y1->qs); + const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); + + // mmla into int32x4_t + float32_t _scale[4] = { + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) + }; + float32x4_t scale = vld1q_f32(_scale); + + int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + + int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + + int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + + int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), + l1, r1)), l2, r2)), l3, r3))), scale); + } + + float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); + float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); + + sumv2 = vaddq_f32(sumv2, summs0); + + vst1_f32(s, vget_low_f32 (sumv2)); + vst1_f32(s + bs, vget_high_f32(sumv2)); + + return; + } +#endif + + int ib = 0; + float sumf = 0; + +#if defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); + + float summs = 0; + + for (; ib + 1 < nb; ib += 2) { + const block_q4_1 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q4_1 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_1 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1]; + + summs += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s) + GGML_FP16_TO_FP32(x1->m) * GGML_FP16_TO_FP32(y1->s); + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v0_1 = vld1q_u8(x1->qs); + + // 4-bit -> 8-bit + const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + const int8x16_t v0_1h = 
vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // load y + const int8x16_t v1_0l = vld1q_s8(y0->qs); + const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); + const int8x16_t v1_1l = vld1q_s8(y1->qs); + const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); + + // dot product into int32x4_t + const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h); + const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + } + + sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs; + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F); + const int v1 = (x[ib].qs[j] >> 4); + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); + + uint32_t qh0; + uint32_t qh1; + + uint64_t tmp0[4]; + uint64_t tmp1[4]; + + for (; ib + 1 < nb; ib += 2) { + const block_q5_0 * GGML_RESTRICT x0 = &x[ib]; + const block_q5_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + + // extract the 5th bit via lookup table ((!b) << 4) + memcpy(&qh0, x0->qh, sizeof(qh0)); + memcpy(&qh1, x1->qh, sizeof(qh1)); + + tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF]; + tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF]; + tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF]; + tmp0[3] = table_b2b_1[(qh0 >> 24) ]; + + tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF]; + tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF]; + tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF]; + tmp1[3] = table_b2b_1[(qh1 >> 24) ]; + + const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0)); + const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); + const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); + const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); + + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v0_1 = vld1q_u8(x1->qs); + + // 4-bit -> 8-bit + int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) + const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0); + const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0); + const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1); + const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1); + + // load y + const int8x16_t v1_0l = vld1q_s8(y0->qs); + const int8x16_t 
v1_0h = vld1q_s8(y0->qs + 16); + const int8x16_t v1_1l = vld1q_s8(y1->qs); + const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( + ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), + ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( + ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), + ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + } + + sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); + +#endif + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; + const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); + + const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); + const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + } + + *s = sumf; +} + +void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_1); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + +#if defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); + + float summs0 = 0.0f; + float summs1 = 0.0f; + + uint32_t qh0; + uint32_t qh1; + + uint64_t tmp0[4]; + uint64_t tmp1[4]; + + for (; ib + 1 < nb; ib += 2) { + const block_q5_1 * GGML_RESTRICT x0 = &x[ib]; + const block_q5_1 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_1 * GGML_RESTRICT y0 = &y[ib]; + const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1]; + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + + summs0 += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s); + summs1 += GGML_FP16_TO_FP32(x1->m) * GGML_FP16_TO_FP32(y1->s); + + // extract the 5th bit via lookup table ((b) << 4) + memcpy(&qh0, x0->qh, sizeof(qh0)); + memcpy(&qh1, x1->qh, sizeof(qh1)); + + tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF]; + tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF]; + tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF]; + tmp0[3] = table_b2b_0[(qh0 >> 24) ]; + + tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF]; + tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF]; + tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF]; + tmp1[3] = table_b2b_0[(qh1 >> 24) ]; + + const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0)); + const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); + const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); + const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); + + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v0_1 = vld1q_u8(x1->qs); + + // 4-bit -> 8-bit + const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // add high bit + const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0); + const int8x16_t v0_0hf = 
vorrq_s8(v0_0h, qhh0); + const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1); + const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1); + + // load y + const int8x16_t v1_0l = vld1q_s8(y0->qs); + const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); + const int8x16_t v1_1l = vld1q_s8(y1->qs); + const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( + ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), + ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( + ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), + ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + } + + sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1; + +#endif + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; + const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; + + const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; + const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); +#if defined(__ARM_FEATURE_MATMUL_INT8) + assert((nrc == 2) || (nrc == 1)); +#else + assert(nrc == 1); +#endif + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q8_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + const block_q8_0 * GGML_RESTRICT vx0 = vx; + const block_q8_0 * GGML_RESTRICT vx1 = (const block_q8_0 *) ((const uint8_t*)vx + bx); + const block_q8_0 * GGML_RESTRICT vy0 = vy; + const block_q8_0 * GGML_RESTRICT vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); + + float32x4_t sumv0 = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; i++) { + const block_q8_0 * GGML_RESTRICT b_x0 = &vx0[i]; + const block_q8_0 * GGML_RESTRICT b_y0 = &vy0[i]; + + const block_q8_0 * GGML_RESTRICT b_x1 = &vx1[i]; + const block_q8_0 * GGML_RESTRICT b_y1 = &vy1[i]; + + const int8x16_t x0_l = vld1q_s8(b_x0->qs); + const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16); + const int8x16_t x1_l = vld1q_s8(b_x1->qs); + const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16); + + // load y + const int8x16_t y0_l = vld1q_s8(b_y0->qs); + const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); + const int8x16_t y1_l = vld1q_s8(b_y1->qs); + const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); + + float32_t _scale[4] = { + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) + }; + float32x4_t scale = vld1q_f32(_scale); + + int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + + int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), 
vreinterpretq_s64_s8(x1_h))); + int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + + int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + + int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + + sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), + l1, r1)), l2, r2)), l3, r3))), scale); + } + + float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); + float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); + + vst1_f32(s, vget_low_f32 (sumv2)); + vst1_f32(s + bs, vget_high_f32(sumv2)); + + return; + } +#endif + + int ib = 0; + float sumf = 0; + +#if defined(__ARM_FEATURE_SVE) + svfloat32_t sumv0 = svdup_n_f32(0.0f); + svfloat32_t sumv1 = svdup_n_f32(0.0f); + + const int vector_length = ggml_cpu_get_sve_cnt()*8; + + //VLA Implemenation for SVE + switch (vector_length) { + case 128: + { + // predicate for activating lanes for 16 Int8 elements + const svbool_t ph16 = svptrue_pat_b8 (SV_VL16); + const svbool_t pl16 = svptrue_pat_b32(SV_VL4); + + for (; ib + 1 < nb; ib += 2) { + const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + // load x + const svint8_t qx0_0 = svld1_s8(ph16, x0->qs); + const svint8_t qx0_1 = svld1_s8(ph16, x0->qs+16); + const svint8_t qx1_0 = svld1_s8(ph16, x1->qs); + const svint8_t qx1_1 = svld1_s8(ph16, x1->qs+16); + + // load y + const svint8_t qy0_0 = svld1_s8(ph16, y0->qs); + const svint8_t qy0_1 = svld1_s8(ph16, y0->qs+16); + const svint8_t qy1_0 = svld1_s8(ph16, y1->qs); + const svint8_t qy1_1 = svld1_s8(ph16, y1->qs+16); + + sumv0 = svmla_n_f32_x(pl16, sumv0, svcvt_f32_s32_x(pl16, svadd_x(pl16, + svdot_s32(svdup_n_s32(0), qx0_0, qy0_0), + svdot_s32(svdup_n_s32(0), qx0_1, qy0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = svmla_n_f32_x(pl16, sumv1, svcvt_f32_s32_x(pl16, svadd_x(pl16, + svdot_s32(svdup_n_s32(0), qx1_0, qy1_0), + svdot_s32(svdup_n_s32(0), qx1_1, qy1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + } + + sumf = svaddv_f32(pl16, svadd_f32_x(pl16, sumv0, sumv1)); + } break; + case 256: + { + //printf("sve256"); + for (; ib + 1 < nb; ib += 2) { + const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + // load x + const svint8_t qx0 = svld1_s8(svptrue_b8(), x0->qs); + const svint8_t qx1 = svld1_s8(svptrue_b8(), x1->qs); + + // load y + const svint8_t qy0 = svld1_s8(svptrue_b8(), y0->qs); + const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs); + + sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(), + svdot_s32(svdup_n_s32(0), qx0, qy0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(), + svdot_s32(svdup_n_s32(0), qx1, qy1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + } + + sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); + } break; + case 512: + { + // predicate for 
activating high 256 bit + const svbool_t ph32 = svptrue_pat_b8(SV_VL32); + // predicate for activating low 256 bit + const svbool_t pl32 = svnot_b_z(svptrue_b8(), ph32); + + // predicate for activating high lanes for 8 float32 elements + const svbool_t ph8 = svptrue_pat_b32(SV_VL8); + // predicate for activating low lanes for 8 float32 elements + const svbool_t pl8 = svnot_b_z(svptrue_b32(), ph8); + + svfloat32_t sumv00 = svdup_n_f32(0.0f); + + for (; ib + 1 < nb; ib += 2) { + const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + //load 32 int8_t in first half of vector and put another 32 int8_t in second vector lower bits + // and add them to make one 64 element vector + // load x + const svint8_t qx_32 = svld1_s8(ph32, x0->qs); + svint8_t qx_64 = svld1_s8(pl32, x0->qs + 2); + + qx_64 = svadd_s8_x(svptrue_b8(), qx_32, qx_64); + + // load y + const svint8_t qy_32 = svld1_s8(ph32, y0->qs); + svint8_t qy_64 = svld1_s8(pl32, y0->qs + 2); + + qy_64 = svadd_s8_x(svptrue_b8(), qy_32, qy_64); + + // scale creation + const float32_t deq1 = GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d); + const float32_t deq2 = GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d); + + // duplicate deq1 in first half of vector and deq2 in second half of vector + const svfloat32_t temp = svdup_f32_m(svdup_f32_z(ph8, deq1), pl8, deq2); + + const svfloat32_t sumvt = svcvt_f32_s32_x(svptrue_b32(), svdot_s32(svdup_n_s32(0), qx_64, qy_64)); + + sumv00 = svmla_f32_m(svptrue_b32(), sumv00, sumvt, temp); + } + + sumf = svaddv_f32(svptrue_b32(), sumv00); + break; + } + default: + assert(false && "Unsupported vector length"); + break; + } +#elif defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); + + for (; ib + 1 < nb; ib += 2) { + const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + const int8x16_t x0_0 = vld1q_s8(x0->qs); + const int8x16_t x0_1 = vld1q_s8(x0->qs + 16); + const int8x16_t x1_0 = vld1q_s8(x1->qs); + const int8x16_t x1_1 = vld1q_s8(x1->qs + 16); + + // load y + const int8x16_t y0_0 = vld1q_s8(y0->qs); + const int8x16_t y0_1 = vld1q_s8(y0->qs + 16); + const int8x16_t y1_0 = vld1q_s8(y1->qs); + const int8x16_t y1_1 = vld1q_s8(y1->qs + 16); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( + ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), + ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( + ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), + ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + } + + sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); +#endif + for (; ib < nb; ++ib) { + int sumi = 0; + + for (int j = 0; j < qk; j++) { + sumi += x[ib].qs[j]*y[ib].qs[j]; + } + + sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + } + + *s = sumf; +} + +void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_tq1_0 * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / 
QK_K; + +#if defined(__ARM_NEON) + float sumf = 0.0f; + + uint8_t k_shift[16] = {1, 1, 1, 1, 3, 3, 3, 3, 9, 9, 9, 9, 27, 27, 27, 27}; + + const uint8x16_t shift = vld1q_u8(k_shift); + + for (int i = 0; i < nb; ++i) { +#if defined(__ARM_FEATURE_DOTPROD) + int32x4_t sumi0 = vdupq_n_s32(0); + int32x4_t sumi1 = vdupq_n_s32(0); +#else + int16x8_t sumi0 = vdupq_n_s16(0); + int16x8_t sumi1 = vdupq_n_s16(0); +#endif + + // first 32 bytes of 5 elements + { + uint8x16_t qx0 = vld1q_u8(x[i].qs + 0); + uint8x16_t qx1 = vld1q_u8(x[i].qs + 16); + uint8x16_t qx2 = vmulq_u8(qx0, vdupq_n_u8(3)); + uint8x16_t qx3 = vmulq_u8(qx1, vdupq_n_u8(3)); + uint8x16_t qx4 = vmulq_u8(qx0, vdupq_n_u8(9)); + uint8x16_t qx5 = vmulq_u8(qx1, vdupq_n_u8(9)); + uint8x16_t qx6 = vmulq_u8(qx0, vdupq_n_u8(27)); + uint8x16_t qx7 = vmulq_u8(qx1, vdupq_n_u8(27)); + uint8x16_t qx8 = vmulq_u8(qx0, vdupq_n_u8(81)); + uint8x16_t qx9 = vmulq_u8(qx1, vdupq_n_u8(81)); + + // multiply by 3 and keep the 2 bits above 8 bits + int8x16_t sqx0 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx0, vshrq_n_u8(qx0, 1)), 6)); + int8x16_t sqx1 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx1, vshrq_n_u8(qx1, 1)), 6)); + int8x16_t sqx2 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx2, vshrq_n_u8(qx2, 1)), 6)); + int8x16_t sqx3 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx3, vshrq_n_u8(qx3, 1)), 6)); + int8x16_t sqx4 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx4, vshrq_n_u8(qx4, 1)), 6)); + int8x16_t sqx5 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx5, vshrq_n_u8(qx5, 1)), 6)); + int8x16_t sqx6 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx6, vshrq_n_u8(qx6, 1)), 6)); + int8x16_t sqx7 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx7, vshrq_n_u8(qx7, 1)), 6)); + int8x16_t sqx8 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx8, vshrq_n_u8(qx8, 1)), 6)); + int8x16_t sqx9 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx9, vshrq_n_u8(qx9, 1)), 6)); + + const int8x16_t qy0 = vld1q_s8(y[i].qs + 0); + const int8x16_t qy1 = vld1q_s8(y[i].qs + 16); + const int8x16_t qy2 = vld1q_s8(y[i].qs + 32); + const int8x16_t qy3 = vld1q_s8(y[i].qs + 48); + const int8x16_t qy4 = vld1q_s8(y[i].qs + 64); + const int8x16_t qy5 = vld1q_s8(y[i].qs + 80); + const int8x16_t qy6 = vld1q_s8(y[i].qs + 96); + const int8x16_t qy7 = vld1q_s8(y[i].qs + 112); + const int8x16_t qy8 = vld1q_s8(y[i].qs + 128); + const int8x16_t qy9 = vld1q_s8(y[i].qs + 144); + +#if defined(__ARM_FEATURE_DOTPROD) + sumi0 = vdotq_s32(sumi0, sqx0, qy0); + sumi1 = vdotq_s32(sumi1, sqx1, qy1); + sumi0 = vdotq_s32(sumi0, sqx2, qy2); + sumi1 = vdotq_s32(sumi1, sqx3, qy3); + sumi0 = vdotq_s32(sumi0, sqx4, qy4); + sumi1 = vdotq_s32(sumi1, sqx5, qy5); + sumi0 = vdotq_s32(sumi0, sqx6, qy6); + sumi1 = vdotq_s32(sumi1, sqx7, qy7); + sumi0 = vdotq_s32(sumi0, sqx8, qy8); + sumi1 = vdotq_s32(sumi1, sqx9, qy9); +#else + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5)); + sumi1 = 
vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx6), vget_low_s8(qy6)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx6), vget_high_s8(qy6)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx7), vget_low_s8(qy7)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx7), vget_high_s8(qy7)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx8), vget_low_s8(qy8)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx8), vget_high_s8(qy8)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx9), vget_low_s8(qy9)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx9), vget_high_s8(qy9)); +#endif + } + + // last 16 bytes of 5-element, along with the 4 bytes of 4 elements + { + uint8x16_t qx0 = vld1q_u8(x[i].qs + 32); + uint8x16_t qx1 = vmulq_u8(qx0, vdupq_n_u8(3)); + uint8x16_t qx2 = vmulq_u8(qx0, vdupq_n_u8(9)); + uint8x16_t qx3 = vmulq_u8(qx0, vdupq_n_u8(27)); + uint8x16_t qx4 = vmulq_u8(qx0, vdupq_n_u8(81)); + uint32_t qh; + memcpy(&qh, x[i].qh, sizeof(qh)); // potentially unaligned + uint8x16_t qx5 = vreinterpretq_u8_u32(vdupq_n_u32(qh)); + qx5 = vmulq_u8(qx5, shift); + + // multiply by 3 and keep the 2 bits above 8 bits + int8x16_t sqx0 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx0, vshrq_n_u8(qx0, 1)), 6)); + int8x16_t sqx1 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx1, vshrq_n_u8(qx1, 1)), 6)); + int8x16_t sqx2 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx2, vshrq_n_u8(qx2, 1)), 6)); + int8x16_t sqx3 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx3, vshrq_n_u8(qx3, 1)), 6)); + int8x16_t sqx4 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx4, vshrq_n_u8(qx4, 1)), 6)); + int8x16_t sqx5 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx5, vshrq_n_u8(qx5, 1)), 6)); + + const int8x16_t qy0 = vld1q_s8(y[i].qs + 160); + const int8x16_t qy1 = vld1q_s8(y[i].qs + 176); + const int8x16_t qy2 = vld1q_s8(y[i].qs + 192); + const int8x16_t qy3 = vld1q_s8(y[i].qs + 208); + const int8x16_t qy4 = vld1q_s8(y[i].qs + 224); + const int8x16_t qy5 = vld1q_s8(y[i].qs + 240); + +#if defined(__ARM_FEATURE_DOTPROD) + sumi0 = vdotq_s32(sumi0, sqx0, qy0); + sumi1 = vdotq_s32(sumi1, sqx1, qy1); + sumi0 = vdotq_s32(sumi0, sqx2, qy2); + sumi1 = vdotq_s32(sumi1, sqx3, qy3); + sumi0 = vdotq_s32(sumi0, sqx4, qy4); + sumi1 = vdotq_s32(sumi1, sqx5, qy5); +#else + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5)); +#endif + } + + const int16x8_t ysum0 = vld1q_s16(y[i].bsums); + const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8); + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + +#if defined(__ARM_FEATURE_DOTPROD) + sumi0 = vaddq_s32(sumi0, sumi1); + sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1))); + + sumf += d * (float) vaddvq_s32(sumi0); +#else + sumi0 = vaddq_s16(sumi0, sumi1); + sumi0 = vsubq_s16(sumi0, vaddq_s16(ysum0, ysum1)); + + sumf += d * (float) vaddlvq_s16(sumi0); +#endif + } + + *s = sumf; + +#else + 
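+    // Descriptive note on the scalar TQ1_0 fallback below: each qs byte packs five
+    // ternary digits and each qh byte packs four. Multiplying a byte by pow3[l]
+    // (1, 3, 9, 27, 81) brings digit l into the top of the byte (mod 256), and
+    // ((uint16_t) q * 3) >> 8 then reads that digit out as a value in {0, 1, 2};
+    // subtracting 1 maps it to the ternary weight {-1, 0, +1} used in the dot
+    // product with y[i].qs. This mirrors the "multiply by 3 and keep the 2 bits
+    // above 8 bits" trick in the NEON path above.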
const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243}; + + float sumf = 0.0f; + + for (int i = 0; i < nb; ++i) { + int sum = 0; + + for (size_t j = 0; j < sizeof(x->qs) - sizeof(x->qs) % 32; j += 32) { + for (size_t l = 0; l < 5; ++l) { + for (size_t m = 0; m < 32; ++m) { + uint8_t q = x[i].qs[j + m] * pow3[l]; + uint16_t xi = ((uint16_t) q * 3) >> 8; + sum += (xi - 1) * y[i].qs[j*5 + l*32 + m]; + } + } + } + for (size_t j = sizeof(x->qs) - sizeof(x->qs) % 32; j < sizeof(x->qs); j += 16) { + for (size_t l = 0; l < 5; ++l) { + for (size_t m = 0; m < 16; ++m) { + uint8_t q = x[i].qs[j + m] * pow3[l]; + uint16_t xi = ((uint16_t) q * 3) >> 8; + sum += (xi - 1) * y[i].qs[j*5 + l*16 + m]; + } + } + } + + for (size_t l = 0; l < 4; ++l) { + for (size_t j = 0; j < sizeof(x->qh); ++j) { + uint8_t q = x[i].qh[j] * pow3[l]; + uint16_t xi = ((uint16_t) q * 3) >> 8; + sum += (xi - 1) * y[i].qs[sizeof(x->qs)*5 + l*sizeof(x->qh) + j]; + } + } + + sumf += (float) sum * (GGML_FP16_TO_FP32(x[i].d) * y[i].d); + } + + *s = sumf; +#endif +} + +void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_tq2_0 * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_NEON) + float sumf = 0.0f; + + const uint8x16_t m3 = vdupq_n_u8(3); + + for (int i = 0; i < nb; ++i) { +#if defined(__ARM_FEATURE_DOTPROD) + int32x4_t sumi0 = vdupq_n_s32(0); + int32x4_t sumi1 = vdupq_n_s32(0); +#else + int16x8_t sumi0 = vdupq_n_s16(0); + int16x8_t sumi1 = vdupq_n_s16(0); +#endif + + for (size_t j = 0; j < sizeof(x->qs); j += 32) { + uint8x16_t qx0 = vld1q_u8(x[i].qs + j); + uint8x16_t qx1 = vld1q_u8(x[i].qs + j + 16); + uint8x16_t qx2 = vshrq_n_u8(qx0, 2); + uint8x16_t qx3 = vshrq_n_u8(qx1, 2); + uint8x16_t qx4 = vshrq_n_u8(qx0, 4); + uint8x16_t qx5 = vshrq_n_u8(qx1, 4); + uint8x16_t qx6 = vshrq_n_u8(qx0, 6); + uint8x16_t qx7 = vshrq_n_u8(qx1, 6); + + int8x16_t sqx0 = vreinterpretq_s8_u8(vandq_u8(qx0, m3)); + int8x16_t sqx1 = vreinterpretq_s8_u8(vandq_u8(qx1, m3)); + int8x16_t sqx2 = vreinterpretq_s8_u8(vandq_u8(qx2, m3)); + int8x16_t sqx3 = vreinterpretq_s8_u8(vandq_u8(qx3, m3)); + int8x16_t sqx4 = vreinterpretq_s8_u8(vandq_u8(qx4, m3)); + int8x16_t sqx5 = vreinterpretq_s8_u8(vandq_u8(qx5, m3)); + int8x16_t sqx6 = vreinterpretq_s8_u8(vandq_u8(qx6, m3)); + int8x16_t sqx7 = vreinterpretq_s8_u8(vandq_u8(qx7, m3)); + + const int8x16_t qy0 = vld1q_s8(y[i].qs + j*4 + 0); + const int8x16_t qy1 = vld1q_s8(y[i].qs + j*4 + 16); + const int8x16_t qy2 = vld1q_s8(y[i].qs + j*4 + 32); + const int8x16_t qy3 = vld1q_s8(y[i].qs + j*4 + 48); + const int8x16_t qy4 = vld1q_s8(y[i].qs + j*4 + 64); + const int8x16_t qy5 = vld1q_s8(y[i].qs + j*4 + 80); + const int8x16_t qy6 = vld1q_s8(y[i].qs + j*4 + 96); + const int8x16_t qy7 = vld1q_s8(y[i].qs + j*4 + 112); + +#if defined(__ARM_FEATURE_DOTPROD) + sumi0 = vdotq_s32(sumi0, sqx0, qy0); + sumi1 = vdotq_s32(sumi1, sqx1, qy1); + sumi0 = vdotq_s32(sumi0, sqx2, qy2); + sumi1 = vdotq_s32(sumi1, sqx3, qy3); + sumi0 = vdotq_s32(sumi0, sqx4, qy4); + sumi1 = vdotq_s32(sumi1, sqx5, qy5); + sumi0 = vdotq_s32(sumi0, sqx6, qy6); + sumi1 = vdotq_s32(sumi1, sqx7, qy7); +#else + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1)); + sumi1 
= vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx6), vget_low_s8(qy6)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx6), vget_high_s8(qy6)); + sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx7), vget_low_s8(qy7)); + sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx7), vget_high_s8(qy7)); +#endif + } + + const int16x8_t ysum0 = vld1q_s16(y[i].bsums); + const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8); + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + +#if defined(__ARM_FEATURE_DOTPROD) + sumi0 = vaddq_s32(sumi0, sumi1); + sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1))); + + sumf += d * (float) vaddvq_s32(sumi0); +#else + sumi0 = vaddq_s16(sumi0, sumi1); + sumi0 = vsubq_s16(sumi0, vaddq_s16(ysum0, ysum1)); + + sumf += d * (float) vaddlvq_s16(sumi0); +#endif + } + + *s = sumf; + +#else + float sumf = 0.0f; + + for (int i = 0; i < nb; ++i) { + int32_t sumi = 0; + + for (size_t j = 0; j < sizeof(x->qs); j += 32) { + for (size_t l = 0; l < 4; ++l) { + for (size_t k = 0; k < 32; ++k) { + sumi += y[i].qs[j*4 + l*32 + k] * (((x[i].qs[j + k] >> (l*2)) & 3) - 1); + } + } + } + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + + sumf += (float) sumi * d; + } + + *s = sumf; +#endif +} + +void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q2_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#ifdef __ARM_FEATURE_SVE + const int vector_length = svcntb()*8; + const svuint8_t m3s = svdup_n_u8(0x3); + const svuint32_t m4s = svdup_n_u32(0xF); + const svint32_t vzero_sv = svdup_n_s32(0); + svfloat32_t acc_sum = svdup_n_f32(0); + svbool_t pred_s32 = svptrue_pat_b32(SV_VL4); + + switch (vector_length) { + case 128: + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + svfloat32_t d_broad = svdup_n_f32((float32_t)d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin); + + const uint8_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8_sv = y[i].qs; + const uint8_t * GGML_RESTRICT sc = x[i].scales; + + svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc); + const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); + + mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+4); + const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); + + svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums); + svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums+4); + + const svint32_t s0 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_2, q8sums_sv_2)); + + mins_and_scales_sve = svld1ub_u32(svptrue_b32(), 
sc+8); + const svint32_t mins_sv_3 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); + + mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+12); + const svint32_t mins_sv_4 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); + + q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums+8); + q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums+12); + + svint32_t s1 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_3, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_4, q8sums_sv_2)); + + svfloat32_t temp = svcvt_f32_s32_x(svptrue_b32(), svadd_s32_x(svptrue_b32(), s0, s1)); + + acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, temp, dmin_broad); + + svint32_t sumi1 = svdup_n_s32(0); + + { + const svuint8_t q2bits_1 = svld1_u8(svptrue_b8(), q2); + svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_1, m3s)); + svint8_t q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc), m4s)); + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 0)); + + const svuint8_t q2bits_3 = svld1_u8(svptrue_b8(), q2+16); + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_3, m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 1)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 2), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 2)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 2), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 3)); + + + const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+4), m4s)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 4), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 0)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 4), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 1)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 6), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 2)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 6), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 3)); + + //------------------------------- + + q2 += 32; + const svint32_t scales_sv_2 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), 
svld1ub_u32(svptrue_b32(), sc+8), m4s)); + const svuint8_t q2bits_2 = svld1_u8(svptrue_b8(), q2); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_2, m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 0)); + + const svuint8_t q2bits_4 = svld1_u8(svptrue_b8(), q2+16); + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_4, m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 1)); + + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 2), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 2)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 2), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 3)); + + + const svint32_t scales_sv_3 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+12), m4s)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 4), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 0)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 4), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 1)); + + + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 6), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 2)); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 6), m3s)); + q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 3)); + } + acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, svcvt_f32_s32_x(svptrue_b32(), sumi1), d_broad); + } + *s = svaddv_f32(svptrue_b32(), acc_sum); + break; + + case 256: + case 512: + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + svfloat32_t d_broad = svdup_n_f32((float32_t)d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin); + + const uint8_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8_sv = y[i].qs; + const uint8_t * GGML_RESTRICT sc = x[i].scales; + + const svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); sc += 8; + const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, m4s)); + const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, 4)); + svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_pat_b32(SV_VL8), 
y[i].bsums); + + const svuint32_t mins_and_scales_sve_1 = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); + const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, m4s)); + const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, 4)); + + svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums+8); + + svfloat32_t temp = svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_2, q8sums_sv_2))); + + acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, temp, dmin_broad); + + svint32_t sumi1 = svdup_n_s32(0); + + { + const svuint8_t q2bits_1 = svld1_u8(svptrue_pat_b8(SV_VL32), q2); + svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_1, m3s)); + svint8_t q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; + + svint32_t scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 0), svdup_lane_s32(scales_sv, 1)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 2), m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; + + svint32_t scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 2), svdup_lane_s32(scales_sv, 3)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(svdup_n_s32(0), q2bytes_sv, q8bytes_sv), scale_2); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 4), m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; + + scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 4), svdup_lane_s32(scales_sv, 5)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 6), m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; + + scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 6), svdup_lane_s32(scales_sv, 7)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); + + q2 += 32; + + const svuint8_t q2bits_2 = svld1_u8(svptrue_pat_b8(SV_VL32), q2); + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_2, m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; + + scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 0), svdup_lane_s32(scales_sv_1, 1)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 2), m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; + + scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 2), svdup_lane_s32(scales_sv_1, 3)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 4), m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; + + scale_1 = svsel(pred_s32, 
svdup_lane_s32(scales_sv_1, 4), svdup_lane_s32(scales_sv_1, 5)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); + + q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 6), m3s)); + q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; + + scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 6), svdup_lane_s32(scales_sv_1, 7)); + sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); + } + acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), sumi1), d_broad); + } + *s = svaddv_f32(svptrue_pat_b32(SV_VL8), acc_sum); + break; + + default: + assert(false && "Unsupported vector length"); + break; + } + +#elif __ARM_NEON + const uint8x16_t m3 = vdupq_n_u8(0x3); + const uint8x16_t m4 = vdupq_n_u8(0xF); + + const int32x4_t vzero = vdupq_n_s32(0); + + ggml_int8x16x2_t q2bytes; + uint8_t aux[16]; + + float sum = 0; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + const uint8_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + const uint8_t * GGML_RESTRICT sc = x[i].scales; + + const uint8x16_t mins_and_scales = vld1q_u8(sc); + const uint8x16_t scales = vandq_u8(mins_and_scales, m4); + vst1q_u8(aux, scales); + + const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4); + const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); + const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}}; + const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])), + vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0]))); + const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])), + vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1]))); + sum += dmin * vaddvq_s32(vaddq_s32(s0, s1)); + + int isum = 0; + int is = 0; + +// We use this macro instead of a function call because for some reason +// the code runs 2-3% slower, even if the function is declared inline +#define MULTIPLY_ACCUM_WITH_SCALE(index)\ + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\ + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)]; + +#define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\ + q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\ + q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\ + q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\ + MULTIPLY_ACCUM_WITH_SCALE((index)); + + for (int j = 0; j < QK_K/128; ++j) { + const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32; + + ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; + q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3)); + q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3)); + + MULTIPLY_ACCUM_WITH_SCALE(0); + + SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2); + SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4); + SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6); + + is += 8; + } + + sum += d * isum; + } + + *s = sum; + +#else + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const uint8_t * q2 = x[i].qs; + const int8_t * q8 = y[i].qs; + const 
uint8_t * sc = x[i].scales; + + int summs = 0; + for (int j = 0; j < 16; ++j) { + summs += y[i].bsums[j] * (sc[j] >> 4); + } + + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + int isum = 0; + int is = 0; + int d; + for (int k = 0; k < QK_K/128; ++k) { + int shift = 0; + for (int j = 0; j < 4; ++j) { + d = sc[is++] & 0xF; + int isuml = 0; + for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + d = sc[is++] & 0xF; + isuml = 0; + for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + shift += 2; + q8 += 32; + } + q2 += 32; + } + sumf += dall * isum - dmin * summs; + } + *s = sumf; +#endif +} + +void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const uint32_t kmask1 = 0x03030303; + const uint32_t kmask2 = 0x0f0f0f0f; + + const block_q3_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_FEATURE_SVE) + + uint32_t aux[3]; + uint32_t utmp[4]; + + const int8_t m32 = 32; + const int vector_length = svcntb()*8; + const svuint8_t m3b_sv = svdup_n_u8(0x3); + const svint32_t vzero_sv = svdup_n_s32(0); + + const svuint8_t m0_sv = svdup_n_u8(1); + const svuint8_t m1_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 1); + const svuint8_t m2_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 2); + const svuint8_t m3_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 3); + + float sum = 0; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q3_sv = x[i].qs; + const uint8_t * GGML_RESTRICT qh_sv = x[i].hmask; + const int8_t * GGML_RESTRICT q8_sv = y[i].qs; + + // Set up scales + memcpy(aux, x[i].scales, 12); + utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); + utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); + utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); + utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); + + int8_t * scale = (int8_t *)utmp; + + for (int j = 0; j < 16; ++j) scale[j] -= m32; + + switch (vector_length) { + case 128: + { + svuint8_t qhbits_sv_1 = svld1_u8(svptrue_b8(), qh_sv); + svuint8_t qhbits_sv_2 = svld1_u8(svptrue_b8(), qh_sv+16); + svuint8_t q3h_sv; + + svint32_t sumi1_1 = svdup_n_s32(0); + svint8_t q3bytes_sv; + + for (int j = 0; j < QK_K/128; ++j) { + + const svuint8_t q3bits_sv = svld1_u8(svptrue_b8(), q3_sv); q3_sv += 16; + const svuint8_t q3bits_sv_1 = svld1_u8(svptrue_b8(), q3_sv); q3_sv += 16; + svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_1), 2); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0])); + + q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_2), 2); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv_1, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = 
svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1])); + + q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_1), 1); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2])); + + q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_2), 1); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3])); + + + scale += 4; + q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_1); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0])); + + q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_2); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1])); + + + q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; + + q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_1), 1); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2])); + + q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_2), 1); + q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3])); + + if (j == 0) { + qhbits_sv_1 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_1, 4); + qhbits_sv_2 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_2, 4); + } + + scale += 4; + } + + sum += d * (svaddv_s32(svptrue_b32(), sumi1_1)); + } break; + case 256: + case 512: + { + svuint8_t qhbits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), qh_sv); + svuint8_t q3h_sv; + + svint32_t sumi1_1 = svdup_n_s32(0); + svint8_t q3bytes_sv; + + for (int j = 0; j < QK_K/128; ++j) { + + const svuint8_t q3bits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), q3_sv); q3_sv += 32; + svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; + svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; + + q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), 
svbic_u8_x(svptrue_pat_b8(SV_VL32), m0_sv, qhbits_sv), 2); + q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + + svint32_t scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1])); + sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1); + + q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m1_sv, qhbits_sv), 1); + q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3])); + sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1); + + scale += 4; + q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; + q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; + + q3h_sv = svbic_u8_x(svptrue_pat_b8(SV_VL32), m2_sv, qhbits_sv); + q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1])); + sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1); + + q3h_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m3_sv, qhbits_sv), 1); + q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); + + scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3])); + sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1); + + if (j == 0) { + qhbits_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), qhbits_sv, 4); + } + + scale += 4; + } + + sum += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), sumi1_1)); + } break; + default: + assert(false && "Unsupported vector length"); + break; + } + } + *s = sum; + +#elif __ARM_NEON + + uint32_t aux[3]; + uint32_t utmp[4]; + + const uint8x16_t m3b = vdupq_n_u8(0x3); + const int32x4_t vzero = vdupq_n_s32(0); + + const uint8x16_t m0 = vdupq_n_u8(1); + const uint8x16_t m1 = vshlq_n_u8(m0, 1); + const uint8x16_t m2 = vshlq_n_u8(m0, 2); + const uint8x16_t m3 = vshlq_n_u8(m0, 3); + const int8_t m32 = 32; + + ggml_int8x16x4_t q3bytes; + + float sum = 0; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].hmask; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); + + ggml_uint8x16x4_t q3h; + + int32_t isum = 0; + + // Set up scales + memcpy(aux, x[i].scales, 12); + utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); + utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); + utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); + utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); + + int8_t * scale = 
(int8_t *)utmp; + for (int j = 0; j < 16; ++j) scale[j] -= m32; + + for (int j = 0; j < QK_K/128; ++j) { + + const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32; + const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64; + const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64; + + q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2); + q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2); + q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1); + q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1); + + q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0])); + q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1])); + q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2])); + q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3])); + + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0]; + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1]; + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2]; + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3]; + + scale += 4; + + q3h.val[0] = vbicq_u8(m2, qhbits.val[0]); + q3h.val[1] = vbicq_u8(m2, qhbits.val[1]); + q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1); + q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1); + + q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0])); + q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1])); + q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2])); + q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3])); + + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0]; + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1]; + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2]; + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3]; + + scale += 4; + + if (j == 0) { + qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4); + qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4); + } + + } + sum += d * isum; + + } + + *s = sum; + +#else + // scalar version + // This function is written like this so the compiler can manage to vectorize most of it + // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the + // manually vectorized version above. Every other version I tried would run at least 4 times slower. + // The ideal situation would be if we could just write the code once, and the compiler would + // automatically produce the best possible set of machine instructions, instead of us having to manually + // write vectorized versions for AVX, ARM_NEON, etc. 
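+    // Descriptive note on the reference loop below: each q3_K super-block stores
+    // 2-bit quants in qs (extracted with shifts of 0/2/4/6), one high bit per value
+    // in hmask (when that bit is clear, 4 is subtracted, giving signed values in
+    // [-4, 3]), and sixteen 6-bit block scales packed into the 12-byte scales field,
+    // unpacked via the kmask constants and offset by subtracting 32. Each group of
+    // 16 products is weighted by its (scale - 32), and the super-block total is
+    // scaled by d = GGML_FP16_TO_FP32(x[i].d) * y[i].d.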
+ + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + uint32_t auxs[4]; + const int8_t * scales = (const int8_t*)auxs; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].hmask; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + q3 += 32; + } + a = aux8; + + memcpy(auxs, x[i].scales, 12); + uint32_t tmp = auxs[2]; + auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + for (int j = 0; j < QK_K/16; ++j) { + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; + +#endif + +} + +void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); +#ifdef __ARM_FEATURE_MATMUL_INT8 + assert((nrc == 2) || (nrc == 1)); +#else + assert(nrc == 1); +#endif + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + const block_q4_K * GGML_RESTRICT x0 = x; + const block_q4_K * GGML_RESTRICT x1 = (const block_q4_K *) ((const uint8_t *)vx + bx); + const block_q8_K * GGML_RESTRICT y0 = y; + const block_q8_K * GGML_RESTRICT y1 = (const block_q8_K *) ((const uint8_t *)vy + by); + + const uint8x16_t m4b = vdupq_n_u8(0x0f); + + float32x4_t vfsum = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; ++i, ++x0, ++x1, ++y0, ++y1) { + const uint8_t * GGML_RESTRICT qx0 = x0->qs; + const uint8_t * GGML_RESTRICT qx1 = x1->qs; + const int8_t * GGML_RESTRICT qy0 = y0->qs; + const int8_t * GGML_RESTRICT qy1 = y1->qs; + + // decode scales and mins + int8_t x0_scales[8], x1_scales[8]; + int16x8_t x0_mins, x1_mins; + { + uint32_t scales_mins[3]; + memcpy(scales_mins, x0->scales, 12); + const uint32_t mins_0_3 = scales_mins[1] & kmask1; + const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4); + const uint32x2_t mins 
= {mins_0_3, mins_4_7}; + x0_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins))); + uint32_t scales[2]; + scales[0] = scales_mins[0] & kmask1; // scales 0~3 + scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7 + memcpy(x0_scales, scales, 8); + } + { + uint32_t scales_mins[3]; + memcpy(scales_mins, x1->scales, 12); + const uint32_t mins_0_3 = scales_mins[1] & kmask1; + const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4); + const uint32x2_t mins = {mins_0_3, mins_4_7}; + x1_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins))); + uint32_t scales[2]; + scales[0] = scales_mins[0] & kmask1; // scales 0~3 + scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7 + memcpy(x1_scales, scales, 8); + } + + int32x4_t visum = {0}; + + // process 64 data points per iteration, totally 256 data points + for (int j = 0; j < QK_K / 64; ++j, qx0 += 32, qx1 += 32, qy0 += 64, qy1 += 64) { + const int8x16x4_t vy0 = vld1q_s8_x4(qy0); + const int8x16x4_t vy1 = vld1q_s8_x4(qy1); + + int8x16_t vx0[4], vx1[4]; + { + const uint8x16x2_t vv = vld1q_u8_x2(qx0); + vx0[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b)); + vx0[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b)); + vx0[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4)); + vx0[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4)); + } + { + const uint8x16x2_t vv = vld1q_u8_x2(qx1); + vx1[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b)); + vx1[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b)); + vx1[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4)); + vx1[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4)); + } + + // process 32 data points (share same block scale) per iteration + for (int k = 0; k < 2; ++k) { + const int blk = j * 2 + k; + const int32x4_t block_scale = { + x0_scales[blk], + x0_scales[blk], + x1_scales[blk], + x1_scales[blk], + }; + + int32x4_t vr = {0}; + for (int l = 0; l < 2; ++l) { + const int idx = k * 2 + l; + const int64x2_t vx0_s64 = vreinterpretq_s64_s8(vx0[idx]); + const int64x2_t vx1_s64 = vreinterpretq_s64_s8(vx1[idx]); + const int64x2_t vy0_s64 = vreinterpretq_s64_s8(vy0.val[idx]); + const int64x2_t vy1_s64 = vreinterpretq_s64_s8(vy1.val[idx]); + const int8x16_t vx_l = vreinterpretq_s8_s64(vzip1q_s64(vx0_s64, vx1_s64)); + const int8x16_t vx_h = vreinterpretq_s8_s64(vzip2q_s64(vx0_s64, vx1_s64)); + const int8x16_t vy_l = vreinterpretq_s8_s64(vzip1q_s64(vy0_s64, vy1_s64)); + const int8x16_t vy_h = vreinterpretq_s8_s64(vzip2q_s64(vy0_s64, vy1_s64)); + vr = vmmlaq_s32(vr, vx_l, vy_l); + vr = vmmlaq_s32(vr, vx_h, vy_h); + } + // apply block scale, will NOT overflow + // block_scale * sum_256(int4*int8) <= 2^(8+8+4+8) = 28 bits + visum = vmlaq_s32(visum, vr, block_scale); + } + } + + // adjust bias, apply superblock scale + { + int32_t bias[4]; + // no obvious uplift from sve sdot-16, just use neon mul add + const int16x8_t y0_sums = vpaddq_s16(vld1q_s16(y0->bsums), vld1q_s16(y0->bsums+8)); + const int16x8_t y1_sums = vpaddq_s16(vld1q_s16(y1->bsums), vld1q_s16(y1->bsums+8)); + bias[0] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x0_mins)), + vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x0_mins)))); + bias[1] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x0_mins)), + vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x0_mins)))); + bias[2] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x1_mins)), + 
vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x1_mins)))); + bias[3] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x1_mins)), + vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x1_mins)))); + const float32x4_t dmins = { + GGML_FP16_TO_FP32(x0->dmin) * y0->d, + GGML_FP16_TO_FP32(x0->dmin) * y1->d, + GGML_FP16_TO_FP32(x1->dmin) * y0->d, + GGML_FP16_TO_FP32(x1->dmin) * y1->d, + }; + vfsum = vmlsq_f32(vfsum, vcvtq_f32_s32(vld1q_s32(bias)), dmins); + + const float32x4_t superblock_scale = { + GGML_FP16_TO_FP32(x0->d) * y0->d, + GGML_FP16_TO_FP32(x0->d) * y1->d, + GGML_FP16_TO_FP32(x1->d) * y0->d, + GGML_FP16_TO_FP32(x1->d) * y1->d, + }; + vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale); + } + } + + // vfsum = ABCD -> ACBD + // AC -> s, BD -> (s+bs) + vfsum = vzip1q_f32(vfsum, vextq_f32(vfsum, vfsum, 2)); + vst1_f32(s, vget_low_f32 (vfsum)); + vst1_f32(s + bs, vget_high_f32(vfsum)); + + return; + } +#endif + +#ifdef __ARM_FEATURE_SVE + float sumf = 0; + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); + + memcpy(utmp, x[i].scales, K_SCALE_SIZE); + + uint32x2_t mins8 = { 0 }; + mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); + mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); + + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[0] &= kmask1; + + const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); + const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), + vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); + sumf -= dmin * vaddvq_s32(prod); + + const uint8_t * scales = (const uint8_t *)utmp; + + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const int vector_length = ggml_cpu_get_sve_cnt()*8; + const svuint8_t m4b = svdup_n_u8(0xf); + const svint32_t mzero = svdup_n_s32(0); + svint32_t sumi1 = svdup_n_s32(0); + svint32_t sumi1_1 = svdup_n_s32(0); + svint32_t sumi1_2 = svdup_n_s32(0); + svint32_t sumi2 = svdup_n_s32(0); + svint32_t sumi2_1 = svdup_n_s32(0); + svint32_t sumi2_2 = svdup_n_s32(0); + switch (vector_length) { + case 128: + { + for (int j = 0; j < QK_K/64; ++j) { + svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), m4b)); + svint8_t q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; + sumi1_1 = svmla_n_s32_x(svptrue_b32(), sumi1_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); + q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), m4b)); + q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; + sumi1_2 = svmla_n_s32_x(svptrue_b32(), sumi1_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); + + q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), 4)); + q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; + sumi2_1 = svmla_n_s32_x(svptrue_b32(), sumi2_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); + q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), 4)); + q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; + sumi2_2 = svmla_n_s32_x(svptrue_b32(), sumi2_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); + q4 += 32; + } + sumi1 = svadd_s32_x(svptrue_b32(), sumi1_1, sumi1_2); + sumi2 = svadd_s32_x(svptrue_b32(), sumi2_1, sumi2_2); + sumf 
+= d * (svaddv_s32(svptrue_b32(), svadd_s32_x(svptrue_b32(), sumi1, sumi2))); + } break; + case 256: + case 512: + { + for (int j = 0; j < QK_K/64; ++j) { + const svuint8_t q4bits = svld1_u8(svptrue_pat_b8(SV_VL32), q4); q4 += 32; + svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_pat_b8(SV_VL32), q4bits, m4b)); + svint8_t q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32; + sumi1 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); + + q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q4bits, 4)); + q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32; + sumi2 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); + } + sumf += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), sumi1, sumi2))); + } break; + default: + assert(false && "Unsupported vector length"); + break; + } + } + *s = sumf; +#elif defined __ARM_NEON + const uint8x16_t m4b = vdupq_n_u8(0xf); + const int32x4_t mzero = vdupq_n_s32(0); + + ggml_int8x16x2_t q4bytes; + ggml_int8x16x2_t q8bytes; + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); + + memcpy(utmp, x[i].scales, 12); + + uint32x2_t mins8 = { 0 }; + mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); + mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); + + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[0] &= kmask1; + + const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); + const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), + vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); + sumf -= dmin * vaddvq_s32(prod); + + const uint8_t * scales = (const uint8_t *)utmp; + + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + int32_t sumi1 = 0; + int32_t sumi2 = 0; + + for (int j = 0; j < QK_K/64; ++j) { + const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; + + q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; + q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); + q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); + + const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); + sumi1 += vaddvq_s32(p1) * scales[2*j+0]; + + q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; + q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); + q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); + + const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); + + sumi2 += vaddvq_s32(p2) * scales[2*j+1]; + } + + sumf += d * (sumi1 + sumi2); + + } + + *s = sumf; + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 
32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + a += 32; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + a += 32; q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + + +#ifdef __ARM_NEON + const uint8x16_t m4b = vdupq_n_u8(0xf); + const uint8x16_t mone = vdupq_n_u8(1); + const uint8x16_t mtwo = vdupq_n_u8(2); + const int32x4_t mzero = vdupq_n_s32(0); + + ggml_int8x16x4_t q5bytes; + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8); + const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8)); + const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), + vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); + int32_t sumi_mins = vaddvq_s32(prod); + + const uint8_t * scales = (const uint8_t *)utmp; + + const uint8_t * GGML_RESTRICT q5 = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); + + ggml_uint8x16x4_t q5h; + + int32_t sumi = 0; + + for (int j = 0; j < QK_K/64; ++j) { + + const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32; + const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; + + q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); + q5h.val[1] = 
vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); + q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3); + q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3); + qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2); + qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2); + + q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0])); + q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1])); + q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2])); + q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3])); + + sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++; + sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++; + } + + sumf += d * sumi - dmin * sumi_mins; + } + + *s = sumf; + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 
16 : 0); + a += 32; m <<= 1; + q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); +#ifdef __ARM_FEATURE_MATMUL_INT8 + assert((nrc == 2) || (nrc == 1)); +#else + assert(nrc == 1); +#endif + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q6_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + const block_q6_K * GGML_RESTRICT x0 = x; + const block_q6_K * GGML_RESTRICT x1 = (const block_q6_K *) ((const uint8_t *)vx + bx); + const block_q8_K * GGML_RESTRICT y0 = y; + const block_q8_K * GGML_RESTRICT y1 = (const block_q8_K *) ((const uint8_t *)vy + by); + + float32x4_t vfsum = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; ++i, ++x0, ++x1, ++y0, ++y1) { + const uint8_t * GGML_RESTRICT ql0 = x0->ql; + const uint8_t * GGML_RESTRICT ql1 = x1->ql; + const uint8_t * GGML_RESTRICT qh0 = x0->qh; + const uint8_t * GGML_RESTRICT qh1 = x1->qh; + const int8_t * GGML_RESTRICT qy0 = y0->qs; + const int8_t * GGML_RESTRICT qy1 = y1->qs; + + const uint8x16_t mone = vdupq_n_u8(0x30); + const uint8x16_t m4b = vdupq_n_u8(0x0f); + + int32x4_t visum = vdupq_n_s32(0); + + // process 8 blocks per iteration, totally 16 blocks + for (int j = 0; j < 2; ++j, qh0 += 32, ql0 += 64, qh1 += 32, ql1 += 64) { + int8x16_t vx0[8], vx1[8]; + + // de-quantize vx0[8] + { + const uint8x16x2_t qh_bits = vld1q_u8_x2(qh0); + const uint8x16x4_t ql_bits = vld1q_u8_x4(ql0); + + uint8x16_t q6h_0 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 4)); + uint8x16_t q6h_1 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 4)); + uint8x16_t q6h_2 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 2)); + uint8x16_t q6h_3 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 2)); + + vx0[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[0], m4b), q6h_0)); + vx0[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[1], m4b), q6h_1)); + vx0[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[2], m4b), q6h_2)); + vx0[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[3], m4b), q6h_3)); + + q6h_0 = vandq_u8(mone, qh_bits.val[0]); + q6h_1 = vandq_u8(mone, 
qh_bits.val[1]); + q6h_2 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[0], 2)); + q6h_3 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[1], 2)); + + vx0[4] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[0], 4), q6h_0)); + vx0[5] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[1], 4), q6h_1)); + vx0[6] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[2], 4), q6h_2)); + vx0[7] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[3], 4), q6h_3)); + } + + // de-quantize vx1[8] + { + const uint8x16x2_t qh_bits = vld1q_u8_x2(qh1); + const uint8x16x4_t ql_bits = vld1q_u8_x4(ql1); + + uint8x16_t q6h_0 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 4)); + uint8x16_t q6h_1 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 4)); + uint8x16_t q6h_2 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 2)); + uint8x16_t q6h_3 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 2)); + + vx1[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[0], m4b), q6h_0)); + vx1[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[1], m4b), q6h_1)); + vx1[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[2], m4b), q6h_2)); + vx1[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[3], m4b), q6h_3)); + + q6h_0 = vandq_u8(mone, qh_bits.val[0]); + q6h_1 = vandq_u8(mone, qh_bits.val[1]); + q6h_2 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[0], 2)); + q6h_3 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[1], 2)); + + vx1[4] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[0], 4), q6h_0)); + vx1[5] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[1], 4), q6h_1)); + vx1[6] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[2], 4), q6h_2)); + vx1[7] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[3], 4), q6h_3)); + } + + // process 16 elements (one block with same scale) per iteration + // - vx = concat(ql, qh) - 32 + // - r1,r2,r3,r4 = smmla(vx, vy) + for (int k = 0; k < 8; ++k) { + const int blk = j * 8 + k; + + const int8x16_t vy0 = vld1q_s8(qy0); + const int8x16_t vy1 = vld1q_s8(qy1); + qy0 += 16; + qy1 += 16; + + const int32x4_t block_scale = { + x0->scales[blk], + x0->scales[blk], + x1->scales[blk], + x1->scales[blk], + }; + + // calculate four results at once with outer product + const int8x16_t vx_l = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(vx0[k]), vreinterpretq_s64_s8(vx1[k]))); + const int8x16_t vx_h = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(vx0[k]), vreinterpretq_s64_s8(vx1[k]))); + const int8x16_t vy_l = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(vy0), vreinterpretq_s64_s8(vy1))); + const int8x16_t vy_h = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(vy0), vreinterpretq_s64_s8(vy1))); + int32x4_t vr = vdupq_n_s32(0); + vr = vmmlaq_s32(vr, vx_l, vy_l); + vr = vmmlaq_s32(vr, vx_h, vy_h); + + // apply block scale, will NOT overflow + // block_scale * sum_256(int6*int8) <= 2^(8+8+6+8) = 30 bits + visum = vmlaq_s32(visum, vr, block_scale); + } + } + + // adjust bias, apply superblock scale + { + int32_t bias[4]; +#ifdef __ARM_FEATURE_SVE + const svbool_t pg16_8 = svptrue_pat_b16(SV_VL8); + const svbool_t pg8_8 = svptrue_pat_b8(SV_VL8); + const svint16_t y0_q8sums_0 = svld1_s16(pg16_8, y0->bsums); + const svint16_t y0_q8sums_1 = svld1_s16(pg16_8, y0->bsums + 8); + const svint16_t y1_q8sums_0 = svld1_s16(pg16_8, y1->bsums); + const svint16_t y1_q8sums_1 = svld1_s16(pg16_8, y1->bsums + 8); + const svint16_t x0_q6scales_0 = svunpklo_s16(svld1_s8(pg8_8, x0->scales)); + const svint16_t x0_q6scales_1 = svunpklo_s16(svld1_s8(pg8_8, x0->scales + 8)); + const svint16_t x1_q6scales_0 = 
svunpklo_s16(svld1_s8(pg8_8, x1->scales)); + const svint16_t x1_q6scales_1 = svunpklo_s16(svld1_s8(pg8_8, x1->scales + 8)); + const svint64_t zero = svdup_n_s64(0); + bias[0] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y0_q8sums_0, x0_q6scales_0), + svdot_s64(zero, y0_q8sums_1, x0_q6scales_1))); + bias[1] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y1_q8sums_0, x0_q6scales_0), + svdot_s64(zero, y1_q8sums_1, x0_q6scales_1))); + bias[2] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y0_q8sums_0, x1_q6scales_0), + svdot_s64(zero, y0_q8sums_1, x1_q6scales_1))); + bias[3] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y1_q8sums_0, x1_q6scales_0), + svdot_s64(zero, y1_q8sums_1, x1_q6scales_1))); +#else + // NEON doesn't support int16 dot product, fallback to separated mul and add + const int16x8x2_t q8sums0 = vld1q_s16_x2(y0->bsums); + const int16x8x2_t q8sums1 = vld1q_s16_x2(y1->bsums); + + int8x16_t scales_s8 = vld1q_s8(x0->scales); + const int16x8x2_t q6scales0 = {{vmovl_s8(vget_low_s8(scales_s8)), vmovl_s8(vget_high_s8(scales_s8))}}; + scales_s8 = vld1q_s8(x1->scales); + const int16x8x2_t q6scales1 = {{vmovl_s8(vget_low_s8(scales_s8)), vmovl_s8(vget_high_s8(scales_s8))}}; + + int32x4_t prod; + prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[0]), vget_low_s16 (q6scales0.val[0])), + vmull_s16(vget_high_s16(q8sums0.val[0]), vget_high_s16(q6scales0.val[0]))), + vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[1]), vget_low_s16 (q6scales0.val[1])), + vmull_s16(vget_high_s16(q8sums0.val[1]), vget_high_s16(q6scales0.val[1])))); + bias[0] = vaddvq_s32(prod); + prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[0]), vget_low_s16 (q6scales0.val[0])), + vmull_s16(vget_high_s16(q8sums1.val[0]), vget_high_s16(q6scales0.val[0]))), + vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[1]), vget_low_s16 (q6scales0.val[1])), + vmull_s16(vget_high_s16(q8sums1.val[1]), vget_high_s16(q6scales0.val[1])))); + bias[1] = vaddvq_s32(prod); + prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[0]), vget_low_s16 (q6scales1.val[0])), + vmull_s16(vget_high_s16(q8sums0.val[0]), vget_high_s16(q6scales1.val[0]))), + vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[1]), vget_low_s16 (q6scales1.val[1])), + vmull_s16(vget_high_s16(q8sums0.val[1]), vget_high_s16(q6scales1.val[1])))); + bias[2] = vaddvq_s32(prod); + prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[0]), vget_low_s16 (q6scales1.val[0])), + vmull_s16(vget_high_s16(q8sums1.val[0]), vget_high_s16(q6scales1.val[0]))), + vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[1]), vget_low_s16 (q6scales1.val[1])), + vmull_s16(vget_high_s16(q8sums1.val[1]), vget_high_s16(q6scales1.val[1])))); + bias[3] = vaddvq_s32(prod); + +#endif + const int32x4_t vibias = vmulq_n_s32(vld1q_s32(bias), 32); + + const float32x4_t superblock_scale = { + GGML_FP16_TO_FP32(x0->d) * y0->d, + GGML_FP16_TO_FP32(x0->d) * y1->d, + GGML_FP16_TO_FP32(x1->d) * y0->d, + GGML_FP16_TO_FP32(x1->d) * y1->d, + }; + + visum = vsubq_s32(visum, vibias); + vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale); + } + } + + // vfsum = ABCD -> ACBD + // AC -> s, BD -> (s+bs) + vfsum = vzip1q_f32(vfsum, vextq_f32(vfsum, vfsum, 2)); + vst1_f32(s, vget_low_f32 (vfsum)); + vst1_f32(s + bs, vget_high_f32(vfsum)); + + return; + } +#endif + +#ifdef __ARM_FEATURE_SVE + const int vector_length = ggml_cpu_get_sve_cnt()*8; + float sum = 0; + svuint8_t m4b = svdup_n_u8(0xf); + 
svint32_t vzero = svdup_n_s32(0); + svuint8_t mone = svdup_n_u8(0x30); + svint8_t q6bytes_1, q6bytes_2, q6bytes_3, q6bytes_4; + svuint8_t q6h_1, q6h_2, q6h_3, q6h_4; + + for (int i = 0; i < nb; ++i) { + const float d_all = GGML_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q6 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const int8_t * GGML_RESTRICT scale = x[i].scales; + + const svbool_t pg16_8 = svptrue_pat_b16(SV_VL8); + const svint16_t q8sums_1 = svld1_s16(pg16_8, y[i].bsums); + const svint16_t q8sums_2 = svld1_s16(pg16_8, y[i].bsums + 8); + const svint16_t q6scales_1 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale)); + const svint16_t q6scales_2 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale + 8)); + const svint64_t prod = svdup_n_s64(0); + int32_t isum_mins = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(prod, q8sums_1, q6scales_1), + svdot_s64(prod, q8sums_2, q6scales_2))); + int32_t isum = 0; + + switch (vector_length) { + case 128: + { + const svbool_t pg32_4 = svptrue_pat_b32(SV_VL4); + const svbool_t pg8_16 = svptrue_pat_b8(SV_VL16); + svint32_t isum_tmp = svdup_n_s32(0); + for (int j = 0; j < QK_K/128; ++j) { + svuint8_t qhbits_1 = svld1_u8(pg8_16, qh); + svuint8_t qhbits_2 = svld1_u8(pg8_16, qh+16); + qh += 32; + svuint8_t q6bits_1 = svld1_u8(pg8_16, q6); + svuint8_t q6bits_2 = svld1_u8(pg8_16, q6+16); + svuint8_t q6bits_3 = svld1_u8(pg8_16, q6+32); + svuint8_t q6bits_4 = svld1_u8(pg8_16, q6+48); + q6 += 64; + svint8_t q8bytes_1 = svld1_s8(pg8_16, q8); + svint8_t q8bytes_2 = svld1_s8(pg8_16, q8+16); + svint8_t q8bytes_3 = svld1_s8(pg8_16, q8+32); + svint8_t q8bytes_4 = svld1_s8(pg8_16, q8+48); + q8 += 64; + + q6h_1 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 4)); + q6h_2 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 4)); + q6h_3 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 2)); + q6h_4 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 2)); + q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_1, m4b), q6h_1)); + q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_2, m4b), q6h_2)); + q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_3, m4b), q6h_3)); + q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_4, m4b), q6h_4)); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]); + + scale += 4; + q8bytes_1 = svld1_s8(pg8_16, q8); + q8bytes_2 = svld1_s8(pg8_16, q8+16); + q8bytes_3 = svld1_s8(pg8_16, q8+32); + q8bytes_4 = svld1_s8(pg8_16, q8+48); + q8 += 64; + + q6h_1 = svand_u8_x(pg16_8, mone, qhbits_1); + q6h_2 = svand_u8_x(pg16_8, mone, qhbits_2); + q6h_3 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_1, 2)); + q6h_4 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_2, 2)); + q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_1, 4), q6h_1)); + q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_2, 4), q6h_2)); + q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_3, 4), q6h_3)); + q6bytes_4 = 
svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_4, 4), q6h_4)); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]); + scale += 4; + } + isum += svaddv_s32(pg32_4, isum_tmp); + sum += d_all * y[i].d * (isum - 32 * isum_mins); + } + break; + case 256: + case 512: + { + const svbool_t pg8_2 = svptrue_pat_b8(SV_VL2); + const svbool_t pg32_8 = svptrue_pat_b32(SV_VL8); + const svbool_t pg8_32 = svptrue_pat_b8(SV_VL32); + svint32_t isum_tmp = svdup_n_s32(0); + for (int j = 0; j < QK_K/128; j++) { + svuint8_t qhbits_1 = svld1_u8(pg8_32, qh); + qh += 32; + svuint8_t q6bits_1 = svld1_u8(pg8_32, q6); + svuint8_t q6bits_2 = svld1_u8(pg8_32, q6+32); + q6 += 64; + svint8_t q8bytes_1 = svld1_s8(pg8_32, q8); + svint8_t q8bytes_2 = svld1_s8(pg8_32, q8+32); + svint8_t q8bytes_3 = svld1_s8(pg8_32, q8+64); + svint8_t q8bytes_4 = svld1_s8(pg8_32, q8+96); + q8 += 128; + q6h_1 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 4)); + q6h_2 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 2)); + q6h_3 = svand_u8_x(pg8_32, mone, qhbits_1); + q6h_4 = svand_u8_x(pg8_32, mone, svlsr_n_u8_x(pg8_32, qhbits_1, 2)); + q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_1, m4b), q6h_1)); + q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_2, m4b), q6h_2)); + q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_1, 4), q6h_3)); + q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_2, 4), q6h_4)); + + svint8_t scale_lane_1_tmp = svld1_s8(pg8_2, scale); + scale_lane_1_tmp= svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp); + scale_lane_1_tmp= svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp); + svint8_t scale_lane_2_tmp = svld1_s8(pg8_2, scale+2); + scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp); + scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp); + svint8_t scale_lane_3_tmp = svld1_s8(pg8_2, scale+4); + scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp); + scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp); + svint8_t scale_lane_4_tmp = svld1_s8(pg8_2, scale+6); + scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp); + scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp); + svint32_t scale_lane_1 = svunpklo_s32(svunpklo_s16(scale_lane_1_tmp)); + svint32_t scale_lane_2 = svunpklo_s32(svunpklo_s16(scale_lane_2_tmp)); + svint32_t scale_lane_3 = svunpklo_s32(svunpklo_s16(scale_lane_3_tmp)); + svint32_t scale_lane_4 = svunpklo_s32(svunpklo_s16(scale_lane_4_tmp)); + + isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale_lane_1); + isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale_lane_2); + isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale_lane_3); + isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale_lane_4); + scale += 8; + } + isum += svaddv_s32(pg32_8, isum_tmp); + sum += d_all * y[i].d * (isum - 32 * isum_mins); + } + break; + default: + assert(false && "Unsupported vector length"); + break; + } + } + + *s = sum; + +#elif __ARM_NEON + float sum = 
0; + + const uint8x16_t m4b = vdupq_n_u8(0xF); + const int32x4_t vzero = vdupq_n_s32(0); + //const int8x16_t m32s = vdupq_n_s8(32); + + const uint8x16_t mone = vdupq_n_u8(3); + + ggml_int8x16x4_t q6bytes; + ggml_uint8x16x4_t q6h; + + for (int i = 0; i < nb; ++i) { + + const float d_all = GGML_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q6 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const int8_t * GGML_RESTRICT scale = x[i].scales; + + const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); + const int8x16_t scales = vld1q_s8(scale); + const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}}; + + const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])), + vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))), + vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])), + vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1])))); + int32_t isum_mins = vaddvq_s32(prod); + + int32_t isum = 0; + + for (int j = 0; j < QK_K/128; ++j) { + + ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32; + ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64; + ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; + + q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); + q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); + uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2); + q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits.val[1], 2); + q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + + //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s); + //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s); + //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s); + //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s); + q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])); + q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])); + q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])); + q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])); + + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; + + scale += 4; + + q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; + + shifted = vshrq_n_u8(qhbits.val[0], 4); + q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits.val[1], 4); + q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits.val[0], 6); + q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits.val[1], 6); + q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + + //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s); + //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s); + //q6bytes.val[2] = 
vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s); + //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s); + q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])); + q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])); + q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])); + q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])); + + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; + scale += 4; + } + //sum += isum * d_all * y[i].d; + sum += d_all * y[i].d * (isum - 32 * isum_mins); + + } + *s = sum; +#else + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; + } + a += 128; + q4 += 64; + qh += 32; + } + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/16; ++j) { + int scale = x[i].scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +#if defined (__ARM_NEON) +static const int8_t keven_signs_q2xs[1024] = { + 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, + 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, + 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, + 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, + 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, + 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, + 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, + 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, + 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, + 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 
1, + 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, + 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, + 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, + 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, + 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, + 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, + 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, + 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, + 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, + 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, + 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, + 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, + 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, + 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, + 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, + 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, + 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, + 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, + 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, + 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, + 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, + 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, +}; +#endif + +void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_NEON) + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[4]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + ggml_int8x16x4_t q2u; + ggml_int8x16x4_t q2s; + ggml_int8x16x4_t q8b; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + float sumf1 = 0, sumf2 = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + memcpy(aux32, q2, 
4*sizeof(uint32_t)); q2 += 8; + q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1]))); + q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3]))); + q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9]))); + q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11]))); + q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127)))); + q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127)))); + q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 7) & 127)))); + q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127)))); + q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]); + q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]); + q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]); + q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]); + const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]); + const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]); + sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28)); + sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28)); + } + sumf += d*(sumf1 + sumf2); + } + *s = 0.25f * sumf; + +#else + + uint32_t aux32[2]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + memcpy(aux32, q2, 2*sizeof(uint32_t)); + q2 += 4; + const uint32_t ls = 2*(aux32[1] >> 28) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); + const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? 
-1 : 1); + } + q8 += 8; + } + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +#endif +} + +void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_NEON) + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + ggml_int8x16x4_t q2u; + ggml_int8x16x4_t q2s; + ggml_int8x16x4_t q8b; + + int32x4x4_t scales32; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + const uint8x8_t scales8 = vld1_u8(x[i].scales); + const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf)); + const uint8x8_t scales_h = vshr_n_u8(scales8, 4); + uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h)); + scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1)); + const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales)); + const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales)); + scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales1))); + scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1))); + scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales2))); + scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2))); + int32x4_t sumi = vdupq_n_s32(0); + for (int ib64 = 0; ib64 < QK_K/64; ++ib64) { + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511)))); + q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511)))); + q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511)))); + q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511)))); + q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9)))); + q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9)))); + q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9)))); + q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9)))); + q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]); + q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]); + q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]); + q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]); + const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]); + const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]); + const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]); + const int32x4_t p4 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]); + const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4)); + sumi = vmlaq_s32(sumi, p, scales32.val[ib64]); + q2 += 8; + } + sumf += d*vaddvq_s32(sumi); + } + *s = 0.125f * sumf; + +#else + + float sumf = 0.f; + for (int 
i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const uint8_t * GGML_RESTRICT sc = x[i].scales; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1; + const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1; + int32_t sumi = 0; + for (int l = 0; l < 2; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); + const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += sumi * ls1; + sumi = 0; + for (int l = 2; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); + const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += sumi * ls2; + q2 += 4; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +#endif +} + +void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_NEON) + + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; + + const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1); + const uint8x16_t mask2 = vld1q_u8(k_mask2); + const uint8x16_t m1 = vdupq_n_u8(1); + const int32x4_t vzero = vdupq_n_s32(0); + + uint8x16x2_t vs; + ggml_int8x16x4_t q2s; + ggml_int8x16x4_t q8b; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + int sumi1 = 0, sumi2 = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + q2s.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[0] | ((qh[ib32+0] << 8) & 0x300)))), + vld1_s8((const int8_t *)(iq2s_grid + (qs[1] | ((qh[ib32+0] << 6) & 0x300))))); + q2s.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[2] | ((qh[ib32+0] << 4) & 0x300)))), + vld1_s8((const int8_t *)(iq2s_grid + (qs[3] | ((qh[ib32+0] << 2) & 0x300))))); + q2s.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[4] | ((qh[ib32+1] << 8) & 0x300)))), + vld1_s8((const int8_t *)(iq2s_grid + (qs[5] | ((qh[ib32+1] << 6) & 0x300))))); + q2s.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[6] | ((qh[ib32+1] << 4) & 0x300)))), + vld1_s8((const int8_t *)(iq2s_grid + (qs[7] | ((qh[ib32+1] << 2) & 0x300))))); + qs += 8; + + vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16))); + vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); + vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); + 
vs.val[0] = vceqq_u8(vs.val[0], mask2); + vs.val[1] = vceqq_u8(vs.val[1], mask2); + + q2s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[0]); + q2s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[1]); + + vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16))); + vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); + vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); + vs.val[0] = vceqq_u8(vs.val[0], mask2); + vs.val[1] = vceqq_u8(vs.val[1], mask2); + + signs += 4; + + q2s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[2]); + q2s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[3]); + + const int32x4_t p1 = ggml_vdotq_s32(vzero, q2s.val[0], q8b.val[0]); + const int32x4_t p2 = ggml_vdotq_s32(vzero, q2s.val[1], q8b.val[1]); + const int32x4_t p3 = ggml_vdotq_s32(vzero, q2s.val[2], q8b.val[2]); + const int32x4_t p4 = ggml_vdotq_s32(vzero, q2s.val[3], q8b.val[3]); + + sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32+0] & 0xf)); + sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32+0] >> 4)); + sumi1 += vaddvq_s32(p3) * (1 + 2*(x[i].scales[ib32+1] & 0xf)); + sumi2 += vaddvq_s32(p4) * (1 + 2*(x[i].scales[ib32+1] >> 4)); + } + sumf += d*(sumi1 + sumi2); + } + + *s = 0.125f * sumf; + +#else + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint8_t * signs = qs + QK_K/8; + + int bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf); + int ls2 = 1 + 2*(x[i].scales[ib32] >> 4); + int sumi1 = 0, sumi2 = 0; + for (int l = 0; l < 2; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); + for (int j = 0; j < 8; ++j) { + sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + for (int l = 2; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); + for (int j = 0; j < 8; ++j) { + sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? 
-1 : 1); + } + q8 += 8; + } + bsum += ls1 * sumi1 + ls2 * sumi2; + qs += 4; + signs += 4; + } + + sumf += d * bsum; + } + + *s = 0.125f * sumf; + +#endif + +} + +void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_NEON) + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[2]; + + ggml_int8x16x4_t q3s; + ggml_int8x16x4_t q8b; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + float sumf1 = 0, sumf2 = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t); + const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]); + const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]); + const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]); + const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]); + q3 += 16; + q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 7) & 127)))); + q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 127)))); + q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127)))); + q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127)))); + q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0)); + q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1)); + q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2)); + q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3)); + const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]); + const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]); + sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28)); + sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[1] >> 28)); + } + sumf += d*(sumf1 + sumf2); + } + *s = 0.5f * sumf; + +#else + + uint32_t aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t); + const uint32_t ls = 2*(aux32 >> 28) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + 
q3[2*l+0]); + const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]); + const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + q3 += 8; + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.25f * sumf; +#endif +} + +void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_NEON) + + typedef union { + uint16x8_t vec_index; + uint16_t index[8]; + } vec_index_t; + + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; + + static const int16_t k_shift[8] = {8, 7, 6, 5, 4, 3, 2, 1}; + + const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1); + const uint8x16_t mask2 = vld1q_u8(k_mask2); + + const int16x8_t hshift = vld1q_s16(k_shift); + const uint16x8_t m256 = vdupq_n_u16(256); + const uint8x16_t m1 = vdupq_n_u8(1); + + uint8x16x2_t vs; + ggml_int8x16x4_t q3s; + ggml_int8x16x4_t q8b; + vec_index_t idx; + + uint32_t scales32[2]; + const uint8_t * scales8 = (const uint8_t *)scales32; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(scales32, x[i].scales, 4); + scales32[1] = (((scales32[0] >> 4) & 0x0f0f0f0f) << 1) | 0x01010101; + scales32[0] = ((scales32[0] & 0x0f0f0f0f) << 1) | 0x01010101; + + int sumi1 = 0, sumi2 = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + + const uint8x16_t idx_l = vld1q_u8(qs); qs += 16; + idx.vec_index = vorrq_u16(vmovl_u8(vget_low_u8 (idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+0]), hshift), m256)); + const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]], + iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]); + const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]], + iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]); + idx.vec_index = vorrq_u16(vmovl_u8(vget_high_u8(idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+1]), hshift), m256)); + const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]], + iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]); + const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]], + iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]); + + + vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16))); + vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); + vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); + vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], 
mask2), m1); + vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1); + + q3s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_0)); + q3s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_1)); + + vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16))); + vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); + vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); + vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1); + vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1); + + signs += 4; + + q3s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_2)); + q3s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_3)); + + const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]); + const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]); + + sumi1 += vaddvq_s32(p1) * scales8[ib32/2+0]; + sumi2 += vaddvq_s32(p2) * scales8[ib32/2+4]; + } + sumf += d*(sumi1 + sumi2); + } + *s = sumf; + +#else + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint8_t * GGML_RESTRICT signs = x[i].signs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1; + const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256))); + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + qs += 8; + signs += 4; + bsum += sumi * ls1; + sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256))); + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? 
-1 : 1); + } + q8 += 8; + } + qs += 8; + signs += 4; + bsum += sumi * ls2; + } + sumf += d * bsum; + } + *s = sumf; +#endif +} + +void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __ARM_NEON + + ggml_int8x16x4_t q1b; + ggml_int8x16x4_t q8b; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + int sumi1 = 0, sumi2 = 0, sumi3 = 0; + + for (int ib = 0; ib < QK_K/32; ib += 2) { + + q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[ib+0] << 8) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[ib+0] << 5) & 0x700))))); + q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[ib+0] << 2) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[ib+0] >> 1) & 0x700))))); + q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[ib+1] << 8) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[ib+1] << 5) & 0x700))))); + q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[ib+1] << 2) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[ib+1] >> 1) & 0x700))))); + qs += 8; + + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + + const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[0], q8b.val[0]), q1b.val[1], q8b.val[1]); + const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[2], q8b.val[2]), q1b.val[3], q8b.val[3]); + + const int ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; + const int ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; + sumi1 += vaddvq_s32(p1) * ls1; + sumi2 += vaddvq_s32(p2) * ls2; + sumi3 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * ls1 * (qh[ib+0] & 0x8000 ? -1 : 1) + + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * ls2 * (qh[ib+1] & 0x8000 ? -1 : 1); + + } + + sumf += y[i].d * GGML_FP16_TO_FP32(x[i].d) * (sumi1 + sumi2 + IQ1S_DELTA * sumi3); + } + + *s = sumf; + +#else + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + int sumi = 0, sumi1 = 0; + for (int ib = 0; ib < QK_K/32; ++ib) { + const int ls = 2*((qh[ib] >> 12) & 7) + 1; + const int delta = qh[ib] & 0x8000 ? 
-1 : 1; + int lsum = 0; + for (int l = 0; l < 4; ++l) { + const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8))); + for (int j = 0; j < 8; ++j) { + lsum += q8[j] * grid[j]; + } + q8 += 8; + } + sumi += ls * lsum; + sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]); + qs += 4; + } + + sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); + } + + *s = sumf; + +#endif +} + +void ggml_vec_dot_iq1_m_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_m * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + iq1m_scale_t scale; + +#if defined __ARM_NEON + const int32x4_t mask = vdupq_n_s32(0x7); + const int32x4_t mone = vdupq_n_s32(1); + const int32x4_t mzero = vdupq_n_s32(0); + + ggml_int8x16x4_t deltas; + deltas.val[0] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(+1)); + deltas.val[1] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(+1)); + deltas.val[2] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(-1)); + deltas.val[3] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(-1)); + + ggml_int8x16x4_t q1b; + ggml_int8x16x4_t q8b; + + uint32_t aux32; + const uint8_t * aux8 = (const uint8_t *)&aux32; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint16_t * sc = (const uint16_t *)x[i].scales; + + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + + int32x4_t sumi1 = mzero; + int32x4_t sumi2 = mzero; + + for (int ib = 0; ib < QK_K/32; ib += 2) { + + q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[0] << 8) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[0] << 4) & 0x700))))); + q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[1] << 8) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[1] << 4) & 0x700))))); + q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[2] << 8) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[2] << 4) & 0x700))))); + q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[3] << 8) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[3] << 4) & 0x700))))); + + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + + const int32x4_t p1 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[0], q8b.val[0]), ggml_vdotq_s32(mzero, q1b.val[1], q8b.val[1])); + const int32x4_t p2 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[2], q8b.val[2]), ggml_vdotq_s32(mzero, q1b.val[3], q8b.val[3])); + const int32x4_t p12 = vpaddq_s32(p1, p2); + + const uint32_t * qh32 = (const uint32_t *)qh; // we are 4-byte aligned, so we can do that + aux32 = ((qh32[0] >> 3) & 0x01010101) | ((qh32[0] >> 6) & 0x02020202); + + const int32x4_t p3 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[0]], q8b.val[0]), ggml_vdotq_s32(mzero, deltas.val[aux8[1]], q8b.val[1])); + const int32x4_t p4 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[2]], q8b.val[2]), ggml_vdotq_s32(mzero, deltas.val[aux8[3]], q8b.val[3])); + const int32x4_t p34 = vpaddq_s32(p3, p4); + + int32x4_t scales_4 = ggml_vld1q_u32(sc[ib/2] >> 0, sc[ib/2] >> 3, sc[ib/2] >> 6, sc[ib/2] >> 9); + + scales_4 = vaddq_s32(vshlq_n_s32(vandq_s32(scales_4, mask), 1), mone); + + sumi1 
= vmlaq_s32(sumi1, scales_4, p12); + sumi2 = vmlaq_s32(sumi2, scales_4, p34); + + qs += 8; qh += 4; + + } + + sumf += y[i].d * GGML_FP16_TO_FP32(scale.f16) * (vaddvq_s32(sumi1) + IQ1M_DELTA * vaddvq_s32(sumi2)); + } + + *s = sumf; + +#else + + int sum1[2], sum2[2], delta[4]; + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint16_t * sc = (const uint16_t *)x[i].scales; + + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + + int sumi1 = 0, sumi2 = 0; + for (int ib = 0; ib < QK_K/32; ++ib) { + delta[0] = qh[0] & 0x08 ? -1 : 1; + delta[1] = qh[0] & 0x80 ? -1 : 1; + delta[2] = qh[1] & 0x08 ? -1 : 1; + delta[3] = qh[1] & 0x80 ? -1 : 1; + sum1[0] = sum1[1] = sum2[0] = sum2[1] = 0; + for (int l = 0; l < 4; ++l) { + const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((uint16_t)qh[l/2] << (8 - 4*(l%2))) & 0x700))); + int lsum1 = 0, lsum2 = 0; + for (int j = 0; j < 8; ++j) { + lsum1 += q8[j] * grid[j]; + lsum2 += q8[j]; + } + q8 += 8; + sum1[l/2] += lsum1; + sum2[l/2] += lsum2*delta[l]; + } + + const int ls1 = 2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1; + const int ls2 = 2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1; + + sumi1 += sum1[0] * ls1 + sum1[1] * ls2; + sumi2 += sum2[0] * ls1 + sum2[1] * ls2; + qs += 4; + qh += 2; + } + + sumf += GGML_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); + } + + *s = sumf; + +#endif +} + +void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK4_NL == 0); + static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); + + const block_iq4_nl * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + const int nb = n / QK4_NL; + + int ib = 0; + float sumf = 0; + +#if defined __ARM_NEON + const int8x16_t values = vld1q_s8(kvalues_iq4nl); + const uint8x16_t m4b = vdupq_n_u8(0x0f); + uint8x16x2_t q4bits; + int8x16x4_t q4b; + int8x16x4_t q8b; + int32x4_t prod_1, prod_2; + + for (; ib + 1 < nb; ib += 2) { + + q4bits.val[0] = vld1q_u8(x[ib + 0].qs); + q4bits.val[1] = vld1q_u8(x[ib + 1].qs); + q8b.val[0] = vld1q_s8(y[ib + 0].qs); + q8b.val[1] = vld1q_s8(y[ib + 0].qs + 16); + q8b.val[2] = vld1q_s8(y[ib + 1].qs); + q8b.val[3] = vld1q_s8(y[ib + 1].qs + 16); + + q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b)); + q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4)); + q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b)); + q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4)); + + prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]); + prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]); + + sumf += + GGML_FP16_TO_FP32(x[ib+0].d) * GGML_FP16_TO_FP32(y[ib + 0].d) * vaddvq_s32(prod_1) + + GGML_FP16_TO_FP32(x[ib+1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) * vaddvq_s32(prod_2); + } + +#endif + for (; ib < nb; ++ib) { + const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; + sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; + } + sumf += d * (sumi1 + sumi2); + } + *s = 
sumf; +} + +void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_K == 0); + + const block_iq4_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __ARM_NEON + const int8x16_t values = vld1q_s8(kvalues_iq4nl); + const uint8x16_t m4b = vdupq_n_u8(0x0f); + ggml_uint8x16x2_t q4bits; + ggml_int8x16x4_t q4b; + ggml_int8x16x4_t q8b; + int32x4_t prod_1, prod_2; + + float sumf = 0; + + for (int ibl = 0; ibl < nb; ++ibl) { + + const int8_t * q8 = y[ibl].qs; + const uint8_t * q4 = x[ibl].qs; + uint16_t h = x[ibl].scales_h; + + int sumi1 = 0, sumi2 = 0; + for (int ib = 0; ib < QK_K/64; ++ib) { + + q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + + q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b)); + q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4)); + q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b)); + q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4)); + + prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]); + prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]); + + int ls1 = ((x[ibl].scales_l[ib] & 0xf) | ((h << 4) & 0x30)) - 32; + int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32; + h >>= 4; + sumi1 += vaddvq_s32(prod_1) * ls1; + sumi2 += vaddvq_s32(prod_2) * ls2; + + } + + sumf += GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2); + } + + *s = sumf; + +#else + float sumf = 0; + for (int ibl = 0; ibl < nb; ++ibl) { + const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + uint16_t h = x[ibl].scales_h; + const uint8_t * qs = x[ibl].qs; + const int8_t * q8 = y[ibl].qs; + for (int ib = 0; ib < QK_K/32; ib += 2) { + const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30); + const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30); + h >>= 4; + const float d1 = d4d8*(ls1 - 32); + const float d2 = d4d8*(ls2 - 32); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d1 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + sumi1 = sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d2 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + } + } + *s = sumf; +#endif +} + diff --git a/ggml/src/ggml-cpu/arch/arm/repack.cpp b/ggml/src/ggml-cpu/arch/arm/repack.cpp new file mode 100644 index 0000000000000..9337e01b62390 --- /dev/null +++ b/ggml/src/ggml-cpu/arch/arm/repack.cpp @@ -0,0 +1,2174 @@ +#define GGML_COMMON_IMPL_CPP +#define GGML_COMMON_DECL_CPP +#include "ggml-common.h" +#include "ggml-backend-impl.h" + +#include "ggml-impl.h" +#include "ggml-cpu.h" +#include "ggml-cpu-impl.h" +#include "traits.h" + +#include +#include +#include +#include // for qsort +#include // for GGML_ASSERT + +#define GGML_CPU_CLANG_WORKAROUND +#include "../../repack.h" + +#if defined(__GNUC__) +#pragma GCC diagnostic ignored "-Woverlength-strings" +#endif + +#define UNUSED GGML_UNUSED + +void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 
32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; + +#if defined(__ARM_NEON) + float32x4_t srcv[4][8]; + float id[4]; + + for (int i = 0; i < nb; i++) { + float32x4_t asrcv[8]; + float32x4_t amaxv[8]; + + for (int row_iter = 0; row_iter < 4; row_iter++) { + for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j); + for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]); + + for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]); + for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]); + for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]); + + const float amax = vmaxvq_f32(amaxv[0]); + + const float d = amax / ((1 << 7) - 1); + id[row_iter] = d ? 1.0f / d : 0.0f; + + y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + } + + for (int j = 0; j < 8; j++) { + float32x4_t v = vmulq_n_f32(srcv[0][j], id[0]); + int32x4_t vi = vcvtnq_s32_f32(v); + y[i].qs[16 * j + 0] = vgetq_lane_s32(vi, 0); + y[i].qs[16 * j + 1] = vgetq_lane_s32(vi, 1); + y[i].qs[16 * j + 2] = vgetq_lane_s32(vi, 2); + y[i].qs[16 * j + 3] = vgetq_lane_s32(vi, 3); + + v = vmulq_n_f32(srcv[1][j], id[1]); + vi = vcvtnq_s32_f32(v); + y[i].qs[16 * j + 4] = vgetq_lane_s32(vi, 0); + y[i].qs[16 * j + 5] = vgetq_lane_s32(vi, 1); + y[i].qs[16 * j + 6] = vgetq_lane_s32(vi, 2); + y[i].qs[16 * j + 7] = vgetq_lane_s32(vi, 3); + + v = vmulq_n_f32(srcv[2][j], id[2]); + vi = vcvtnq_s32_f32(v); + y[i].qs[16 * j + 8] = vgetq_lane_s32(vi, 0); + y[i].qs[16 * j + 9] = vgetq_lane_s32(vi, 1); + y[i].qs[16 * j + 10] = vgetq_lane_s32(vi, 2); + y[i].qs[16 * j + 11] = vgetq_lane_s32(vi, 3); + + v = vmulq_n_f32(srcv[3][j], id[3]); + vi = vcvtnq_s32_f32(v); + y[i].qs[16 * j + 12] = vgetq_lane_s32(vi, 0); + y[i].qs[16 * j + 13] = vgetq_lane_s32(vi, 1); + y[i].qs[16 * j + 14] = vgetq_lane_s32(vi, 2); + y[i].qs[16 * j + 15] = vgetq_lane_s32(vi, 3); + } + } +#else + // scalar + const int blck_size_interleave = 4; + float srcv[4][QK8_0]; + float id[4]; + + for (int i = 0; i < nb; i++) { + for (int row_iter = 0; row_iter < 4; row_iter++) { + float amax = 0.0f; // absolute max + + for (int j = 0; j < QK8_0; j++) { + srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j]; + amax = MAX(amax, fabsf(srcv[row_iter][j])); + } + + const float d = amax / ((1 << 7) - 1); + id[row_iter] = d ? 
1.0f / d : 0.0f; + + y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + } + + for (int j = 0; j < QK8_0 * 4; j++) { + int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; + int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; + src_offset += (j % blck_size_interleave); + + float x0 = srcv[src_id][src_offset] * id[src_id]; + y[i].qs[j] = roundf(x0); + } + } +#endif +} + +void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; + +#if defined(__ARM_NEON) + float32x4_t srcv[4][8]; + float id[4]; + + for (int i = 0; i < nb; i++) { + float32x4_t asrcv[8]; + float32x4_t amaxv[8]; + + for (int row_iter = 0; row_iter < 4; row_iter++) { + for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j); + for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]); + + for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]); + for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]); + for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]); + + const float amax = vmaxvq_f32(amaxv[0]); + + const float d = amax / ((1 << 7) - 1); + id[row_iter] = d ? 1.0f / d : 0.0f; + + y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + } + + for (int j = 0; j < 4; j++) { + float32x4_t v = vmulq_n_f32(srcv[0][2 * j], id[0]); + int32x4_t vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 0] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 1] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 2] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 3] = vgetq_lane_s32(vi, 3); + v = vmulq_n_f32(srcv[0][2 * j + 1], id[0]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 4] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 5] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 6] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 7] = vgetq_lane_s32(vi, 3); + + v = vmulq_n_f32(srcv[1][2 * j], id[1]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 8] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 9] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 10] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 11] = vgetq_lane_s32(vi, 3); + v = vmulq_n_f32(srcv[1][2 * j + 1], id[1]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 12] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 13] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 14] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 15] = vgetq_lane_s32(vi, 3); + + v = vmulq_n_f32(srcv[2][2 * j], id[2]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 16] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 17] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 18] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 19] = vgetq_lane_s32(vi, 3); + v = vmulq_n_f32(srcv[2][2 * j + 1], id[2]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 20] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 21] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 22] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 23] = vgetq_lane_s32(vi, 3); + + v = vmulq_n_f32(srcv[3][2 * j], id[3]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 24] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 25] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 26] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 27] = vgetq_lane_s32(vi, 3); + v = vmulq_n_f32(srcv[3][2 * j + 1], id[3]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 28] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 29] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 30] = vgetq_lane_s32(vi, 2); + 
y[i].qs[32 * j + 31] = vgetq_lane_s32(vi, 3); + } + } + +#else + // scalar + const int blck_size_interleave = 8; + float srcv[4][QK8_0]; + float id[4]; + + for (int i = 0; i < nb; i++) { + for (int row_iter = 0; row_iter < 4; row_iter++) { + float amax = 0.0f; // absolute max + + for (int j = 0; j < QK8_0; j++) { + srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j]; + amax = MAX(amax, fabsf(srcv[row_iter][j])); + } + + const float d = amax / ((1 << 7) - 1); + id[row_iter] = d ? 1.0f / d : 0.0f; + + y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + } + + for (int j = 0; j < QK8_0 * 4; j++) { + int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; + int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; + src_offset += (j % blck_size_interleave); + + float x0 = srcv[src_id][src_offset] * id[src_id]; + y[i].qs[j] = roundf(x0); + } + } +#endif +} + +void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; + + for (int c = 0; c < nc; c += ncols_interleaved) { + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + float32x4_t acc = vdupq_n_f32(0); + for (int b = 0; b < nb; b++) { + int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); + int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); + int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); + int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); + float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); + + int8x16_t a0 = vld1q_s8(a_ptr->qs); + int8x16_t a1 = vld1q_s8(a_ptr->qs + qk/2); + float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); + + int32x4_t ret = vdupq_n_s32(0); + + ret = vdotq_laneq_s32(ret, b0 << 4, a0, 0); + ret = vdotq_laneq_s32(ret, b1 << 4, a0, 1); + ret = vdotq_laneq_s32(ret, b2 << 4, a0, 2); + ret = vdotq_laneq_s32(ret, b3 << 4, a0, 3); + + ret = vdotq_laneq_s32(ret, b0 & 0xf0U, a1, 0); + ret = vdotq_laneq_s32(ret, b1 & 0xf0U, a1, 1); + ret = vdotq_laneq_s32(ret, b2 & 0xf0U, a1, 2); + ret = vdotq_laneq_s32(ret, b3 & 0xf0U, a1, 3); + + acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), + vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); + a_ptr++; + b_ptr++; + } + vst1q_f32(s, acc); + s += ncols_interleaved; + } + return; + } +#endif // #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + float sumf[4]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } +} + +void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; + + for (int c = 0; c < nc; c += ncols_interleaved) { + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + float32x4_t acc = vdupq_n_f32(0); + for (int b = 0; b < nb; b++) { + int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); + int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); + int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); + int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); + float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); + + int8x16_t a0 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs); + int8x16_t a1 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 1); + int8x16_t a2 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 2); + int8x16_t a3 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 3); + float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); + + int32x4_t ret0 = vdupq_n_s32(0); + int32x4_t ret1 = vdupq_n_s32(0); + + ret0 = vdotq_s32(ret0, b0 << 4, a0); + ret1 = vdotq_s32(ret1, b1 << 4, a0); + ret0 = vdotq_s32(ret0, b2 << 4, a1); + ret1 = vdotq_s32(ret1, b3 << 4, a1); + + ret0 = vdotq_s32(ret0, b0 & 0xf0U, a2); + ret1 = vdotq_s32(ret1, b1 & 0xf0U, a2); + ret0 = vdotq_s32(ret0, b2 & 0xf0U, a3); + ret1 = vdotq_s32(ret1, b3 & 0xf0U, a3); + + int32x4_t ret = vpaddq_s32(ret0, ret1); + + acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), + vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); + a_ptr++; + b_ptr++; + } + vst1q_f32(s, acc); + s += ncols_interleaved; + } + return; + } +#endif // #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + float sumf[4]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } +} + +void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) +#if defined(__ARM_FEATURE_SVE) + if (ggml_cpu_has_sve() && ggml_cpu_get_sve_cnt() == QK8_0) { + const void * b_ptr = vx; + const void * a_ptr = vy; + float * res_ptr = s; + + __asm__ __volatile__( + "ptrue p0.b\n" + "add %x[b_ptr], %x[b_ptr], #0x10\n" + "1:" // Column loop + "add x22, %x[a_ptr], #0x2\n" + "mov z31.b, #0x0\n" + "mov x21, %x[nb]\n" + "2:" // Block loop + "ld1b { z30.b }, p0/Z, [%x[b_ptr]]\n" + "ld1b { z29.b }, p0/Z, [%x[b_ptr], #1, MUL VL]\n" + "mov z28.s, #0x0\n" + "mov z27.s, #0x0\n" + "ld1rd { z26.d }, p0/Z, [x22]\n" + "ld1b { z25.b }, p0/Z, [%x[b_ptr], #2, MUL VL]\n" + "sub x20, x22, #0x2\n" + "sub x21, x21, #0x1\n" + "ld1b { z24.b }, p0/Z, [%x[b_ptr], #3, MUL VL]\n" + "ld1rd { z23.d }, p0/Z, [x22, #8]\n" + "lsl z22.b, z30.b, #0x4\n" + "lsl z16.b, z29.b, #0x4\n" + "and z30.b, z30.b, #0xf0\n" + "and z29.b, z29.b, #0xf0\n" + "ld1rd { z21.d }, p0/Z, [x22, #16]\n" + "ld1rd { z20.d }, p0/Z, [x22, #24]\n" + "lsl z19.b, z25.b, #0x4\n" + "and z25.b, z25.b, #0xf0\n" + "ld1rh { z17.h }, p0/Z, [x20]\n" + "ld1h { z18.s }, p0/Z, [%x[b_ptr], #-1, MUL VL]\n" + "sdot z28.s, z22.b, z26.b\n" + "sdot z27.s, z16.b, z26.b\n" + "lsl z16.b, z24.b, #0x4\n" + "add x22, x22, #0x22\n" + "and z24.b, z24.b, #0xf0\n" + "add %x[b_ptr], %x[b_ptr], #0x90\n" + "fcvt z17.s, p0/m, z17.h\n" + "fcvt z18.s, p0/m, z18.h\n" + "sdot z28.s, z19.b, z23.b\n" + "sdot z27.s, z16.b, z23.b\n" + "fmul z18.s, z18.s, z17.s\n" + "sdot z28.s, z30.b, z21.b\n" + "sdot z27.s, z29.b, z21.b\n" + "sdot z28.s, z25.b, z20.b\n" + "sdot z27.s, z24.b, z20.b\n" + "uzp1 z17.s, z28.s, z27.s\n" + "uzp2 z16.s, z28.s, z27.s\n" + "add z17.s, z17.s, z16.s\n" + "asr z17.s, z17.s, #0x4\n" + "scvtf z17.s, p0/m, z17.s\n" + "fmla z31.s, p0/M, z17.s, z18.s\n" + "cbnz x21, 2b\n" + "sub %x[nc], %x[nc], #0x8\n" + "st1w { z31.s }, p0, [%x[res_ptr]]\n" + "add %x[res_ptr], %x[res_ptr], #0x20\n" + "cbnz %x[nc], 1b\n" + : [b_ptr] "+&r" (b_ptr), [res_ptr] "+&r" (res_ptr), [nc] 
"+&r" (nc) + : [a_ptr] "r" (a_ptr), [nb] "r" (nb) + : "memory", "p0", "x20", "x21", "x22", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" + ); + return; + } +#endif // #if defined(__ARM_FEATURE_SVE) + +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) + { + float sumf[8]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } + } +} + +void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { + const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + float * res_ptr = s; + + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + + float32x4_t sumf = vdupq_n_f32(0); + for (int l = 0; l < nb; l++) { + uint8x16_t b_0 = vld1q_u8(b_ptr[l].qs + 0); + uint8x16_t b_1 = vld1q_u8(b_ptr[l].qs + 16); + uint8x16_t b_2 = vld1q_u8(b_ptr[l].qs + 32); + uint8x16_t b_3 = vld1q_u8(b_ptr[l].qs + 48); + + int8x16_t b_0_hi = vqtbl1q_s8(kvalues, b_0 >> 4); + int8x16_t b_0_lo = vqtbl1q_s8(kvalues, b_0 & 0x0F); + int8x16_t b_1_hi = vqtbl1q_s8(kvalues, b_1 >> 4); + int8x16_t b_1_lo = vqtbl1q_s8(kvalues, b_1 & 0x0F); + int8x16_t b_2_hi = vqtbl1q_s8(kvalues, b_2 >> 4); + int8x16_t b_2_lo = vqtbl1q_s8(kvalues, b_2 & 0x0F); + int8x16_t b_3_hi = vqtbl1q_s8(kvalues, b_3 >> 4); + int8x16_t b_3_lo = vqtbl1q_s8(kvalues, b_3 & 0x0F); + + int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 0); + int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16); + + int32x4_t sumi = vdupq_n_s32(0); + sumi = vdotq_laneq_s32(sumi, b_0_lo, a_0, 0); + sumi = vdotq_laneq_s32(sumi, b_0_hi, a_1, 0); + sumi = vdotq_laneq_s32(sumi, b_1_lo, a_0, 1); + sumi = vdotq_laneq_s32(sumi, b_1_hi, a_1, 1); + sumi = vdotq_laneq_s32(sumi, b_2_lo, a_0, 2); + sumi = vdotq_laneq_s32(sumi, b_2_hi, a_1, 2); + sumi = vdotq_laneq_s32(sumi, b_3_lo, a_0, 3); + sumi = vdotq_laneq_s32(sumi, b_3_hi, a_1, 3); + + float32x4_t a_d = vcvt_f32_f16(vld1_dup_f16((const float16_t *)&a_ptr[l].d)); + float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); + float32x4_t d = a_d * b_d; + + sumf = vmlaq_f32(sumf, d, vcvtq_f32_s32(sumi)); + } + + vst1q_f32(res_ptr + x * 4, sumf); + } + return; + } +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) + { + float sumf[4]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; + const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])); + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } + } +} + +void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { + const void * b_ptr = vx; + const void * a_ptr = vy; + float * res_ptr = s; + size_t res_stride = bs * sizeof(float); + + __asm__ __volatile__( + "mov x10, %x[nr]\n" + "mov x9, #0x88\n" + "cmp x10, #0x10\n" + "mul x9, %x[nb], x9\n" + "blt 4f\n" + "1:" // Row loop + "add x28, %x[b_ptr], #0x8\n" + "mov x27, %x[nc]\n" + "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" + "2:" // Column loop + "add x25, %x[a_ptr], #0x8\n" + "movi v15.16b, #0x0\n" + "movi v19.16b, #0x0\n" + "mov x24, %x[nb]\n" + "add x23, x25, x9\n" + "movi v18.16b, #0x0\n" + "movi v14.16b, #0x0\n" + "add x22, x23, x9\n" + "movi v11.16b, #0x0\n" + "movi v13.16b, #0x0\n" + "add x21, x22, x9\n" + "movi v23.16b, #0x0\n" + "movi v16.16b, #0x0\n" + "movi v25.16b, #0x0\n" + "movi v7.16b, #0x0\n" + "movi v0.16b, #0x0\n" + "movi v4.16b, #0x0\n" + "movi v5.16b, #0x0\n" + "movi v21.16b, #0x0\n" + "movi v8.16b, #0x0\n" + "movi v1.16b, #0x0\n" + "3:" // Block loop + "ldr q3, [x28, #0x0]\n" + "ldr q31, [x25, #0x0]\n" + "movi v28.16b, #0x4\n" + "movi v10.4s, #0x0\n" + "ldr q22, [x28, #0x10]\n" + "ldr q6, [x25, #0x10]\n" + "movi v29.4s, #0x0\n" + "movi v9.4s, #0x0\n" + "ldr q27, [x28, #0x20]\n" + "ldr q30, [x28, #0x30]\n" + "movi v20.4s, #0x0\n" + "movi v24.16b, #0xf0\n" + "ldr d2, [x25, #-0x8]\n" + "ldr d26, [x23, #-0x8]\n" + "sshl v12.16b, v3.16b, v28.16b\n" + "sub x20, x28, #0x8\n" + "ldr d17, [x20, #0x0]\n" + "and v3.16b, v3.16b, v24.16b\n" + "subs x24, x24, #0x1\n" + "add x28, x28, #0x48\n" + ".inst 0x4f9fe18a // sdot v10.4s, v12.16b, v31.4b[0]\n" + ".inst 0x4fbfe19d // sdot v29.4s, v12.16b, v31.4b[1]\n" + ".inst 0x4f9fe989 // sdot v9.4s, v12.16b, v31.4b[2]\n" + ".inst 0x4fbfe994 // sdot v20.4s, v12.16b, v31.4b[3]\n" + "sshl v31.16b, v22.16b, v28.16b\n" + "and v22.16b, v22.16b, v24.16b\n" + "fcvtl v17.4s, v17.4h\n" + "fcvtl v2.4s, v2.4h\n" + "fcvtl v26.4s, v26.4h\n" + ".inst 0x4f86e3ea // sdot v10.4s, v31.16b, v6.4b[0]\n" + ".inst 0x4fa6e3fd // sdot v29.4s, v31.16b, v6.4b[1]\n" + ".inst 0x4f86ebe9 // sdot v9.4s, v31.16b, v6.4b[2]\n" + ".inst 0x4fa6ebf4 // sdot v20.4s, v31.16b, v6.4b[3]\n" + "sshl v6.16b, v27.16b, v28.16b\n" + "sshl v28.16b, v30.16b, v28.16b\n" + "and v27.16b, v27.16b, v24.16b\n" + "and v30.16b, v30.16b, v24.16b\n" + "ldr q24, [x25, #0x20]\n" + ".inst 0x4f98e0ca // sdot v10.4s, v6.16b, v24.4b[0]\n" + ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" + ".inst 0x4f98e8c9 // sdot v9.4s, v6.16b, v24.4b[2]\n" + ".inst 0x4fb8e8d4 // sdot v20.4s, v6.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x30]\n" + ".inst 0x4f98e38a // sdot v10.4s, v28.16b, v24.4b[0]\n" + ".inst 0x4fb8e39d // sdot v29.4s, v28.16b, v24.4b[1]\n" + ".inst 0x4f98eb89 // sdot v9.4s, v28.16b, v24.4b[2]\n" + ".inst 0x4fb8eb94 // sdot v20.4s, v28.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x40]\n" + ".inst 0x4f98e06a // sdot v10.4s, v3.16b, v24.4b[0]\n" + ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" + ".inst 0x4f98e869 // sdot v9.4s, v3.16b, v24.4b[2]\n" + ".inst 0x4fb8e874 // sdot v20.4s, v3.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x50]\n" + ".inst 0x4f98e2ca // sdot v10.4s, v22.16b, v24.4b[0]\n" + ".inst 0x4fb8e2dd // sdot v29.4s, v22.16b, v24.4b[1]\n" + ".inst 0x4f98eac9 // sdot v9.4s, v22.16b, v24.4b[2]\n" + ".inst 0x4fb8ead4 // sdot v20.4s, v22.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x60]\n" + ".inst 0x4f98e36a // sdot v10.4s, v27.16b, v24.4b[0]\n" + ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" + ".inst 0x4f98eb69 // sdot v9.4s, 
v27.16b, v24.4b[2]\n" + ".inst 0x4fb8eb74 // sdot v20.4s, v27.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x70]\n" + "add x25, x25, #0x88\n" + ".inst 0x4f98e3ca // sdot v10.4s, v30.16b, v24.4b[0]\n" + ".inst 0x4fb8e3dd // sdot v29.4s, v30.16b, v24.4b[1]\n" + ".inst 0x4f98ebc9 // sdot v9.4s, v30.16b, v24.4b[2]\n" + ".inst 0x4fb8ebd4 // sdot v20.4s, v30.16b, v24.4b[3]\n" + "fmul v24.4s, v17.4s, v2.s[0]\n" + "scvtf v10.4s, v10.4s, #0x4\n" + "scvtf v29.4s, v29.4s, #0x4\n" + "scvtf v9.4s, v9.4s, #0x4\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "fmla v15.4s, v10.4s, v24.4s\n" + "ldr q24, [x23, #0x0]\n" + "fmul v10.4s, v17.4s, v2.s[1]\n" + "fmla v19.4s, v29.4s, v10.4s\n" + "ldr q10, [x23, #0x10]\n" + "fmul v29.4s, v17.4s, v2.s[2]\n" + "fmul v2.4s, v17.4s, v2.s[3]\n" + "fmla v18.4s, v9.4s, v29.4s\n" + "movi v9.4s, #0x0\n" + "movi v29.4s, #0x0\n" + ".inst 0x4f98e189 // sdot v9.4s, v12.16b, v24.4b[0]\n" + ".inst 0x4fb8e19d // sdot v29.4s, v12.16b, v24.4b[1]\n" + "fmla v14.4s, v20.4s, v2.4s\n" + "movi v20.4s, #0x0\n" + "movi v2.4s, #0x0\n" + ".inst 0x4f98e994 // sdot v20.4s, v12.16b, v24.4b[2]\n" + ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" + "ldr q24, [x23, #0x20]\n" + ".inst 0x4f8ae3e9 // sdot v9.4s, v31.16b, v10.4b[0]\n" + ".inst 0x4faae3fd // sdot v29.4s, v31.16b, v10.4b[1]\n" + ".inst 0x4f8aebf4 // sdot v20.4s, v31.16b, v10.4b[2]\n" + ".inst 0x4faaebe2 // sdot v2.4s, v31.16b, v10.4b[3]\n" + "ldr q10, [x23, #0x30]\n" + ".inst 0x4f98e0c9 // sdot v9.4s, v6.16b, v24.4b[0]\n" + ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" + ".inst 0x4f98e8d4 // sdot v20.4s, v6.16b, v24.4b[2]\n" + ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" + "ldr q24, [x23, #0x40]\n" + ".inst 0x4f8ae389 // sdot v9.4s, v28.16b, v10.4b[0]\n" + ".inst 0x4faae39d // sdot v29.4s, v28.16b, v10.4b[1]\n" + ".inst 0x4f8aeb94 // sdot v20.4s, v28.16b, v10.4b[2]\n" + ".inst 0x4faaeb82 // sdot v2.4s, v28.16b, v10.4b[3]\n" + "ldr q10, [x23, #0x50]\n" + ".inst 0x4f98e069 // sdot v9.4s, v3.16b, v24.4b[0]\n" + ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" + ".inst 0x4f98e874 // sdot v20.4s, v3.16b, v24.4b[2]\n" + ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" + "ldr q24, [x23, #0x60]\n" + ".inst 0x4f8ae2c9 // sdot v9.4s, v22.16b, v10.4b[0]\n" + ".inst 0x4faae2dd // sdot v29.4s, v22.16b, v10.4b[1]\n" + ".inst 0x4f8aead4 // sdot v20.4s, v22.16b, v10.4b[2]\n" + ".inst 0x4faaeac2 // sdot v2.4s, v22.16b, v10.4b[3]\n" + "ldr q10, [x23, #0x70]\n" + "add x23, x23, #0x88\n" + ".inst 0x4f98e369 // sdot v9.4s, v27.16b, v24.4b[0]\n" + ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" + ".inst 0x4f98eb74 // sdot v20.4s, v27.16b, v24.4b[2]\n" + ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" + "ldr q24, [x22, #0x0]\n" + ".inst 0x4f8ae3c9 // sdot v9.4s, v30.16b, v10.4b[0]\n" + ".inst 0x4faae3dd // sdot v29.4s, v30.16b, v10.4b[1]\n" + ".inst 0x4f8aebd4 // sdot v20.4s, v30.16b, v10.4b[2]\n" + ".inst 0x4faaebc2 // sdot v2.4s, v30.16b, v10.4b[3]\n" + "fmul v10.4s, v17.4s, v26.s[0]\n" + "scvtf v9.4s, v9.4s, #0x4\n" + "scvtf v29.4s, v29.4s, #0x4\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "scvtf v2.4s, v2.4s, #0x4\n" + "fmla v11.4s, v9.4s, v10.4s\n" + "ldr q9, [x22, #0x10]\n" + "fmul v10.4s, v17.4s, v26.s[1]\n" + "fmla v13.4s, v29.4s, v10.4s\n" + "ldr d29, [x22, #-0x8]\n" + "fmul v10.4s, v17.4s, v26.s[2]\n" + "fmul v26.4s, v17.4s, v26.s[3]\n" + "fcvtl v29.4s, v29.4h\n" + "fmla v23.4s, v20.4s, v10.4s\n" + "movi v20.4s, #0x0\n" + "movi v10.4s, #0x0\n" + "fmla v16.4s, v2.4s, v26.4s\n" + "movi v26.4s, #0x0\n" + "movi v2.4s, #0x0\n" + 
".inst 0x4f98e194 // sdot v20.4s, v12.16b, v24.4b[0]\n" + ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" + ".inst 0x4f98e99a // sdot v26.4s, v12.16b, v24.4b[2]\n" + ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" + "ldr q24, [x22, #0x20]\n" + ".inst 0x4f89e3f4 // sdot v20.4s, v31.16b, v9.4b[0]\n" + ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" + ".inst 0x4f89ebfa // sdot v26.4s, v31.16b, v9.4b[2]\n" + ".inst 0x4fa9ebe2 // sdot v2.4s, v31.16b, v9.4b[3]\n" + "ldr q9, [x22, #0x30]\n" + ".inst 0x4f98e0d4 // sdot v20.4s, v6.16b, v24.4b[0]\n" + ".inst 0x4fb8e0ca // sdot v10.4s, v6.16b, v24.4b[1]\n" + ".inst 0x4f98e8da // sdot v26.4s, v6.16b, v24.4b[2]\n" + ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" + "ldr q24, [x22, #0x40]\n" + ".inst 0x4f89e394 // sdot v20.4s, v28.16b, v9.4b[0]\n" + ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" + ".inst 0x4f89eb9a // sdot v26.4s, v28.16b, v9.4b[2]\n" + ".inst 0x4fa9eb82 // sdot v2.4s, v28.16b, v9.4b[3]\n" + "ldr q9, [x22, #0x50]\n" + ".inst 0x4f98e074 // sdot v20.4s, v3.16b, v24.4b[0]\n" + ".inst 0x4fb8e06a // sdot v10.4s, v3.16b, v24.4b[1]\n" + ".inst 0x4f98e87a // sdot v26.4s, v3.16b, v24.4b[2]\n" + ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" + "ldr q24, [x22, #0x60]\n" + ".inst 0x4f89e2d4 // sdot v20.4s, v22.16b, v9.4b[0]\n" + ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" + ".inst 0x4f89eada // sdot v26.4s, v22.16b, v9.4b[2]\n" + ".inst 0x4fa9eac2 // sdot v2.4s, v22.16b, v9.4b[3]\n" + "ldr q9, [x22, #0x70]\n" + "add x22, x22, #0x88\n" + ".inst 0x4f98e374 // sdot v20.4s, v27.16b, v24.4b[0]\n" + ".inst 0x4fb8e36a // sdot v10.4s, v27.16b, v24.4b[1]\n" + ".inst 0x4f98eb7a // sdot v26.4s, v27.16b, v24.4b[2]\n" + ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" + "ldr q24, [x21, #0x0]\n" + ".inst 0x4f89e3d4 // sdot v20.4s, v30.16b, v9.4b[0]\n" + ".inst 0x4fa9e3ca // sdot v10.4s, v30.16b, v9.4b[1]\n" + ".inst 0x4f89ebda // sdot v26.4s, v30.16b, v9.4b[2]\n" + ".inst 0x4fa9ebc2 // sdot v2.4s, v30.16b, v9.4b[3]\n" + "fmul v9.4s, v17.4s, v29.s[0]\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "scvtf v10.4s, v10.4s, #0x4\n" + "scvtf v26.4s, v26.4s, #0x4\n" + "scvtf v2.4s, v2.4s, #0x4\n" + "fmla v25.4s, v20.4s, v9.4s\n" + "ldr q9, [x21, #0x10]\n" + "fmul v20.4s, v17.4s, v29.s[1]\n" + "fmla v7.4s, v10.4s, v20.4s\n" + "ldr d20, [x21, #-0x8]\n" + "fmul v10.4s, v17.4s, v29.s[2]\n" + "fmul v29.4s, v17.4s, v29.s[3]\n" + "fcvtl v20.4s, v20.4h\n" + "fmla v0.4s, v26.4s, v10.4s\n" + "movi v26.4s, #0x0\n" + "movi v10.4s, #0x0\n" + "fmla v4.4s, v2.4s, v29.4s\n" + "movi v2.4s, #0x0\n" + "movi v29.4s, #0x0\n" + ".inst 0x4f98e19a // sdot v26.4s, v12.16b, v24.4b[0]\n" + ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" + ".inst 0x4f98e982 // sdot v2.4s, v12.16b, v24.4b[2]\n" + ".inst 0x4fb8e99d // sdot v29.4s, v12.16b, v24.4b[3]\n" + "ldr q12, [x21, #0x20]\n" + "fmul v24.4s, v17.4s, v20.s[0]\n" + ".inst 0x4f89e3fa // sdot v26.4s, v31.16b, v9.4b[0]\n" + ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" + ".inst 0x4f89ebe2 // sdot v2.4s, v31.16b, v9.4b[2]\n" + ".inst 0x4fa9ebfd // sdot v29.4s, v31.16b, v9.4b[3]\n" + "ldr q9, [x21, #0x30]\n" + "fmul v31.4s, v17.4s, v20.s[1]\n" + ".inst 0x4f8ce0da // sdot v26.4s, v6.16b, v12.4b[0]\n" + ".inst 0x4face0ca // sdot v10.4s, v6.16b, v12.4b[1]\n" + ".inst 0x4f8ce8c2 // sdot v2.4s, v6.16b, v12.4b[2]\n" + ".inst 0x4face8dd // sdot v29.4s, v6.16b, v12.4b[3]\n" + "ldr q12, [x21, #0x40]\n" + "fmul v6.4s, v17.4s, v20.s[2]\n" + "fmul v20.4s, v17.4s, v20.s[3]\n" + ".inst 0x4f89e39a // 
sdot v26.4s, v28.16b, v9.4b[0]\n" + ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" + ".inst 0x4f89eb82 // sdot v2.4s, v28.16b, v9.4b[2]\n" + ".inst 0x4fa9eb9d // sdot v29.4s, v28.16b, v9.4b[3]\n" + "ldr q9, [x21, #0x50]\n" + ".inst 0x4f8ce07a // sdot v26.4s, v3.16b, v12.4b[0]\n" + ".inst 0x4face06a // sdot v10.4s, v3.16b, v12.4b[1]\n" + ".inst 0x4f8ce862 // sdot v2.4s, v3.16b, v12.4b[2]\n" + ".inst 0x4face87d // sdot v29.4s, v3.16b, v12.4b[3]\n" + "ldr q12, [x21, #0x60]\n" + ".inst 0x4f89e2da // sdot v26.4s, v22.16b, v9.4b[0]\n" + ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" + ".inst 0x4f89eac2 // sdot v2.4s, v22.16b, v9.4b[2]\n" + ".inst 0x4fa9eadd // sdot v29.4s, v22.16b, v9.4b[3]\n" + "ldr q17, [x21, #0x70]\n" + "add x21, x21, #0x88\n" + ".inst 0x4f8ce37a // sdot v26.4s, v27.16b, v12.4b[0]\n" + ".inst 0x4face36a // sdot v10.4s, v27.16b, v12.4b[1]\n" + ".inst 0x4f8ceb62 // sdot v2.4s, v27.16b, v12.4b[2]\n" + ".inst 0x4faceb7d // sdot v29.4s, v27.16b, v12.4b[3]\n" + ".inst 0x4f91e3da // sdot v26.4s, v30.16b, v17.4b[0]\n" + ".inst 0x4fb1e3ca // sdot v10.4s, v30.16b, v17.4b[1]\n" + ".inst 0x4f91ebc2 // sdot v2.4s, v30.16b, v17.4b[2]\n" + ".inst 0x4fb1ebdd // sdot v29.4s, v30.16b, v17.4b[3]\n" + "scvtf v26.4s, v26.4s, #0x4\n" + "scvtf v10.4s, v10.4s, #0x4\n" + "fmla v5.4s, v26.4s, v24.4s\n" + "scvtf v2.4s, v2.4s, #0x4\n" + "scvtf v29.4s, v29.4s, #0x4\n" + "fmla v21.4s, v10.4s, v31.4s\n" + "fmla v8.4s, v2.4s, v6.4s\n" + "fmla v1.4s, v29.4s, v20.4s\n" + "bgt 3b\n" + "mov x20, %x[res_ptr]\n" + "subs x27, x27, #0x4\n" + "add %x[res_ptr], %x[res_ptr], #0x10\n" + "str q15, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q19, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q18, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q14, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q11, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q13, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q23, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q16, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q25, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q7, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q0, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q4, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q5, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q21, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q8, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q1, [x20, #0x0]\n" + "bne 2b\n" + "mov x20, #0x4\n" + "sub x10, x10, #0x10\n" + "cmp x10, #0x10\n" + "mov %x[res_ptr], x26\n" + "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" + "bge 1b\n" + "4:" // Row loop skip + "cbz x10, 9f\n" + "5:" // Row tail: Row loop + "add x24, %x[b_ptr], #0x8\n" + "mov x23, %x[nc]\n" + "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" + "6:" // Row tail: Column loop + "movi v15.16b, #0x0\n" + "movi v19.16b, #0x0\n" + "add x25, %x[a_ptr], #0x8\n" + "mov x21, %x[nb]\n" + "movi v18.16b, #0x0\n" + "movi v14.16b, #0x0\n" + "7:" // Row tail: Block loop + "ldr q7, [x24, #0x0]\n" + "ldr q5, [x25, #0x0]\n" + "movi v9.16b, #0x4\n" + "movi v4.4s, #0x0\n" + "ldr q3, [x24, #0x10]\n" + "ldr q2, [x25, #0x10]\n" + "movi v1.4s, #0x0\n" + "movi v0.4s, #0x0\n" + "ldr q13, [x24, #0x20]\n" + "ldr q31, [x25, #0x20]\n" + "movi v30.4s, #0x0\n" + "movi v29.16b, #0xf0\n" + "ldr q28, [x24, #0x30]\n" + "ldr q27, [x25, #0x30]\n" + "sshl v20.16b, v7.16b, v9.16b\n" + "sub x20, x24, #0x8\n" + "ldr q26, [x25, #0x40]\n" + 
"ldr q25, [x25, #0x50]\n" + "sshl v17.16b, v3.16b, v9.16b\n" + "and v7.16b, v7.16b, v29.16b\n" + "ldr q24, [x25, #0x60]\n" + "ldr q16, [x25, #0x70]\n" + "sshl v22.16b, v13.16b, v9.16b\n" + "and v3.16b, v3.16b, v29.16b\n" + "ldr d21, [x20, #0x0]\n" + "ldr d12, [x25, #-0x8]\n" + ".inst 0x4f85e284 // sdot v4.4s, v20.16b, v5.4b[0]\n" + ".inst 0x4fa5e281 // sdot v1.4s, v20.16b, v5.4b[1]\n" + ".inst 0x4f85ea80 // sdot v0.4s, v20.16b, v5.4b[2]\n" + ".inst 0x4fa5ea9e // sdot v30.4s, v20.16b, v5.4b[3]\n" + "sshl v9.16b, v28.16b, v9.16b\n" + "subs x21, x21, #0x1\n" + "and v13.16b, v13.16b, v29.16b\n" + "and v28.16b, v28.16b, v29.16b\n" + "add x25, x25, #0x88\n" + "add x24, x24, #0x48\n" + "fcvtl v21.4s, v21.4h\n" + "fcvtl v12.4s, v12.4h\n" + ".inst 0x4f82e224 // sdot v4.4s, v17.16b, v2.4b[0]\n" + ".inst 0x4fa2e221 // sdot v1.4s, v17.16b, v2.4b[1]\n" + ".inst 0x4f82ea20 // sdot v0.4s, v17.16b, v2.4b[2]\n" + ".inst 0x4fa2ea3e // sdot v30.4s, v17.16b, v2.4b[3]\n" + "fmul v11.4s, v21.4s, v12.s[0]\n" + "fmul v23.4s, v21.4s, v12.s[1]\n" + "fmul v17.4s, v21.4s, v12.s[2]\n" + ".inst 0x4f9fe2c4 // sdot v4.4s, v22.16b, v31.4b[0]\n" + "fmul v6.4s, v21.4s, v12.s[3]\n" + ".inst 0x4fbfe2c1 // sdot v1.4s, v22.16b, v31.4b[1]\n" + ".inst 0x4f9feac0 // sdot v0.4s, v22.16b, v31.4b[2]\n" + ".inst 0x4fbfeade // sdot v30.4s, v22.16b, v31.4b[3]\n" + ".inst 0x4f9be124 // sdot v4.4s, v9.16b, v27.4b[0]\n" + ".inst 0x4fbbe121 // sdot v1.4s, v9.16b, v27.4b[1]\n" + ".inst 0x4f9be920 // sdot v0.4s, v9.16b, v27.4b[2]\n" + ".inst 0x4fbbe93e // sdot v30.4s, v9.16b, v27.4b[3]\n" + ".inst 0x4f9ae0e4 // sdot v4.4s, v7.16b, v26.4b[0]\n" + ".inst 0x4fbae0e1 // sdot v1.4s, v7.16b, v26.4b[1]\n" + ".inst 0x4f9ae8e0 // sdot v0.4s, v7.16b, v26.4b[2]\n" + ".inst 0x4fbae8fe // sdot v30.4s, v7.16b, v26.4b[3]\n" + ".inst 0x4f99e064 // sdot v4.4s, v3.16b, v25.4b[0]\n" + ".inst 0x4fb9e061 // sdot v1.4s, v3.16b, v25.4b[1]\n" + ".inst 0x4f99e860 // sdot v0.4s, v3.16b, v25.4b[2]\n" + ".inst 0x4fb9e87e // sdot v30.4s, v3.16b, v25.4b[3]\n" + ".inst 0x4f98e1a4 // sdot v4.4s, v13.16b, v24.4b[0]\n" + ".inst 0x4fb8e1a1 // sdot v1.4s, v13.16b, v24.4b[1]\n" + ".inst 0x4f98e9a0 // sdot v0.4s, v13.16b, v24.4b[2]\n" + ".inst 0x4fb8e9be // sdot v30.4s, v13.16b, v24.4b[3]\n" + ".inst 0x4f90e384 // sdot v4.4s, v28.16b, v16.4b[0]\n" + ".inst 0x4fb0e381 // sdot v1.4s, v28.16b, v16.4b[1]\n" + ".inst 0x4f90eb80 // sdot v0.4s, v28.16b, v16.4b[2]\n" + ".inst 0x4fb0eb9e // sdot v30.4s, v28.16b, v16.4b[3]\n" + "scvtf v4.4s, v4.4s, #0x4\n" + "scvtf v1.4s, v1.4s, #0x4\n" + "scvtf v0.4s, v0.4s, #0x4\n" + "fmla v15.4s, v4.4s, v11.4s\n" + "scvtf v30.4s, v30.4s, #0x4\n" + "fmla v19.4s, v1.4s, v23.4s\n" + "fmla v18.4s, v0.4s, v17.4s\n" + "fmla v14.4s, v30.4s, v6.4s\n" + "bgt 7b\n" + "mov x20, %x[res_ptr]\n" + "cmp x10, #0x1\n" + "str q15, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x10, #0x2\n" + "str q19, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x10, #0x3\n" + "str q18, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "str q14, [x20, #0x0]\n" + "8:" // Row tail: Accumulator store skip + "subs x23, x23, #0x4\n" + "add %x[res_ptr], %x[res_ptr], #0x10\n" + "bne 6b\n" + "subs x10, x10, #0x4\n" + "add %x[a_ptr], %x[a_ptr], x9\n" + "mov %x[res_ptr], x22\n" + "bgt 5b\n" + "9:" // Row tail: Row loop skip + : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) + : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", 
"v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + ); + return; + } +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) + { + float sumf[4][4]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } + } +} + +void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) + if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) { + const void * b_ptr = vx; + const void * a_ptr = vy; + float * res_ptr = s; + size_t res_stride = bs * sizeof(float); + + __asm__ __volatile__( + "mov x10, %x[nr]\n" + "mov x9, #0x88\n" + "cmp x10, #0x10\n" + "mul x9, %x[nb], x9\n" + "blt 4f\n" + "1:" // Row loop + "add x28, %x[b_ptr], #0x8\n" + "mov x27, %x[nc]\n" + "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" + "2:" // Column loop + "add x25, %x[a_ptr], #0x8\n" + "movi v2.16b, #0x0\n" + "movi v10.16b, #0x0\n" + "mov x24, %x[nb]\n" + "add x23, x25, x9\n" + "movi v12.16b, #0x0\n" + "movi v28.16b, #0x0\n" + "add x22, x23, x9\n" + "movi v11.16b, #0x0\n" + "movi v13.16b, #0x0\n" + "add x21, x22, x9\n" + "movi v22.16b, #0x0\n" + "movi v23.16b, #0x0\n" + "movi v25.16b, #0x0\n" + "movi v5.16b, #0x0\n" + "movi v7.16b, #0x0\n" + "movi v4.16b, #0x0\n" + "movi v6.16b, #0x0\n" + "movi v30.16b, #0x0\n" + "movi v24.16b, #0x0\n" + "movi v14.16b, #0x0\n" + "3:" // Block loop + "ldr q21, [x28, #0x0]\n" + "ldr q16, [x28, #0x10]\n" + "movi v1.16b, #0x4\n" + "movi v19.4s, #0x0\n" + "ldr q27, [x25, #0x0]\n" + "ldr q15, [x25, #0x10]\n" + "movi v26.4s, #0x0\n" + "movi v18.4s, #0x0\n" + "ldr q29, [x28, #0x20]\n" + "ldr q3, [x28, #0x30]\n" + "movi v17.4s, #0x0\n" + "movi v0.16b, #0xf0\n" + "ldr d20, [x25, #-0x8]\n" + "ldr d9, [x23, #-0x8]\n" + "sshl v8.16b, v21.16b, v1.16b\n" + "sshl v31.16b, v16.16b, v1.16b\n" + "and v21.16b, v21.16b, v0.16b\n" + "and v16.16b, v16.16b, v0.16b\n" + "sub x20, x28, #0x8\n" + "subs x24, x24, #0x1\n" + "add x28, x28, #0x48\n" + ".inst 0x4e88a773 // smmla v19.4s, v27.16b, v8.16b\n" + ".inst 0x4e9fa77a // smmla v26.4s, v27.16b, v31.16b\n" + "ldr q27, [x25, #0x20]\n" + ".inst 0x4e88a5f2 // smmla v18.4s, v15.16b, v8.16b\n" + ".inst 0x4e9fa5f1 // smmla v17.4s, v15.16b, v31.16b\n" + "sshl v15.16b, v29.16b, v1.16b\n" + "sshl v1.16b, v3.16b, v1.16b\n" + "and v29.16b, v29.16b, v0.16b\n" + "and v3.16b, v3.16b, v0.16b\n" + "ldr q0, [x25, #0x30]\n" + "fcvtl v20.4s, v20.4h\n" + ".inst 0x4e8fa773 // smmla v19.4s, v27.16b, v15.16b\n" + "fcvtl v9.4s, v9.4h\n" + ".inst 0x4e81a77a // smmla v26.4s, v27.16b, v1.16b\n" + "ldr q27, [x25, #0x40]\n" + ".inst 0x4e8fa412 // smmla v18.4s, v0.16b, v15.16b\n" + ".inst 0x4e81a411 // smmla v17.4s, v0.16b, v1.16b\n" + "ldr q0, [x25, #0x50]\n" + ".inst 0x4e95a773 // smmla v19.4s, v27.16b, v21.16b\n" + ".inst 0x4e90a77a // smmla v26.4s, v27.16b, v16.16b\n" + "ldr q27, [x25, #0x60]\n" + ".inst 0x4e95a412 // smmla v18.4s, v0.16b, v21.16b\n" + ".inst 0x4e90a411 // smmla v17.4s, v0.16b, v16.16b\n" + "ldr q0, [x25, #0x70]\n" + "add x25, x25, #0x88\n" + ".inst 0x4e9da773 // smmla v19.4s, v27.16b, v29.16b\n" + ".inst 0x4e83a77a // smmla v26.4s, v27.16b, v3.16b\n" + "ldr d27, [x20, #0x0]\n" + ".inst 0x4e9da412 // smmla v18.4s, v0.16b, v29.16b\n" + ".inst 0x4e83a411 // smmla v17.4s, v0.16b, v3.16b\n" + "fcvtl v27.4s, v27.4h\n" + "uzp1 v0.2d, v19.2d, v26.2d\n" + "uzp2 v26.2d, v19.2d, v26.2d\n" + "fmul v19.4s, v27.4s, v20.s[0]\n" + "scvtf v0.4s, v0.4s, #0x4\n" + "scvtf v26.4s, v26.4s, #0x4\n" + "fmla v2.4s, v0.4s, v19.4s\n" + "ldr q19, [x23, #0x0]\n" + "uzp1 v0.2d, v18.2d, v17.2d\n" + "uzp2 v18.2d, v18.2d, v17.2d\n" + "fmul v17.4s, v27.4s, v20.s[1]\n" + "scvtf v0.4s, v0.4s, #0x4\n" + "scvtf v18.4s, v18.4s, #0x4\n" + "fmla v10.4s, v26.4s, v17.4s\n" + "ldr q17, [x23, #0x10]\n" + "fmul v26.4s, v27.4s, v20.s[2]\n" + "fmul v20.4s, v27.4s, v20.s[3]\n" + "fmla v12.4s, 
v0.4s, v26.4s\n" + "ldr d0, [x22, #-0x8]\n" + "ldr d26, [x21, #-0x8]\n" + "fcvtl v0.4s, v0.4h\n" + "fmla v28.4s, v18.4s, v20.4s\n" + "movi v20.4s, #0x0\n" + "movi v18.4s, #0x0\n" + ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" + ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" + "ldr q19, [x23, #0x20]\n" + "fcvtl v26.4s, v26.4h\n" + ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" + ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" + "ldr q19, [x23, #0x40]\n" + ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" + ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" + "ldr q19, [x23, #0x60]\n" + ".inst 0x4e9da674 // smmla v20.4s, v19.16b, v29.16b\n" + ".inst 0x4e83a672 // smmla v18.4s, v19.16b, v3.16b\n" + "uzp1 v19.2d, v20.2d, v18.2d\n" + "scvtf v19.4s, v19.4s, #0x4\n" + "uzp2 v20.2d, v20.2d, v18.2d\n" + "fmul v18.4s, v27.4s, v9.s[0]\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "fmla v11.4s, v19.4s, v18.4s\n" + "ldr q18, [x22, #0x0]\n" + "fmul v19.4s, v27.4s, v9.s[1]\n" + "fmla v13.4s, v20.4s, v19.4s\n" + "movi v19.4s, #0x0\n" + "movi v20.4s, #0x0\n" + ".inst 0x4e88a633 // smmla v19.4s, v17.16b, v8.16b\n" + ".inst 0x4e9fa634 // smmla v20.4s, v17.16b, v31.16b\n" + "ldr q17, [x23, #0x30]\n" + ".inst 0x4e8fa633 // smmla v19.4s, v17.16b, v15.16b\n" + ".inst 0x4e81a634 // smmla v20.4s, v17.16b, v1.16b\n" + "ldr q17, [x23, #0x50]\n" + ".inst 0x4e95a633 // smmla v19.4s, v17.16b, v21.16b\n" + ".inst 0x4e90a634 // smmla v20.4s, v17.16b, v16.16b\n" + "ldr q17, [x23, #0x70]\n" + "add x23, x23, #0x88\n" + ".inst 0x4e9da633 // smmla v19.4s, v17.16b, v29.16b\n" + ".inst 0x4e83a634 // smmla v20.4s, v17.16b, v3.16b\n" + "uzp1 v17.2d, v19.2d, v20.2d\n" + "scvtf v17.4s, v17.4s, #0x4\n" + "uzp2 v20.2d, v19.2d, v20.2d\n" + "fmul v19.4s, v27.4s, v9.s[2]\n" + "fmul v9.4s, v27.4s, v9.s[3]\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "fmla v22.4s, v17.4s, v19.4s\n" + "ldr q17, [x22, #0x10]\n" + "movi v19.4s, #0x0\n" + ".inst 0x4e88a653 // smmla v19.4s, v18.16b, v8.16b\n" + "fmla v23.4s, v20.4s, v9.4s\n" + "movi v20.4s, #0x0\n" + "movi v9.4s, #0x0\n" + ".inst 0x4e9fa654 // smmla v20.4s, v18.16b, v31.16b\n" + "ldr q18, [x22, #0x20]\n" + ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" + ".inst 0x4e8fa653 // smmla v19.4s, v18.16b, v15.16b\n" + ".inst 0x4e81a654 // smmla v20.4s, v18.16b, v1.16b\n" + "ldr q18, [x22, #0x40]\n" + ".inst 0x4e95a653 // smmla v19.4s, v18.16b, v21.16b\n" + ".inst 0x4e90a654 // smmla v20.4s, v18.16b, v16.16b\n" + "ldr q18, [x22, #0x60]\n" + ".inst 0x4e9da653 // smmla v19.4s, v18.16b, v29.16b\n" + ".inst 0x4e83a654 // smmla v20.4s, v18.16b, v3.16b\n" + "movi v18.4s, #0x0\n" + ".inst 0x4e9fa632 // smmla v18.4s, v17.16b, v31.16b\n" + "ldr q17, [x22, #0x30]\n" + ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" + ".inst 0x4e81a632 // smmla v18.4s, v17.16b, v1.16b\n" + "ldr q17, [x22, #0x50]\n" + ".inst 0x4e95a629 // smmla v9.4s, v17.16b, v21.16b\n" + ".inst 0x4e90a632 // smmla v18.4s, v17.16b, v16.16b\n" + "ldr q17, [x22, #0x70]\n" + "add x22, x22, #0x88\n" + ".inst 0x4e9da629 // smmla v9.4s, v17.16b, v29.16b\n" + ".inst 0x4e83a632 // smmla v18.4s, v17.16b, v3.16b\n" + "uzp1 v17.2d, v19.2d, v20.2d\n" + "uzp2 v20.2d, v19.2d, v20.2d\n" + "fmul v19.4s, v27.4s, v0.s[0]\n" + "scvtf v17.4s, v17.4s, #0x4\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "fmla v25.4s, v17.4s, v19.4s\n" + "ldr q19, [x21, #0x0]\n" + "fmul v17.4s, v27.4s, v0.s[1]\n" + "fmla v5.4s, v20.4s, v17.4s\n" + "ldr q17, [x21, #0x10]\n" + "uzp1 v20.2d, v9.2d, v18.2d\n" + "uzp2 v9.2d, v9.2d, v18.2d\n" + "fmul v18.4s, 
v27.4s, v0.s[2]\n" + "fmul v0.4s, v27.4s, v0.s[3]\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "scvtf v9.4s, v9.4s, #0x4\n" + "fmla v7.4s, v20.4s, v18.4s\n" + "movi v20.4s, #0x0\n" + "movi v18.4s, #0x0\n" + ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" + ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" + "ldr q19, [x21, #0x20]\n" + "fmla v4.4s, v9.4s, v0.4s\n" + "movi v9.4s, #0x0\n" + "movi v0.4s, #0x0\n" + ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" + "fmul v8.4s, v27.4s, v26.s[0]\n" + ".inst 0x4e9fa620 // smmla v0.4s, v17.16b, v31.16b\n" + "ldr q17, [x21, #0x30]\n" + ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" + "fmul v31.4s, v27.4s, v26.s[1]\n" + ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" + "ldr q19, [x21, #0x40]\n" + ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" + "fmul v15.4s, v27.4s, v26.s[2]\n" + "fmul v27.4s, v27.4s, v26.s[3]\n" + ".inst 0x4e81a620 // smmla v0.4s, v17.16b, v1.16b\n" + "ldr q1, [x21, #0x50]\n" + ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" + ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" + "ldr q26, [x21, #0x60]\n" + ".inst 0x4e95a429 // smmla v9.4s, v1.16b, v21.16b\n" + ".inst 0x4e90a420 // smmla v0.4s, v1.16b, v16.16b\n" + "ldr q21, [x21, #0x70]\n" + "add x21, x21, #0x88\n" + ".inst 0x4e9da754 // smmla v20.4s, v26.16b, v29.16b\n" + ".inst 0x4e83a752 // smmla v18.4s, v26.16b, v3.16b\n" + ".inst 0x4e9da6a9 // smmla v9.4s, v21.16b, v29.16b\n" + ".inst 0x4e83a6a0 // smmla v0.4s, v21.16b, v3.16b\n" + "uzp1 v29.2d, v20.2d, v18.2d\n" + "uzp2 v21.2d, v20.2d, v18.2d\n" + "scvtf v29.4s, v29.4s, #0x4\n" + "uzp1 v18.2d, v9.2d, v0.2d\n" + "uzp2 v16.2d, v9.2d, v0.2d\n" + "scvtf v21.4s, v21.4s, #0x4\n" + "fmla v6.4s, v29.4s, v8.4s\n" + "scvtf v18.4s, v18.4s, #0x4\n" + "scvtf v16.4s, v16.4s, #0x4\n" + "fmla v30.4s, v21.4s, v31.4s\n" + "fmla v24.4s, v18.4s, v15.4s\n" + "fmla v14.4s, v16.4s, v27.4s\n" + "bgt 3b\n" + "mov x20, %x[res_ptr]\n" + "subs x27, x27, #0x4\n" + "add %x[res_ptr], %x[res_ptr], #0x10\n" + "str q2, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q10, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q12, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q28, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q11, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q13, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q22, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q23, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q25, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q5, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q7, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q4, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q6, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q30, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q24, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q14, [x20, #0x0]\n" + "bne 2b\n" + "mov x20, #0x4\n" + "sub x10, x10, #0x10\n" + "cmp x10, #0x10\n" + "mov %x[res_ptr], x26\n" + "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" + "bge 1b\n" + "4:" // Row loop skip + "cbz x10, 9f\n" + "5:" // Row tail: Row loop + "add x24, %x[b_ptr], #0x8\n" + "mov x23, %x[nc]\n" + "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" + "6:" // Row tail: Column loop + "movi v2.16b, #0x0\n" + "movi v10.16b, #0x0\n" + "add x25, %x[a_ptr], #0x8\n" + "mov x21, %x[nb]\n" + "movi v12.16b, #0x0\n" + "movi v28.16b, #0x0\n" + "7:" // Row tail: Block loop + "ldr q6, [x24, #0x0]\n" + "ldr q5, 
[x24, #0x10]\n" + "movi v17.16b, #0x4\n" + "movi v8.4s, #0x0\n" + "ldr q4, [x25, #0x0]\n" + "ldr q13, [x25, #0x10]\n" + "movi v27.4s, #0x0\n" + "movi v0.4s, #0x0\n" + "ldr q31, [x24, #0x20]\n" + "ldr q14, [x24, #0x30]\n" + "movi v29.4s, #0x0\n" + "movi v22.16b, #0xf0\n" + "ldr q11, [x25, #0x20]\n" + "ldr q23, [x25, #0x30]\n" + "sshl v21.16b, v6.16b, v17.16b\n" + "sshl v16.16b, v5.16b, v17.16b\n" + "ldr q20, [x25, #0x40]\n" + "ldr q26, [x25, #0x50]\n" + "and v6.16b, v6.16b, v22.16b\n" + "and v5.16b, v5.16b, v22.16b\n" + "ldr q25, [x25, #0x60]\n" + "ldr q3, [x25, #0x70]\n" + "sshl v19.16b, v31.16b, v17.16b\n" + "sshl v18.16b, v14.16b, v17.16b\n" + "ldr d17, [x25, #-0x8]\n" + ".inst 0x4e95a488 // smmla v8.4s, v4.16b, v21.16b\n" + ".inst 0x4e90a49b // smmla v27.4s, v4.16b, v16.16b\n" + "and v31.16b, v31.16b, v22.16b\n" + ".inst 0x4e95a5a0 // smmla v0.4s, v13.16b, v21.16b\n" + ".inst 0x4e90a5bd // smmla v29.4s, v13.16b, v16.16b\n" + "and v14.16b, v14.16b, v22.16b\n" + "sub x20, x24, #0x8\n" + "ldr d16, [x20, #0x0]\n" + "subs x21, x21, #0x1\n" + "add x25, x25, #0x88\n" + "fcvtl v17.4s, v17.4h\n" + "add x24, x24, #0x48\n" + ".inst 0x4e93a568 // smmla v8.4s, v11.16b, v19.16b\n" + ".inst 0x4e92a57b // smmla v27.4s, v11.16b, v18.16b\n" + ".inst 0x4e93a6e0 // smmla v0.4s, v23.16b, v19.16b\n" + ".inst 0x4e92a6fd // smmla v29.4s, v23.16b, v18.16b\n" + "fcvtl v16.4s, v16.4h\n" + ".inst 0x4e86a688 // smmla v8.4s, v20.16b, v6.16b\n" + ".inst 0x4e85a69b // smmla v27.4s, v20.16b, v5.16b\n" + "fmul v23.4s, v16.4s, v17.s[0]\n" + "fmul v21.4s, v16.4s, v17.s[1]\n" + "fmul v1.4s, v16.4s, v17.s[2]\n" + "fmul v20.4s, v16.4s, v17.s[3]\n" + ".inst 0x4e86a740 // smmla v0.4s, v26.16b, v6.16b\n" + ".inst 0x4e85a75d // smmla v29.4s, v26.16b, v5.16b\n" + ".inst 0x4e9fa728 // smmla v8.4s, v25.16b, v31.16b\n" + ".inst 0x4e8ea73b // smmla v27.4s, v25.16b, v14.16b\n" + ".inst 0x4e9fa460 // smmla v0.4s, v3.16b, v31.16b\n" + ".inst 0x4e8ea47d // smmla v29.4s, v3.16b, v14.16b\n" + "uzp1 v19.2d, v8.2d, v27.2d\n" + "uzp2 v18.2d, v8.2d, v27.2d\n" + "scvtf v19.4s, v19.4s, #0x4\n" + "uzp1 v17.2d, v0.2d, v29.2d\n" + "uzp2 v16.2d, v0.2d, v29.2d\n" + "scvtf v18.4s, v18.4s, #0x4\n" + "fmla v2.4s, v19.4s, v23.4s\n" + "scvtf v17.4s, v17.4s, #0x4\n" + "scvtf v16.4s, v16.4s, #0x4\n" + "fmla v10.4s, v18.4s, v21.4s\n" + "fmla v12.4s, v17.4s, v1.4s\n" + "fmla v28.4s, v16.4s, v20.4s\n" + "bgt 7b\n" + "mov x20, %x[res_ptr]\n" + "cmp x10, #0x1\n" + "str q2, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x10, #0x2\n" + "str q10, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x10, #0x3\n" + "str q12, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "str q28, [x20, #0x0]\n" + "8:" // Row tail: Accumulator store skip + "subs x23, x23, #0x4\n" + "add %x[res_ptr], %x[res_ptr], #0x10\n" + "bne 6b\n" + "subs x10, x10, #0x4\n" + "add %x[a_ptr], %x[a_ptr], x9\n" + "mov %x[res_ptr], x22\n" + "bgt 5b\n" + "9:" // Row tail: Row loop skip + : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) + : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + ); + return; + } +#endif // #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) + float sumf[4][4]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } +} + +void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) +#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) + if (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0) { + const void * b_ptr = vx; + const void * a_ptr = vy; + float * res_ptr = s; + size_t res_stride = bs * sizeof(float); + + __asm__ __volatile__( + "mov x20, #0x4\n" + "mov x13, %x[nr]\n" + "mov z28.s, #-0x4\n" + "mov x12, #0x88\n" + "ptrue p1.b\n" + "whilelt p0.s, XZR, x20\n" + "cmp x13, #0x10\n" + "mul x12, %x[nb], x12\n" + "blt 4f\n" + "1:" // Row loop + "add x11, %x[b_ptr], #0x10\n" + "mov x10, %x[nc]\n" + "add x9, %x[res_ptr], %x[res_stride], LSL #4\n" + "2:" // Column loop + "add x28, %x[a_ptr], #0x8\n" + "mov z24.b, #0x0\n" + "mov z15.b, #0x0\n" + "mov x27, %x[nb]\n" + "add x26, x28, x12\n" + "mov z12.b, #0x0\n" + "mov z0.b, #0x0\n" + "add x25, x26, x12\n" + "mov z13.b, #0x0\n" + "mov z1.b, #0x0\n" + "add x24, x25, x12\n" + "mov z20.b, #0x0\n" + "mov z25.b, #0x0\n" + "mov z11.b, #0x0\n" + "mov z16.b, #0x0\n" + "mov z19.b, #0x0\n" + "mov z26.b, #0x0\n" + "mov z8.b, #0x0\n" + "mov z29.b, #0x0\n" + "mov z27.b, #0x0\n" + "mov z10.b, #0x0\n" + "3:" // Block loop + "ld1b { z30.b }, p1/Z, [x11]\n" + "ld1b { z21.b }, p1/Z, [x11, #1, MUL VL]\n" + "mov z18.s, #0x0\n" + "mov z7.s, #0x0\n" + "ld1rqb { z3.b }, p1/Z, [x28]\n" + "ld1rqb { z5.b }, p1/Z, [x28, #16]\n" + "mov z9.s, #0x0\n" + "mov z22.s, #0x0\n" + "ld1b { z4.b }, p1/Z, [x11, #2, MUL VL]\n" + "ld1b { z17.b }, p1/Z, [x11, #3, MUL VL]\n" + "sub x20, x11, #0x10\n" + "sub x23, x28, #0x8\n" + "lsl z31.b, z30.b, #0x4\n" + "lsl z6.b, z21.b, #0x4\n" + "ld1h { z23.s }, p1/Z, [x20]\n" + "sub x22, x26, #0x8\n" + "and z30.b, z30.b, #0xf0\n" + "and z21.b, z21.b, #0xf0\n" + "sub x21, x25, #0x8\n" + "sub x20, x24, #0x8\n" + "lsl z14.b, z4.b, #0x4\n" + "lsl z2.b, z17.b, #0x4\n" + "subs x27, x27, #0x1\n" + "add x11, x11, #0x90\n" + ".inst 0x451f9872 // smmla z18.s, z3.b, z31.b\n" + ".inst 0x45069867 // smmla z7.s, z3.b, z6.b\n" + "ld1rqb { z3.b }, p1/Z, [x28, #32]\n" + "and z4.b, z4.b, #0xf0\n" + ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n" + ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n" + "ld1rqb { z5.b }, p1/Z, [x28, #48]\n" + "and z17.b, z17.b, #0xf0\n" + "fcvt z23.s, p1/m, z23.h\n" + ".inst 0x450e9872 // smmla z18.s, z3.b, z14.b\n" + ".inst 0x45029867 // smmla z7.s, z3.b, z2.b\n" + "ld1rqb { z3.b }, p1/Z, [x28, #64]\n" + ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n" + ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n" + "ld1rqb { z5.b }, p1/Z, [x28, #80]\n" + "fscale z23.s, p1/m, z23.s, z28.s\n" + ".inst 0x451e9872 // smmla z18.s, z3.b, z30.b\n" + ".inst 0x45159867 // smmla z7.s, z3.b, z21.b\n" + "ld1rqb { z3.b }, p1/Z, [x28, #96]\n" + ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n" + ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n" + "ld1rqb { z5.b }, p1/Z, [x28, #112]\n" + "add x28, x28, #0x88\n" + ".inst 0x45049872 // smmla z18.s, z3.b, z4.b\n" + ".inst 0x45119867 // smmla z7.s, z3.b, z17.b\n" + "ld1h { z3.s }, p0/Z, [x23]\n" + ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n" + ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n" + "fcvt z3.s, p1/m, z3.h\n" + "uzp1 z5.d, z18.d, z7.d\n" + "uzp2 z18.d, z18.d, z7.d\n" + "mov z3.q, z3.q[0]\n" + "uzp1 z7.d, z9.d, z22.d\n" + "uzp2 z22.d, z9.d, z22.d\n" + "fmul z9.s, z23.s, z3.s[0]\n" + "scvtf z5.s, p1/m, z5.s\n" + "scvtf z18.s, p1/m, z18.s\n" + "scvtf z7.s, p1/m, z7.s\n" + "scvtf z22.s, p1/m, z22.s\n" + "fmla z24.s, p1/M, z5.s, z9.s\n" + "ld1rqb { z5.b }, p1/Z, [x26]\n" + "fmul z9.s, z23.s, z3.s[1]\n" 
+ "fmla z15.s, p1/M, z18.s, z9.s\n" + "ld1rqb { z18.b }, p1/Z, [x26, #16]\n" + "fmul z9.s, z23.s, z3.s[2]\n" + "fmul z3.s, z23.s, z3.s[3]\n" + "fmla z12.s, p1/M, z7.s, z9.s\n" + "mov z9.s, #0x0\n" + "ld1h { z7.s }, p0/Z, [x22]\n" + ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n" + "fmla z0.s, p1/M, z22.s, z3.s\n" + "mov z22.s, #0x0\n" + "ld1h { z3.s }, p0/Z, [x21]\n" + ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n" + "ld1rqb { z5.b }, p1/Z, [x26, #32]\n" + "fcvt z7.s, p1/m, z7.h\n" + "fcvt z3.s, p1/m, z3.h\n" + ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n" + ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n" + "ld1rqb { z5.b }, p1/Z, [x26, #64]\n" + "mov z7.q, z7.q[0]\n" + "mov z3.q, z3.q[0]\n" + ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n" + ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n" + "ld1rqb { z5.b }, p1/Z, [x26, #96]\n" + ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n" + ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n" + "uzp1 z5.d, z9.d, z22.d\n" + "scvtf z5.s, p1/m, z5.s\n" + "uzp2 z22.d, z9.d, z22.d\n" + "fmul z9.s, z23.s, z7.s[0]\n" + "scvtf z22.s, p1/m, z22.s\n" + "fmla z13.s, p1/M, z5.s, z9.s\n" + "ld1rqb { z9.b }, p1/Z, [x25]\n" + "fmul z5.s, z23.s, z7.s[1]\n" + "fmla z1.s, p1/M, z22.s, z5.s\n" + "mov z5.s, #0x0\n" + "mov z22.s, #0x0\n" + ".inst 0x451f9a45 // smmla z5.s, z18.b, z31.b\n" + ".inst 0x45069a56 // smmla z22.s, z18.b, z6.b\n" + "ld1rqb { z18.b }, p1/Z, [x26, #48]\n" + ".inst 0x450e9a45 // smmla z5.s, z18.b, z14.b\n" + ".inst 0x45029a56 // smmla z22.s, z18.b, z2.b\n" + "ld1rqb { z18.b }, p1/Z, [x26, #80]\n" + ".inst 0x451e9a45 // smmla z5.s, z18.b, z30.b\n" + ".inst 0x45159a56 // smmla z22.s, z18.b, z21.b\n" + "ld1rqb { z18.b }, p1/Z, [x26, #112]\n" + "add x26, x26, #0x88\n" + ".inst 0x45049a45 // smmla z5.s, z18.b, z4.b\n" + ".inst 0x45119a56 // smmla z22.s, z18.b, z17.b\n" + "uzp1 z18.d, z5.d, z22.d\n" + "scvtf z18.s, p1/m, z18.s\n" + "uzp2 z22.d, z5.d, z22.d\n" + "fmul z5.s, z23.s, z7.s[2]\n" + "fmul z7.s, z23.s, z7.s[3]\n" + "scvtf z22.s, p1/m, z22.s\n" + "fmla z20.s, p1/M, z18.s, z5.s\n" + "ld1rqb { z18.b }, p1/Z, [x25, #16]\n" + "ld1h { z5.s }, p0/Z, [x20]\n" + "fcvt z5.s, p1/m, z5.h\n" + "fmla z25.s, p1/M, z22.s, z7.s\n" + "mov z22.s, #0x0\n" + "mov z7.s, #0x0\n" + ".inst 0x451f9936 // smmla z22.s, z9.b, z31.b\n" + ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n" + "ld1rqb { z9.b }, p1/Z, [x25, #32]\n" + "mov z5.q, z5.q[0]\n" + ".inst 0x450e9936 // smmla z22.s, z9.b, z14.b\n" + ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n" + "ld1rqb { z9.b }, p1/Z, [x25, #64]\n" + ".inst 0x451e9936 // smmla z22.s, z9.b, z30.b\n" + ".inst 0x45159927 // smmla z7.s, z9.b, z21.b\n" + "ld1rqb { z9.b }, p1/Z, [x25, #96]\n" + ".inst 0x45049936 // smmla z22.s, z9.b, z4.b\n" + ".inst 0x45119927 // smmla z7.s, z9.b, z17.b\n" + "uzp1 z9.d, z22.d, z7.d\n" + "scvtf z9.s, p1/m, z9.s\n" + "uzp2 z22.d, z22.d, z7.d\n" + "fmul z7.s, z23.s, z3.s[0]\n" + "scvtf z22.s, p1/m, z22.s\n" + "fmla z11.s, p1/M, z9.s, z7.s\n" + "ld1rqb { z9.b }, p1/Z, [x24]\n" + "fmul z7.s, z23.s, z3.s[1]\n" + "fmla z16.s, p1/M, z22.s, z7.s\n" + "mov z22.s, #0x0\n" + "mov z7.s, #0x0\n" + ".inst 0x451f9a56 // smmla z22.s, z18.b, z31.b\n" + ".inst 0x45069a47 // smmla z7.s, z18.b, z6.b\n" + "ld1rqb { z18.b }, p1/Z, [x25, #48]\n" + ".inst 0x450e9a56 // smmla z22.s, z18.b, z14.b\n" + ".inst 0x45029a47 // smmla z7.s, z18.b, z2.b\n" + "ld1rqb { z18.b }, p1/Z, [x25, #80]\n" + ".inst 0x451e9a56 // smmla z22.s, z18.b, z30.b\n" + ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n" + "ld1rqb { z18.b }, p1/Z, [x25, #112]\n" + "add 
x25, x25, #0x88\n" + ".inst 0x45049a56 // smmla z22.s, z18.b, z4.b\n" + ".inst 0x45119a47 // smmla z7.s, z18.b, z17.b\n" + "uzp1 z18.d, z22.d, z7.d\n" + "scvtf z18.s, p1/m, z18.s\n" + "uzp2 z7.d, z22.d, z7.d\n" + "fmul z22.s, z23.s, z3.s[2]\n" + "fmul z3.s, z23.s, z3.s[3]\n" + "scvtf z7.s, p1/m, z7.s\n" + "fmla z19.s, p1/M, z18.s, z22.s\n" + "ld1rqb { z18.b }, p1/Z, [x24, #16]\n" + "fmul z22.s, z23.s, z5.s[0]\n" + "fmla z26.s, p1/M, z7.s, z3.s\n" + "mov z3.s, #0x0\n" + "mov z7.s, #0x0\n" + ".inst 0x451f9923 // smmla z3.s, z9.b, z31.b\n" + ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n" + "ld1rqb { z9.b }, p1/Z, [x24, #32]\n" + ".inst 0x450e9923 // smmla z3.s, z9.b, z14.b\n" + ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n" + "mov z9.s, #0x0\n" + ".inst 0x451f9a49 // smmla z9.s, z18.b, z31.b\n" + "mov z31.s, #0x0\n" + ".inst 0x45069a5f // smmla z31.s, z18.b, z6.b\n" + "ld1rqb { z6.b }, p1/Z, [x24, #48]\n" + "ld1rqb { z18.b }, p1/Z, [x24, #64]\n" + ".inst 0x450e98c9 // smmla z9.s, z6.b, z14.b\n" + "fmul z14.s, z23.s, z5.s[1]\n" + ".inst 0x450298df // smmla z31.s, z6.b, z2.b\n" + "ld1rqb { z6.b }, p1/Z, [x24, #80]\n" + "fmul z2.s, z23.s, z5.s[2]\n" + "fmul z23.s, z23.s, z5.s[3]\n" + ".inst 0x451e9a43 // smmla z3.s, z18.b, z30.b\n" + ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n" + "ld1rqb { z5.b }, p1/Z, [x24, #96]\n" + ".inst 0x451e98c9 // smmla z9.s, z6.b, z30.b\n" + ".inst 0x451598df // smmla z31.s, z6.b, z21.b\n" + "ld1rqb { z18.b }, p1/Z, [x24, #112]\n" + "add x24, x24, #0x88\n" + ".inst 0x450498a3 // smmla z3.s, z5.b, z4.b\n" + ".inst 0x451198a7 // smmla z7.s, z5.b, z17.b\n" + ".inst 0x45049a49 // smmla z9.s, z18.b, z4.b\n" + ".inst 0x45119a5f // smmla z31.s, z18.b, z17.b\n" + "uzp1 z18.d, z3.d, z7.d\n" + "uzp2 z5.d, z3.d, z7.d\n" + "scvtf z18.s, p1/m, z18.s\n" + "uzp1 z6.d, z9.d, z31.d\n" + "uzp2 z9.d, z9.d, z31.d\n" + "scvtf z5.s, p1/m, z5.s\n" + "fmla z8.s, p1/M, z18.s, z22.s\n" + "scvtf z6.s, p1/m, z6.s\n" + "scvtf z9.s, p1/m, z9.s\n" + "fmla z29.s, p1/M, z5.s, z14.s\n" + "fmla z27.s, p1/M, z6.s, z2.s\n" + "fmla z10.s, p1/M, z9.s, z23.s\n" + "bgt 3b\n" + "mov x20, %x[res_ptr]\n" + "subs x10, x10, #0x8\n" + "add %x[res_ptr], %x[res_ptr], #0x20\n" + "st1w { z24.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z15.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z12.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z0.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z13.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z1.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z20.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z25.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z11.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z16.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z19.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z26.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z8.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z29.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z27.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z10.s }, p1, [x20]\n" + "bne 2b\n" + "mov x20, #0x4\n" + "sub x13, x13, #0x10\n" + "cmp x13, #0x10\n" + "mov %x[res_ptr], x9\n" + "madd %x[a_ptr], x20, x12, %x[a_ptr]\n" + "bge 1b\n" + "4:" // Row loop skip + "cbz x13, 9f\n" + "5:" // Row tail: Row loop + "add x25, %x[b_ptr], #0x10\n" + "mov x24, %x[nc]\n" + "add x23, %x[res_ptr], %x[res_stride], 
LSL #2\n" + "6:" // Row tail: Column loop + "mov z24.b, #0x0\n" + "mov z15.b, #0x0\n" + "add x28, %x[a_ptr], #0x8\n" + "mov x22, %x[nb]\n" + "mov z12.b, #0x0\n" + "mov z0.b, #0x0\n" + "7:" // Row tail: Block loop + "ld1b { z3.b }, p1/Z, [x25]\n" + "ld1b { z6.b }, p1/Z, [x25, #1, MUL VL]\n" + "mov z2.s, #0x0\n" + "mov z25.s, #0x0\n" + "ld1rqb { z26.b }, p1/Z, [x28]\n" + "ld1rqb { z21.b }, p1/Z, [x28, #16]\n" + "mov z27.s, #0x0\n" + "mov z19.s, #0x0\n" + "ld1b { z29.b }, p1/Z, [x25, #2, MUL VL]\n" + "ld1b { z16.b }, p1/Z, [x25, #3, MUL VL]\n" + "sub x21, x25, #0x10\n" + "sub x20, x28, #0x8\n" + "lsl z20.b, z3.b, #0x4\n" + "lsl z4.b, z6.b, #0x4\n" + "ld1rqb { z10.b }, p1/Z, [x28, #32]\n" + "ld1rqb { z23.b }, p1/Z, [x28, #48]\n" + "and z3.b, z3.b, #0xf0\n" + "and z6.b, z6.b, #0xf0\n" + "ld1rqb { z11.b }, p1/Z, [x28, #64]\n" + "ld1rqb { z7.b }, p1/Z, [x28, #80]\n" + "lsl z8.b, z29.b, #0x4\n" + "lsl z14.b, z16.b, #0x4\n" + "ld1rqb { z18.b }, p1/Z, [x28, #96]\n" + "ld1rqb { z30.b }, p1/Z, [x28, #112]\n" + ".inst 0x45149b42 // smmla z2.s, z26.b, z20.b\n" + ".inst 0x45049b59 // smmla z25.s, z26.b, z4.b\n" + "and z29.b, z29.b, #0xf0\n" + "ld1h { z17.s }, p1/Z, [x21]\n" + ".inst 0x45149abb // smmla z27.s, z21.b, z20.b\n" + ".inst 0x45049ab3 // smmla z19.s, z21.b, z4.b\n" + "and z16.b, z16.b, #0xf0\n" + "ld1h { z4.s }, p0/Z, [x20]\n" + "subs x22, x22, #0x1\n" + "add x28, x28, #0x88\n" + "fcvt z17.s, p1/m, z17.h\n" + "add x25, x25, #0x90\n" + ".inst 0x45089942 // smmla z2.s, z10.b, z8.b\n" + ".inst 0x450e9959 // smmla z25.s, z10.b, z14.b\n" + "fcvt z4.s, p1/m, z4.h\n" + ".inst 0x45089afb // smmla z27.s, z23.b, z8.b\n" + ".inst 0x450e9af3 // smmla z19.s, z23.b, z14.b\n" + "fscale z17.s, p1/m, z17.s, z28.s\n" + "mov z4.q, z4.q[0]\n" + ".inst 0x45039962 // smmla z2.s, z11.b, z3.b\n" + ".inst 0x45069979 // smmla z25.s, z11.b, z6.b\n" + "fmul z23.s, z17.s, z4.s[0]\n" + "fmul z9.s, z17.s, z4.s[1]\n" + "fmul z21.s, z17.s, z4.s[2]\n" + "fmul z4.s, z17.s, z4.s[3]\n" + ".inst 0x450398fb // smmla z27.s, z7.b, z3.b\n" + ".inst 0x450698f3 // smmla z19.s, z7.b, z6.b\n" + ".inst 0x451d9a42 // smmla z2.s, z18.b, z29.b\n" + ".inst 0x45109a59 // smmla z25.s, z18.b, z16.b\n" + ".inst 0x451d9bdb // smmla z27.s, z30.b, z29.b\n" + ".inst 0x45109bd3 // smmla z19.s, z30.b, z16.b\n" + "uzp1 z31.d, z2.d, z25.d\n" + "uzp2 z13.d, z2.d, z25.d\n" + "scvtf z31.s, p1/m, z31.s\n" + "uzp1 z17.d, z27.d, z19.d\n" + "uzp2 z18.d, z27.d, z19.d\n" + "scvtf z13.s, p1/m, z13.s\n" + "fmla z24.s, p1/M, z31.s, z23.s\n" + "scvtf z17.s, p1/m, z17.s\n" + "scvtf z18.s, p1/m, z18.s\n" + "fmla z15.s, p1/M, z13.s, z9.s\n" + "fmla z12.s, p1/M, z17.s, z21.s\n" + "fmla z0.s, p1/M, z18.s, z4.s\n" + "bgt 7b\n" + "mov x20, %x[res_ptr]\n" + "cmp x13, #0x1\n" + "st1w { z24.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x13, #0x2\n" + "st1w { z15.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x13, #0x3\n" + "st1w { z12.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "st1w { z0.s }, p1, [x20]\n" + "8:" // Row tail: Accumulator store skip + "subs x24, x24, #0x8\n" + "add %x[res_ptr], %x[res_ptr], #0x20\n" + "bne 6b\n" + "subs x13, x13, #0x4\n" + "add %x[a_ptr], %x[a_ptr], x12\n" + "mov %x[res_ptr], x23\n" + "bgt 5b\n" + "9:" // Row tail: Row loop skip + : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) + : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) + : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", 
"x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" + ); + return; + } +#endif // #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) + +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) + float sumf[4][8]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } +} + +void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { + const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + + float32x4_t sumf[4]; + for (int m = 0; m < 4; m++) { + sumf[m] = vdupq_n_f32(0); + } + + for (int l = 0; l < nb; l++) { + float32x4_t a_d = vcvt_f32_f16(vld1_f16((const float16_t *)a_ptr[l].d)); + float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); + + int32x4_t sumi_0 = vdupq_n_s32(0); + int32x4_t sumi_1 = vdupq_n_s32(0); + int32x4_t sumi_2 = vdupq_n_s32(0); + int32x4_t sumi_3 = vdupq_n_s32(0); + + for (int k = 0; k < 4; k++) { + int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 16 * k + 0); + int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16 * k + 64); + + uint8x16_t b = vld1q_u8(b_ptr[l].qs + 16 * k); + int8x16_t b_hi = vqtbl1q_s8(kvalues, b >> 4); + int8x16_t b_lo = vqtbl1q_s8(kvalues, b & 0xF); + + sumi_0 = vdotq_laneq_s32(sumi_0, b_lo, a_0, 0); + sumi_1 = vdotq_laneq_s32(sumi_1, b_lo, a_0, 1); + sumi_2 = vdotq_laneq_s32(sumi_2, b_lo, a_0, 2); + sumi_3 = vdotq_laneq_s32(sumi_3, b_lo, a_0, 3); + sumi_0 = vdotq_laneq_s32(sumi_0, b_hi, a_1, 0); + sumi_1 = vdotq_laneq_s32(sumi_1, b_hi, a_1, 1); + sumi_2 = vdotq_laneq_s32(sumi_2, b_hi, a_1, 2); + sumi_3 = vdotq_laneq_s32(sumi_3, b_hi, a_1, 3); + } + + sumf[0] = vmlaq_f32(sumf[0], vmulq_laneq_f32(b_d, a_d, 0), vcvtq_f32_s32(sumi_0)); + sumf[1] = vmlaq_f32(sumf[1], vmulq_laneq_f32(b_d, a_d, 1), vcvtq_f32_s32(sumi_1)); + sumf[2] = vmlaq_f32(sumf[2], vmulq_laneq_f32(b_d, a_d, 2), vcvtq_f32_s32(sumi_2)); + sumf[3] = vmlaq_f32(sumf[3], vmulq_laneq_f32(b_d, a_d, 3), vcvtq_f32_s32(sumi_3)); + } + + for (int m = 0; m < 4; m++) { + vst1q_f32(s + (y * 4 + m) * bs + x * 4, sumf[m]); + } + } + } + return; + } +#endif // #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) + { + float sumf[4][4]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; + const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])); + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } + } +} diff --git a/ggml/src/ggml-cpu/arch/loongarch/quants.c b/ggml/src/ggml-cpu/arch/loongarch/quants.c new file mode 100644 index 0000000000000..f2ea965724a3d --- /dev/null +++ b/ggml/src/ggml-cpu/arch/loongarch/quants.c @@ -0,0 +1,2638 @@ +#define GGML_COMMON_IMPL_C +#include "ggml-common.h" +#include "ggml-quants.h" +#include "ggml-impl.h" +#include "ggml-cpu.h" + +#include "../../quants.h" +#include "../../ggml-cpu-impl.h" + +#include +#include +#include +#include +#include // for qsort +#include // for GGML_ASSERT + +#define GROUP_MAX_EPS 1e-15f +#define GROUP_MAX_EPS_IQ3_XXS 1e-8f +#define GROUP_MAX_EPS_IQ2_S 1e-8f +#define GROUP_MAX_EPS_IQ1_M 1e-7f +#define GROUP_MAX_EPS_IQ1_S 1e-12f + +#define UNUSED GGML_UNUSED + +#if defined(__loongarch_sx) + +static __m128i lsx_packs_w(__m128i a, __m128i b) { + __m128i tmp, tmp1; + tmp = __lsx_vsat_w(a, 15); + tmp1 = __lsx_vsat_w(b, 15); + return __lsx_vpickev_h(tmp1, tmp); +} + +static __m128i lsx_packs_h(__m128i a, __m128i b) { + __m128i tmp, tmp1; + tmp = __lsx_vsat_h(a, 7); + tmp1 = __lsx_vsat_h(b, 7); + return __lsx_vpickev_b(tmp1, tmp); +} + +static __m128i lsx_packus_h(__m128i a, __m128i b) { + __m128i tmp, tmp1; + tmp = __lsx_vsat_hu(a, 7); + tmp1 = __lsx_vsat_hu(b, 7); + return __lsx_vpickev_b(tmp1, tmp); +} + +static __m128i lsx_maddubs_h(__m128i a, __m128i b) { + __m128i tmp1, tmp2; + tmp1 = __lsx_vmulwev_h_b(a, b); + tmp2 = __lsx_vmulwod_h_b(a, b); + return __lsx_vsadd_h(tmp1, tmp2); +} + +static __m128i lsx_madd_h(__m128i a, __m128i b) { + __m128i tmp1, tmp2; + tmp1 = __lsx_vmulwev_w_h(a, b); + tmp2 = __lsx_vmulwod_w_h(a, b); + return __lsx_vadd_w(tmp1, tmp2); +} + +static __m128i lsx_set_w(int32_t a, int32_t b, int32_t c, int32_t d) { + v4i32 __ret = {d, c, b, a}; + return (__m128i)__ret; +} + +static __m128i lsx_shuffle_b(__m128i a, __m128i b) { + __m128i mask_f, zero, tmp0, tmp2, mask; + int f = 0x8f; + mask_f = __lsx_vreplgr2vr_b(f); + zero = __lsx_vldi(0); + tmp0 = __lsx_vand_v(b, mask_f); // get mask with low 4 bit and sign bits + tmp0 = __lsx_vori_b(tmp0, 0x10); // make each mask or with 0x10 prepare for positive + mask = __lsx_vsle_b(zero, tmp0); // if mask >= 0, set mask + tmp2 = __lsx_vand_v(tmp0, mask); // maskout the in2 < ones + return __lsx_vshuf_b(a, zero, tmp2); +} + 
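// Note (illustrative, not part of the patch): lsx_shuffle_b above is intended to
// emulate an SSE-style byte shuffle with sign-bit zeroing — a control byte with its
// high bit set yields zero, otherwise its low 4 bits index into the data vector.
// A rough scalar equivalent, assuming 16-byte arrays a, b and output dst:
//
//   for (int i = 0; i < 16; ++i) {
//       // high bit of the control byte selects zero; low nibble indexes into 'a'
//       dst[i] = (b[i] & 0x80) ? 0 : a[b[i] & 0x0F];
//   }
//
// The OR with 0x10 in the helper steers valid indices into the half of the
// two-source __lsx_vshuf_b table that holds 'a', while masked-out lanes fall into
// the zero vector.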
+static __m128i lsx_hadd_h(__m128i a, __m128i b) { + __m128i tmp1 = __lsx_vpickev_h(b, a); + __m128i tmp2 = __lsx_vpickod_h(b, a); + return __lsx_vadd_h(tmp1, tmp2); +} + +static __m128i lsx_hadd_w(__m128i a, __m128i b) { + __m128i tmp1 = __lsx_vpickev_w(b, a); + __m128i tmp2 = __lsx_vpickod_w(b, a); + return __lsx_vadd_w(tmp1, tmp2); +} + +static __m128 lsx_hadd_s(__m128 a, __m128 b) { + __m128 tmp1 = (__m128)__lsx_vpickev_w((__m128i)b, (__m128i)a); + __m128 tmp2 = (__m128)__lsx_vpickod_w((__m128i)b, (__m128i)a); + + return __lsx_vfadd_s(tmp1, tmp2); +} + +static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { + __m128 res_0 =lsx_hadd_s(a, b); + __m128 res_1 =lsx_hadd_s(c, d); + __m128 res =lsx_hadd_s(res_0, res_1); + res =lsx_hadd_s(res, res); + res =lsx_hadd_s(res, res); + + return ((v4f32)res)[0]; +} +#endif + +#if defined(__loongarch_asx) + +#ifdef __clang__ +#define VREGS_PREFIX "$vr" +#define XREGS_PREFIX "$xr" +#else // GCC +#define VREGS_PREFIX "$f" +#define XREGS_PREFIX "$f" +#endif +#define __ALL_REGS "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31" +// Convert __m128i to __m256i +static inline __m256i ____m256i(__m128i in) { + __m256i out = __lasx_xvldi(0); + __asm__ volatile ( + ".irp i," __ALL_REGS "\n\t" + " .ifc %[out], " XREGS_PREFIX"\\i \n\t" + " .irp j," __ALL_REGS "\n\t" + " .ifc %[in], " VREGS_PREFIX "\\j \n\t" + " xvpermi.q $xr\\i, $xr\\j, 0x20 \n\t" + " .endif \n\t" + " .endr \n\t" + " .endif \n\t" + ".endr \n\t" + : [out] "+f" (out) : [in] "f" (in) + ); + return out; +} +// Convert two __m128i to __m256i +static inline __m256i lasx_set_q(__m128i inhi, __m128i inlo) { + __m256i out; + __asm__ volatile ( + ".irp i," __ALL_REGS "\n\t" + " .ifc %[hi], " VREGS_PREFIX "\\i \n\t" + " .irp j," __ALL_REGS "\n\t" + " .ifc %[lo], " VREGS_PREFIX "\\j \n\t" + " xvpermi.q $xr\\i, $xr\\j, 0x20 \n\t" + " .endif \n\t" + " .endr \n\t" + " .endif \n\t" + ".endr \n\t" + ".ifnc %[out], %[hi] \n\t" + ".irp i," __ALL_REGS "\n\t" + " .ifc %[out], " XREGS_PREFIX "\\i \n\t" + " .irp j," __ALL_REGS "\n\t" + " .ifc %[hi], " VREGS_PREFIX "\\j \n\t" + " xvori.b $xr\\i, $xr\\j, 0 \n\t" + " .endif \n\t" + " .endr \n\t" + " .endif \n\t" + ".endr \n\t" + ".endif \n\t" + : [out] "=f" (out), [hi] "+f" (inhi) + : [lo] "f" (inlo) + ); + return out; +} +// Convert __m256i low part to __m128i +static inline __m128i lasx_extracti128_lo(__m256i in) { + __m128i out; + __asm__ volatile ( + ".ifnc %[out], %[in] \n\t" + ".irp i," __ALL_REGS "\n\t" + " .ifc %[out], " VREGS_PREFIX "\\i \n\t" + " .irp j," __ALL_REGS "\n\t" + " .ifc %[in], " XREGS_PREFIX "\\j \n\t" + " vori.b $vr\\i, $vr\\j, 0 \n\t" + " .endif \n\t" + " .endr \n\t" + " .endif \n\t" + ".endr \n\t" + ".endif \n\t" + : [out] "=f" (out) : [in] "f" (in) + ); + return out; +} +// Convert __m256i high part to __m128i +static inline __m128i lasx_extracti128_hi(__m256i in) { + __m128i out; + __asm__ volatile ( + ".irp i," __ALL_REGS "\n\t" + " .ifc %[out], " VREGS_PREFIX "\\i \n\t" + " .irp j," __ALL_REGS "\n\t" + " .ifc %[in], " XREGS_PREFIX "\\j \n\t" + " xvpermi.q $xr\\i, $xr\\j, 0x11 \n\t" + " .endif \n\t" + " .endr \n\t" + " .endif \n\t" + ".endr \n\t" + : [out] "=f" (out) : [in] "f" (in) + ); + return out; +} + +static __m256i lasx_set_w(int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0) { + v8i32 __ret = {e0, e1, e2, e3, e4, e5, e6, e7}; + return (__m256i)__ret; +} + +static __m256i lasx_set_d(int64_t a, int64_t b, int64_t c, int64_t d) { + v4i64 __ret = 
{d, c, b, a}; + return (__m256i)__ret; +} + +static __m256i lasx_insertf128( __m128i x, __m128i y) { + return lasx_set_q(x, y); +} + +static __m256i lasx_shuffle_b(__m256i a, __m256i b) { + __m256i mask_f, zero, tmp0, tmp2, mask; + int f = 0x8f; + mask_f = __lasx_xvreplgr2vr_b(f); + zero = __lasx_xvldi(0); + tmp0 = __lasx_xvand_v(b, mask_f); // get mask with low 4 bit and sign bits + tmp0 = __lasx_xvori_b(tmp0, 0x10); // make each mask or with 0x10 prepare for positive + mask = __lasx_xvsle_b(zero, tmp0); // if mask >= 0, set mask + tmp2 = __lasx_xvand_v(tmp0, mask); // maskout the in2 < ones + return __lasx_xvshuf_b(a, zero, tmp2); +} + +static __m256i lasx_extu8_16(__m128i a) { + return __lasx_vext2xv_hu_bu(____m256i(a)); +} + +static __m256i lasx_ext8_16(__m128i a) { + return __lasx_vext2xv_h_b(____m256i(a)); +} + +static __m256i lasx_ext16_32(__m128i a) { + return __lasx_vext2xv_w_h(____m256i(a)); +} + +static __m128i lasx_extracti128( __m256i a, int pos) { + __m128i ret; + if( pos == 0) + { + ret = lasx_extracti128_lo(a); + } else { + ret = lasx_extracti128_hi(a); + } + return ret; +} + +static __m128 lasx_extractf128( __m256 a, int pos) { + __m128 ret; + if( pos == 0) + { + ret = (__m128)lasx_extracti128_lo((__m256i)a); + } else { + ret = (__m128)lasx_extracti128_hi((__m256i)a); + } + return ret; +} + +static __m256i lasx_maddubs_h(__m256i a, __m256i b) { + __m256i tmp1, tmp2; + tmp1 = __lasx_xvmulwev_h_b(a, b); + tmp2 = __lasx_xvmulwod_h_b(a, b); + return __lasx_xvsadd_h(tmp1, tmp2); +} + +static __m256i lasx_madd_h(__m256i a, __m256i b) { + __m256i tmp1, tmp2; + tmp1 = __lasx_xvmulwev_w_h(a, b); + tmp2 = __lasx_xvmulwod_w_h(a, b); + return __lasx_xvadd_w(tmp1, tmp2); +} + +static __m256i lasx_packs_w(__m256i a, __m256i b) { + __m256i tmp, tmp1; + tmp = __lasx_xvsat_w(a, 15); + tmp1 = __lasx_xvsat_w(b, 15); + return __lasx_xvpickev_h(tmp1, tmp); +} + +static __m256i lasx_packs_h(__m256i a, __m256i b) { + __m256i tmp, tmp1; + tmp = __lasx_xvsat_h(a, 7); + tmp1 = __lasx_xvsat_h(b, 7); + return __lasx_xvpickev_b(tmp1, tmp); +} + +static inline __m256i lasx_madd_h_b(__m256i a, __m256i b) { + __m256i tmp1, tmp2; + tmp1 = __lasx_xvmulwev_h_b(a, b); + tmp2 = __lasx_xvmulwod_h_b(a, b); + return __lasx_xvadd_h(tmp1, tmp2); +} + +static inline __m256i lasx_xvrepl128vei_h(__m256i a, const unsigned int b) { + switch (b) { + case 0: return __lasx_xvrepl128vei_h(a, 0); + case 1: return __lasx_xvrepl128vei_h(a, 1); + case 2: return __lasx_xvrepl128vei_h(a, 2); + case 3: return __lasx_xvrepl128vei_h(a, 3); + case 4: return __lasx_xvrepl128vei_h(a, 4); + case 5: return __lasx_xvrepl128vei_h(a, 5); + case 6: return __lasx_xvrepl128vei_h(a, 6); + case 7: return __lasx_xvrepl128vei_h(a, 7); + default: __builtin_unreachable(); + } +} + +static inline __m256i lasx_xvandi_b_bit(__m256i a, const unsigned int b) { + switch (b) { + case 0: return __lasx_xvandi_b(a, 1 << 0); + case 1: return __lasx_xvandi_b(a, 1 << 1); + case 2: return __lasx_xvandi_b(a, 1 << 2); + case 3: return __lasx_xvandi_b(a, 1 << 3); + case 4: return __lasx_xvandi_b(a, 1 << 4); + case 5: return __lasx_xvandi_b(a, 1 << 5); + case 6: return __lasx_xvandi_b(a, 1 << 6); + case 7: return __lasx_xvandi_b(a, 1 << 7); + default: __builtin_unreachable(); + } +} + +// multiply int8_t, add results pairwise twice +static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) { + // Get absolute values of x vectors + const __m128i ax = __lsx_vsigncov_b(x, x); + // Sign the values of the y vectors + const __m128i sy = __lsx_vsigncov_b(x, 
y); + // Perform multiplication and create 16-bit values + const __m128i dot = lsx_maddubs_h(ax, sy); + const __m128i ones = __lsx_vreplgr2vr_h(1); + return lsx_madd_h(ones, dot); +} + +// horizontally add 8 floats +static inline float hsum_float_8(const __m256 x) { + __m128 res = lasx_extractf128(x, 1); + res = __lsx_vfadd_s(res, lasx_extractf128(x, 0)); + res = __lsx_vfadd_s(res, (__m128)__lsx_vpickod_d((__m128i)res, (__m128i)res)); + res = __lsx_vfadd_s(res, (__m128)__lsx_vinsgr2vr_w(__lsx_vldi(0), __lsx_vpickve2gr_w(res, 1), 0)); + return ((v4f32)res)[0]; +} + +// horizontally add 8 int32_t +static inline int hsum_i32_8(const __m256i a) { + + __m256i tmp1 = __lasx_xvpermi_q(a, a, 0x11); + __m256i tmp2 = __lasx_xvpermi_q(a, a, 0x00); + + __m128i tmp1_128 = lasx_extracti128_lo(tmp1); + __m128i tmp2_128 = lasx_extracti128_lo(tmp2); + + __m128i sum128 = __lsx_vadd_w(tmp1_128, tmp2_128); + + __m128i ev = __lsx_vpickev_w(sum128, sum128); + __m128i od = __lsx_vpickod_w(sum128, sum128); + __m128i sum64 = __lsx_vadd_w(ev, od); + + int sum64_1, sum64_2; + sum64_1 = __lsx_vpickve2gr_w(sum64, 0); + sum64_2 = __lsx_vpickve2gr_w(sum64, 1); + + return sum64_1 + sum64_2; +} + +// horizontally add 4 int32_t +static inline int hsum_i32_4(const __m128i a) { + __m128i ev = __lsx_vpickev_w(a, a); + __m128i od = __lsx_vpickod_w(a, a); + __m128i sum64 = __lsx_vadd_w(ev, od); + + int sum64_1, sum64_2; + sum64_1 = __lsx_vpickve2gr_w(sum64, 0); + sum64_2 = __lsx_vpickve2gr_w(sum64, 1); + + return sum64_1 + sum64_2; +} + +// spread 32 bits to 32 bytes { 0x00, 0xFF } +static inline __m256i bytes_from_bits_32(const uint8_t * x) { + + uint32_t x32; + memcpy(&x32, x, sizeof(uint32_t)); + const __m256i shuf_mask = lasx_set_d( + 0x0303030303030303, 0x0202020202020202, + 0x0101010101010101, 0x0000000000000000); + + __m256i bytes = lasx_shuffle_b(__lasx_xvreplgr2vr_w(x32), shuf_mask); + const __m256i bit_mask = __lasx_xvreplgr2vr_d(0x7fbfdfeff7fbfdfe); + bytes = __lasx_xvor_v(bytes, bit_mask); + return __lasx_xvseq_b(bytes, __lasx_xvreplgr2vr_d(-1)); +} + +// Unpack 32 4-bit fields into 32 bytes +// The output vector contains 32 bytes, each one in [ 0 .. 
15 ] interval +static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) { + const __m128i lo = __lsx_vld((const __m128i *)rsi, 0); + __m128i hi = __lsx_vsrli_h(lo, 4); + return __lasx_xvandi_b(lasx_insertf128(hi, lo), 0xf); +} + +// add int16_t pairwise and return as float vector +static inline __m256 sum_i16_pairs_float(const __m256i x) { + __m256i v = __lasx_xvpackod_h(x, x); + __m256i summed_pairs = __lasx_xvaddwev_w_h(x, v); + return __lasx_xvffint_s_w(summed_pairs); +} + +static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { + // Perform multiplication and create 16-bit values + const __m256i dot = lasx_maddubs_h(ax, sy); + return sum_i16_pairs_float(dot); +} + +// multiply int8_t, add results pairwise twice and return as float vector +static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { + const __m256i dot = lasx_madd_h_b(x, y); + return sum_i16_pairs_float(dot); +} + +static inline __m128i packNibbles( __m256i bytes ) { + // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh + const __m256i lowByte = __lasx_xvreplgr2vr_h(0xFF); + __m256i high = __lasx_xvandn_v(lowByte, bytes); + __m256i low = __lasx_xvand_v(lowByte, bytes); + high = __lasx_xvsrli_h(high, 4); + bytes = __lasx_xvor_v(low, high); + // Compress uint16_t lanes into bytes + __m128i *r0 = (__m128i *)&bytes; + __m256i tmp_h128 = __lasx_xvpermi_q(bytes, bytes, 0x11); + __m128i *r1 = (__m128i *)&tmp_h128; + + __m128i zero = __lsx_vldi(0); + __m128i tmp, tmp2, tmp3; + + tmp = __lsx_vmax_h(zero, *r0); + tmp2 = __lsx_vsat_hu(tmp, 7); + + tmp = __lsx_vmax_h(zero, *r1); + tmp3 = __lsx_vsat_hu(tmp, 7); + return __lsx_vpickev_b(tmp3, tmp2); +} +#endif //__loongarch_asx + +void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__loongarch_asx) + for (int i = 0; i < nb; i++) { + __m256 v0 = (__m256)__lasx_xvld( x , 0); + __m256 v1 = (__m256)__lasx_xvld( x , 32); + __m256 v2 = (__m256)__lasx_xvld( x , 64); + __m256 v3 = (__m256)__lasx_xvld( x , 96); + x += 32; + + // Compute max(abs(e)) for the block + const __m256 sign_bit = __lasx_xvreplfr2vr_s( -0.0f ); + __m256 max_abs = (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v0 ); + max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v1 ) ); + max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v2 ) ); + max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v3 ) ); + + __m128 max4 = __lsx_vfmax_s( lasx_extractf128( max_abs, 1 ), lasx_extractf128( max_abs , 0) ); + max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vpickod_d((__m128i) max4, (__m128i)max4 ) ); + __m128 tmp = max4; + max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vinsgr2vr_w(tmp, __lsx_vpickve2gr_w( max4, 1 ), 0 )); + const float max_scalar = ((v4f32)max4)[0]; + + // Quantize these floats + const float d = max_scalar / 127.f; + y[i].d = GGML_FP32_TO_FP16(d); + const float id = ( max_scalar != 0.0f ) ? 
127.f / max_scalar : 0.0f; + const __m256 mul = (__m256)__lasx_xvreplfr2vr_s( id ); + + // Apply the multiplier + v0 = __lasx_xvfmul_s( v0, mul ); + v1 = __lasx_xvfmul_s( v1, mul ); + v2 = __lasx_xvfmul_s( v2, mul ); + v3 = __lasx_xvfmul_s( v3, mul ); + + // Round to nearest integer + __m256i i0 = __lasx_xvftintrne_w_s( v0 ); + __m256i i1 = __lasx_xvftintrne_w_s( v1 ); + __m256i i2 = __lasx_xvftintrne_w_s( v2 ); + __m256i i3 = __lasx_xvftintrne_w_s( v3 ); + + __m128i ni0 = lasx_extracti128( i0, 0 ); + __m128i ni1 = lasx_extracti128( i0, 1); + __m128i ni2 = lasx_extracti128( i1, 0); + __m128i ni3 = lasx_extracti128( i1, 1); + __m128i ni4 = lasx_extracti128( i2, 0); + __m128i ni5 = lasx_extracti128( i2, 1); + __m128i ni6 = lasx_extracti128( i3, 0); + __m128i ni7 = lasx_extracti128( i3, 1); + + // Convert int32 to int16 + ni0 = lsx_packs_w( ni0, ni1 ); + ni2 = lsx_packs_w( ni2, ni3 ); + ni4 = lsx_packs_w( ni4, ni5 ); + ni6 = lsx_packs_w( ni6, ni7 ); + // Convert int16 to int8 + ni0 = lsx_packs_h( ni0, ni2 ); + ni4 = lsx_packs_h( ni4, ni6 ); + + __lsx_vst(ni0, (__m128i *)(y[i].qs + 0), 0); + __lsx_vst(ni4, (__m128i *)(y[i].qs + 16), 0); + + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_0_ref(x, y, k); +#endif +} + +void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK8_1 == 0); + const int nb = k / QK8_1; + + block_q8_1 * GGML_RESTRICT y = vy; + +#if defined(__loongarch_asx) + for (int i = 0; i < nb; i++) { + __m256 v0 = (__m256)__lasx_xvld( x , 0 ); + __m256 v1 = (__m256)__lasx_xvld( x , 32 ); + __m256 v2 = (__m256)__lasx_xvld( x , 64 ); + __m256 v3 = (__m256)__lasx_xvld( x , 96 ); + x += 32; + + // Compute max(abs(e)) for the block + const __m256 sign_bit = __lasx_xvreplfr2vr_s( -0.0f ); + __m256 max_abs = (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v0 ); + max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v1 ) ); + max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v2 ) ); + max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v3 ) ); + + __m128 max4 = __lsx_vfmax_s( lasx_extractf128( max_abs, 1 ), lasx_extractf128( max_abs, 0) ); + max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vpickod_d((__m128i) max4, (__m128i)max4 ) ); + __m128 tmp = max4; + max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vextrins_w((__m128i)tmp, (__m128i)max4, 0x10 )); + const float max_scalar = ((v4f32)max4)[0]; + + // Quantize these floats + const float d = max_scalar / 127.f; + y[i].d = GGML_FP32_TO_FP16(d); + const float id = ( max_scalar != 0.0f ) ? 
127.f / max_scalar : 0.0f; + const __m256 mul = __lasx_xvreplfr2vr_s( id ); + + // Apply the multiplier + v0 = __lasx_xvfmul_s( v0, mul ); + v1 = __lasx_xvfmul_s( v1, mul ); + v2 = __lasx_xvfmul_s( v2, mul ); + v3 = __lasx_xvfmul_s( v3, mul ); + + // Round to nearest integer + __m256i i0 = __lasx_xvftintrne_w_s( v0 ); + __m256i i1 = __lasx_xvftintrne_w_s( v1 ); + __m256i i2 = __lasx_xvftintrne_w_s( v2 ); + __m256i i3 = __lasx_xvftintrne_w_s( v3 ); + + __m128i ni0 = lasx_extracti128(i0, 0); + __m128i ni1 = lasx_extracti128( i0, 1); + __m128i ni2 = lasx_extracti128( i1, 0); + __m128i ni3 = lasx_extracti128( i1, 1); + __m128i ni4 = lasx_extracti128( i2, 0 ); + __m128i ni5 = lasx_extracti128( i2, 1); + __m128i ni6 = lasx_extracti128( i3, 0); + __m128i ni7 = lasx_extracti128( i3, 1); + + // Compute the sum of the quants and set y[i].s + const __m128i s0 = __lsx_vadd_w(__lsx_vadd_w(ni0, ni1), __lsx_vadd_w(ni2, ni3)); + const __m128i s1 = __lsx_vadd_w(__lsx_vadd_w(ni4, ni5), __lsx_vadd_w(ni6, ni7)); + y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_4(__lsx_vadd_w(s0, s1))); + + // Convert int32 to int16 + ni0 = lsx_packs_w( ni0, ni1 ); + ni2 = lsx_packs_w( ni2, ni3 ); + ni4 = lsx_packs_w( ni4, ni5 ); + ni6 = lsx_packs_w( ni6, ni7 ); + // Convert int16 to int8 + ni0 = lsx_packs_h( ni0, ni2 ); + ni4 = lsx_packs_h( ni4, ni6 ); + + __lsx_vst(ni0, (__m128i *)(y[i].qs + 0), 0); + __lsx_vst(ni4, (__m128i *)(y[i].qs + 16), 0); + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_1_ref(x, y, k); +#endif +} + + +//===================================== Dot products ================================= + +// +// Helper functions +// + +#if defined(__loongarch_asx) +// shuffles to pick the required scales in dot products +static inline __m256i get_scale_shuffle_q3k(int i) { + static const uint8_t k_shuffle[128] = { + 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, + 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, + 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, + 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, + }; + return __lasx_xvld((const __m256i*)k_shuffle + i, 0); +} +static inline __m256i get_scale_shuffle_k4(int i) { + static const uint8_t k_shuffle[256] = { + 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, + 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, + 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, + 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, + 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, + 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, + 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, + 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15 + }; + return __lasx_xvld((const __m256i*)k_shuffle + i, 0); +} +static inline __m128i get_scale_shuffle(int i) { + static const uint8_t k_shuffle[128] = { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, + 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 
9, 9, 9, 9, 9, + 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11, + 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13, + 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15 + }; + return __lsx_vld((const __m128i*)k_shuffle + i, 0); +} +#endif + +void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__loongarch_asx) + // Initialize accumulator with zeros + __m256 acc = (__m256)__lasx_xvldi(0); + + // Main loop + for (; ib < nb; ++ib) { + /* Compute combined scale for the block */ + const __m256 d = __lasx_xvreplfr2vr_s( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); + + __m256i qx = bytes_from_nibbles_32(x[ib].qs); + + // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. + const __m256i off = __lasx_xvreplgr2vr_b( 8 ); + qx = __lasx_xvsub_b( qx, off ); + + __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); + + const __m256 q = mul_sum_i8_pairs_float(qx, qy); + + /* Multiply q with scale and accumulate */ + acc = __lasx_xvfmadd_s( d, q, acc ); + } + + sumf = hsum_float_8(acc); + +#elif defined(__loongarch_sx) + // set constants + const __m128i low_mask = __lsx_vreplgr2vr_b(0xF); + const __m128i off = __lsx_vreplgr2vr_b(8); + + // Initialize accumulator with zeros + __m128 acc_0 = (__m128)__lsx_vldi(0); + __m128 acc_1 = (__m128)__lsx_vldi(0); + __m128 acc_2 = (__m128)__lsx_vldi(0); + __m128 acc_3 = (__m128)__lsx_vldi(0); + + for (; ib + 1 < nb; ib += 2) { + + // Compute combined scale for the block 0 and 1 + const __m128 d_0_1 = (__m128)__lsx_vreplgr2vr_w( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); + + const __m128i tmp_0_1 = __lsx_vld((const __m128i *)x[ib].qs, 0); + + __m128i bx_0 = __lsx_vand_v(low_mask, tmp_0_1); + __m128i by_0 = __lsx_vld((const __m128i *)y[ib].qs, 0); + bx_0 = __lsx_vsub_b(bx_0, off); + const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); + + __m128i bx_1 = __lsx_vand_v(low_mask, __lsx_vsrli_d(tmp_0_1, 4)); + __m128i by_1 = __lsx_vld((const __m128i *)(y[ib].qs + 16), 0); + bx_1 = __lsx_vsub_b(bx_1, off); + const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); + + //_mm_prefetch(&x[ib] + 2 * sizeof(block_q4_0), _MM_HINT_T0); + //_mm_prefetch(&y[ib] + 2 * sizeof(block_q8_0), _MM_HINT_T0); + + // Compute combined scale for the block 2 and 3 + const __m128 d_2_3 = (__m128)__lsx_vreplgr2vr_w( GGML_FP16_TO_FP32(x[ib + 1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) ); + + const __m128i tmp_2_3 = __lsx_vld((const __m128i *)x[ib + 1].qs, 0); + + __m128i bx_2 = __lsx_vand_v(low_mask, tmp_2_3); + __m128i by_2 = __lsx_vld((const __m128i *)y[ib + 1].qs, 0); + bx_2 = __lsx_vsub_b(bx_2, off); + const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2); + + __m128i bx_3 = __lsx_vand_v(low_mask, __lsx_vsrli_d(tmp_2_3, 4)); + __m128i by_3 = __lsx_vld((const __m128i *)(y[ib + 1].qs + 16), 0); + bx_3 = __lsx_vsub_b(bx_3, off); + const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); + + // Convert int32_t to float + __m128 p0 = __lsx_vffint_s_w(i32_0); + __m128 p1 = __lsx_vffint_s_w(i32_1); + __m128 p2 = __lsx_vffint_s_w(i32_2); + __m128 p3 = __lsx_vffint_s_w(i32_3); + + // Apply the scale + __m128 p0_d = __lsx_vfmul_s( d_0_1, p0 ); + __m128 
p1_d = __lsx_vfmul_s( d_0_1, p1 ); + __m128 p2_d = __lsx_vfmul_s( d_2_3, p2 ); + __m128 p3_d = __lsx_vfmul_s( d_2_3, p3 ); + + // Acummulate + acc_0 = __lsx_vfadd_s(p0_d, acc_0); + acc_1 = __lsx_vfadd_s(p1_d, acc_1); + acc_2 = __lsx_vfadd_s(p2_d, acc_2); + acc_3 = __lsx_vfadd_s(p3_d, acc_3); + } + + sumf = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3); + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F) - 8; + const int v1 = (x[ib].qs[j] >> 4) - 8; + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + } + + *s = sumf; +} + +void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__loongarch_asx) + // Initialize accumulator with zeros + __m256 acc = (__m256)__lasx_xvldi(0); + + float summs = 0; + + // Main loop + for (; ib < nb; ++ib) { + const float d0 = GGML_FP16_TO_FP32(x[ib].d); + const float d1 = GGML_FP16_TO_FP32(y[ib].d); + + summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); + + const __m256 d0v = __lasx_xvreplfr2vr_s( d0 ); + const __m256 d1v = __lasx_xvreplfr2vr_s( d1 ); + + // Compute combined scales + const __m256 d0d1 = __lasx_xvfmul_s( d0v, d1v ); + + // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes + const __m256i qx = bytes_from_nibbles_32(x[ib].qs); + const __m256i qy = __lasx_xvld( (const __m256i *)y[ib].qs, 0); + + const __m256 xy = mul_sum_us8_pairs_float(qx, qy); + + // Accumulate d0*d1*x*y + acc = __lasx_xvfmadd_s( d0d1, xy, acc ); + } + + sumf = hsum_float_8(acc) + summs; + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F); + const int v1 = (x[ib].qs[j] >> 4); + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__loongarch_asx) + // Initialize accumulator with zeros + __m256 acc = (__m256)__lasx_xvldi(0); + + // Main loop + for (; ib < nb; ++ib) { + /* Compute combined scale for the block */ + const __m256 d = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); //FIXME + + __m256i qx = bytes_from_nibbles_32(x[ib].qs); + __m256i bxhi = bytes_from_bits_32(x[ib].qh); + bxhi = __lasx_xvandn_v(bxhi, __lasx_xvreplgr2vr_b((char)0xF0)); + qx = __lasx_xvor_v(qx, bxhi); + + __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); + + const __m256 q = 
mul_sum_i8_pairs_float(qx, qy); + + /* Multiply q with scale and accumulate */ + acc = __lasx_xvfmadd_s(d, q, acc); + } + + sumf = hsum_float_8(acc); + +#endif + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; + const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); + + const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); + const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + } + + *s = sumf; +} + +void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_1); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + +#if defined(__loongarch_asx) + // Initialize accumulator with zeros + __m256 acc = (__m256)__lasx_xvldi(0); + + float summs = 0.0f; + + // Main loop + for (; ib < nb; ++ib) { + const __m256 dx = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ib].d)); + + summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); + + __m256i qx = bytes_from_nibbles_32(x[ib].qs); + __m256i bxhi = bytes_from_bits_32(x[ib].qh); + bxhi = __lasx_xvand_v(bxhi, __lasx_xvreplgr2vr_b(0x10)); + qx = __lasx_xvor_v(qx, bxhi); + + const __m256 dy = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(y[ib].d)); + const __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); + + const __m256 q = mul_sum_us8_pairs_float(qx, qy); + + acc = __lasx_xvfmadd_s(q, __lasx_xvfmul_s(dx, dy), acc); + } + + sumf = hsum_float_8(acc) + summs; + +#endif + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; + const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; + + const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; + const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q8_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__loongarch_asx) + // Initialize accumulator with zeros + __m256 acc = (__m256)__lasx_xvldi(0); + + // Main loop + for (; ib < nb; ++ib) { + // Compute combined scale for the block + const __m256 d = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + __m256i qx = __lasx_xvld((const __m256i *)x[ib].qs, 0); + __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); + + const __m256 q = 
mul_sum_i8_pairs_float(qx, qy); + + // Multiply q with scale and accumulate + acc = __lasx_xvfmadd_s( d, q, acc ); + } + + sumf = hsum_float_8(acc); + +#endif + for (; ib < nb; ++ib) { + int sumi = 0; + + for (int j = 0; j < qk; j++) { + sumi += x[ib].qs[j]*y[ib].qs[j]; + } + + sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + } + + *s = sumf; +} + +void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q2_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __loongarch_asx + + __m256 acc = (__m256)__lasx_xvldi(0); + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + const uint8_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const __m128i mins_and_scales128 = __lsx_vld((const __m128i*)x[i].scales, 0); + const __m128i scales128 = __lsx_vandi_b(mins_and_scales128, 0xf); + const __m256i mins = lasx_ext8_16(__lsx_vsrli_b(mins_and_scales128, 4)); + const __m256i prod = lasx_madd_h(mins, __lasx_xvld((const __m256i*)y[i].bsums, 0)); + + acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(dmin), __lasx_xvffint_s_w(prod), acc); + + const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; + const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); + + __m256i sumi = __lasx_xvldi(0); + + for (int j = 0; j < QK_K/128; ++j) { + + const __m256i q2bits = __lasx_xvld((const __m256i*)q2, 0); q2 += 32; + + const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + const __m256i q8_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + const __m256i q8_3 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + + const __m256i q2_0 = __lasx_xvandi_b(q2bits, 3); + const __m256i q2_1 = __lasx_xvandi_b(__lasx_xvsrli_b(q2bits, 2), 3); + const __m256i q2_2 = __lasx_xvandi_b(__lasx_xvsrli_b(q2bits, 4), 3); + const __m256i q2_3 = __lasx_xvsrli_b(q2bits, 6); + + __m256i p0 = lasx_madd_h_b(q2_0, q8_0); + __m256i p1 = lasx_madd_h_b(q2_1, q8_1); + __m256i p2 = lasx_madd_h_b(q2_2, q8_2); + __m256i p3 = lasx_madd_h_b(q2_3, q8_3); + + p0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p0); + p1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p1); + p2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p2); + p3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p3); + + p0 = __lasx_xvadd_w(p0, p1); + p2 = __lasx_xvadd_w(p2, p3); + + sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p0, p2)); + } + + acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); + + } + + *s = hsum_float_8(acc); + +#else + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const uint8_t * q2 = x[i].qs; + const int8_t * q8 = y[i].qs; + const uint8_t * sc = x[i].scales; + + int summs = 0; + for (int j = 0; j < 16; ++j) { + summs += y[i].bsums[j] * (sc[j] >> 4); + } + + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + int isum = 0; + int is = 0; + int d; + for (int k = 0; k < QK_K/128; ++k) { + int shift = 0; + for (int j = 0; j < 4; ++j) { + d = sc[is++] & 
0xF; + int isuml = 0; + for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + d = sc[is++] & 0xF; + isuml = 0; + for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + shift += 2; + q8 += 32; + } + q2 += 32; + } + sumf += dall * isum - dmin * summs; + } + *s = sumf; +#endif +} + +void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const uint32_t kmask1 = 0x03030303; + const uint32_t kmask2 = 0x0f0f0f0f; + + const block_q3_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __loongarch_asx + + const __m128i m32 = __lsx_vreplgr2vr_b(32); + + __m256 acc = (__m256)__lasx_xvldi(0); + + uint32_t aux[3]; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + // Set up scales + memcpy(aux, x[i].scales, 12); + __m128i scales128 = lsx_set_w( + ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), + ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), + (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), + (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); + scales128 = __lsx_vsub_b(scales128, m32); + + const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; + const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); + + // high bit + const __m256i hbits = __lasx_xvld((const __m256i*)x[i].hmask, 0); + + // integer accumulator + __m256i sumi = __lasx_xvldi(0); + + for (int j = 0; j < QK_K/128; ++j) { + // load low 2 bits + const __m256i q3bits = __lasx_xvld((const __m256i*)q3, 0); q3 += 32; + + // prepare low and high bits + const __m256i q3l_0 = __lasx_xvandi_b(q3bits, 3); + const __m256i q3l_1 = __lasx_xvandi_b(__lasx_xvsrli_b(q3bits, 2), 3); + const __m256i q3l_2 = __lasx_xvandi_b(__lasx_xvsrli_b(q3bits, 4), 3); + const __m256i q3l_3 = __lasx_xvsrli_b(q3bits, 6); + const __m256i q3h_0 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 0), 0), 2); + const __m256i q3h_1 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 1), 0), 2); + const __m256i q3h_2 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 2), 0), 2); + const __m256i q3h_3 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 3), 0), 2); + const __m256i q3_0 = __lasx_xvor_v(q3h_0, q3l_0); + const __m256i q3_1 = __lasx_xvor_v(q3h_1, q3l_1); + const __m256i q3_2 = __lasx_xvor_v(q3h_2, q3l_2); + const __m256i q3_3 = __lasx_xvor_v(q3h_3, q3l_3); + + // load Q8 quants + const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + const __m256i q8_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + const __m256i q8_3 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + + __m256i p16_0 = lasx_madd_h_b(q8_0, q3_0); + __m256i p16_1 = lasx_madd_h_b(q8_1, q3_1); + __m256i p16_2 = lasx_madd_h_b(q8_2, q3_2); + __m256i p16_3 = lasx_madd_h_b(q8_3, q3_3); + + // multiply with scales + p16_0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p16_0); + p16_1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p16_1); + p16_2 = 
lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p16_2); + p16_3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p16_3); + + // accumulate + p16_0 = __lasx_xvadd_w(p16_0, p16_1); + p16_2 = __lasx_xvadd_w(p16_2, p16_3); + sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_2)); + } + // multiply with block scale and accumulate + acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); + } + + *s = hsum_float_8(acc); + +#else + // scalar version + // This function is written like this so the compiler can manage to vectorize most of it + // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the + // manually vectorized version above. Every other version I tried would run at least 4 times slower. + // The ideal situation would be if we could just write the code once, and the compiler would + // automatically produce the best possible set of machine instructions, instead of us having to manually + // write vectorized versions for AVX, ARM_NEON, etc. + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + uint32_t auxs[4]; + const int8_t * scales = (const int8_t*)auxs; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].hmask; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 
0 : 4); + a += 32; m <<= 1; + q3 += 32; + } + a = aux8; + + memcpy(auxs, x[i].scales, 12); + uint32_t tmp = auxs[2]; + auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + for (int j = 0; j < QK_K/16; ++j) { + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; + +#endif + +} + +void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined __loongarch_asx + + __m256 acc = (__m256)__lasx_xvldi(0); + __m128 acc_m = (__m128)__lsx_vldi(0); + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const __m128i mins_and_scales128 = lsx_set_w(utmp[3], utmp[2], utmp[1], utmp[0]); + const __m128i mins128 = __lsx_vexth_h_b(mins_and_scales128); + const __m128i scales128 = __lsx_vsllwil_h_b(mins_and_scales128, 0); + + const __m256i q8sums = __lasx_xvld((const __m256i*)y[i].bsums, 0); + const __m128i q8s = lsx_hadd_h(lasx_extracti128(q8sums, 0), lasx_extracti128(q8sums, 1)); + const __m128i prod = lsx_madd_h(mins128, q8s); + acc_m = __lsx_vfmadd_s(__lsx_vreplfr2vr_s(dmin), __lsx_vffint_s_w(prod), acc_m); + + const __m256i scales = lasx_insertf128(scales128, scales128); + + __m256i sumi = __lasx_xvldi(0); + + for (int j = 0; j < QK_K/64; ++j) { + + const __m256i scale_l = lasx_xvrepl128vei_h(scales, 2 * j + 0); + const __m256i scale_h = lasx_xvrepl128vei_h(scales, 2 * j + 1); + + const __m256i q4bits = __lasx_xvld((const __m256i*)q4, 0); q4 += 32; + const __m256i q4l = __lasx_xvandi_b(q4bits, 0xf); + const __m256i q4h = __lasx_xvsrli_b(q4bits, 4); + + const __m256i q8l = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + __m256i p16l = lasx_madd_h_b(q4l, q8l); + p16l = lasx_madd_h(scale_l, p16l); + + const __m256i q8h = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + __m256i p16h = lasx_madd_h_b(q4h, q8h); + p16h = lasx_madd_h(scale_h, p16h); + const __m256i sumj = __lasx_xvadd_w(p16l, p16h); + + sumi = __lasx_xvadd_w(sumi, sumj); + } + + __m256 vd = __lasx_xvreplfr2vr_s(d); + acc = __lasx_xvfmadd_s(vd, __lasx_xvffint_s_w(sumi), acc); + + } + + acc_m = 
__lsx_vfadd_s(acc_m, (__m128)__lsx_vpermi_w((__m128i)acc_m, (__m128i)acc_m, 0xee)); + __m128i tmp1 = __lsx_vinsgr2vr_w(__lsx_vldi(0), __lsx_vpickve2gr_w((__m128i)acc_m, 1), 0); + acc_m = __lsx_vfadd_s(acc_m, (__m128)tmp1); + + + *s = hsum_float_8(acc) + ((v4f32)acc_m)[0]; + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + a += 32; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + a += 32; q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined __loongarch_asx + + __m256 acc = (__m256)__lasx_xvldi(0); + __m128 acc_m = (__m128)__lsx_vldi(0); + + for (int i = 0; i < nb; ++i) { + + const uint8_t * GGML_RESTRICT q5 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const __m128i mins_and_scales128 = lsx_set_w(utmp[3], utmp[2], utmp[1], utmp[0]); + const __m128i mins128 = __lsx_vexth_h_b(mins_and_scales128); + const __m128i scales128 = __lsx_vsllwil_h_b(mins_and_scales128, 0); + + 
const __m256i q8sums = __lasx_xvld((const __m256i*)y[i].bsums, 0); + const __m128i q8s = lsx_hadd_h(lasx_extracti128(q8sums, 0), lasx_extracti128(q8sums, 1)); + const __m128i prod = lsx_madd_h(mins128, q8s); + acc_m = __lsx_vfmadd_s(__lsx_vreplfr2vr_s(dmin), __lsx_vffint_s_w(prod), acc_m); + + const __m256i scales = lasx_insertf128(scales128, scales128); + + const __m256i hbits = __lasx_xvld((const __m256i*)x[i].qh, 0); + + __m256i sumi = __lasx_xvldi(0); + + for (int j = 0; j < QK_K/64; ++j) { + + const __m256i scale_0 = lasx_xvrepl128vei_h(scales, 2 * j + 0); + const __m256i scale_1 = lasx_xvrepl128vei_h(scales, 2 * j + 1); + + const __m256i q5bits = __lasx_xvld((const __m256i*)q5, 0); q5 += 32; + + const __m256i q5l_0 = __lasx_xvandi_b(q5bits, 0xf); + const __m256i q5l_1 = __lasx_xvsrli_b(q5bits, 4); + const __m256i q5h_0 = __lasx_xvnori_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 2 * j + 0), 0), 0xef); + const __m256i q5h_1 = __lasx_xvnori_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 2 * j + 1), 0), 0xef); + const __m256i q5_0 = __lasx_xvor_v(q5l_0, q5h_0); + const __m256i q5_1 = __lasx_xvor_v(q5l_1, q5h_1); + + const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + + __m256i p16_0 = lasx_madd_h_b(q5_0, q8_0); + __m256i p16_1 = lasx_madd_h_b(q5_1, q8_1); + + p16_0 = lasx_madd_h(scale_0, p16_0); + p16_1 = lasx_madd_h(scale_1, p16_1); + + sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_1)); + + } + + __m256 vd = __lasx_xvreplfr2vr_s(d); + acc = __lasx_xvfmadd_s(vd, __lasx_xvffint_s_w(sumi), acc); + + } + + acc_m = __lsx_vfadd_s(acc_m, (__m128)__lsx_vbsrl_v(acc_m, 8)); + acc_m = __lsx_vfadd_s(acc_m, (__m128)__lsx_vbsrl_v(acc_m, 4)); + + *s = hsum_float_8(acc) + ((v4f32)acc_m)[0]; + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 
16 : 0); + a += 32; m <<= 1; + q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q6_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __loongarch_asx + + const __m256i m32s = __lasx_xvreplgr2vr_b(32); + + __m256 acc = (__m256)__lasx_xvldi(0); + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const __m128i scales128 = __lsx_vld((const __m128i*)x[i].scales, 0); + const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; + const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); + + __m256i sumi = __lasx_xvldi(0); + + for (int j = 0; j < QK_K/128; ++j) { + + const __m256i q4bits1 = __lasx_xvld((const __m256i*)q4, 0); q4 += 32; + const __m256i q4bits2 = __lasx_xvld((const __m256i*)q4, 0); q4 += 32; + const __m256i q4bitsH = __lasx_xvld((const __m256i*)qh, 0); qh += 32; + + const __m256i q4h_0 = __lasx_xvslli_b(__lasx_xvandi_b(q4bitsH, 3), 4); + const __m256i q4h_1 = __lasx_xvslli_b(__lasx_xvandi_b(q4bitsH, 3 << 2), 2); + const __m256i q4h_2 = __lasx_xvandi_b(q4bitsH, 3 << 4); + const __m256i q4h_3 = __lasx_xvsrli_b(__lasx_xvandi_b(q4bitsH, 3 << 6), 2); + + const __m256i q4_0 = __lasx_xvor_v(__lasx_xvandi_b(q4bits1, 0xf), q4h_0); + const __m256i q4_1 = __lasx_xvor_v(__lasx_xvandi_b(q4bits2, 0xf), q4h_1); + const __m256i q4_2 = __lasx_xvor_v(__lasx_xvsrli_b(q4bits1, 4), q4h_2); + const __m256i q4_3 = __lasx_xvor_v(__lasx_xvsrli_b(q4bits2, 4), q4h_3); + + const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + const __m256i q8_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + const __m256i q8_3 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + + __m256i p16_0 = lasx_madd_h_b(__lasx_xvsub_b(q4_0, m32s), q8_0); + __m256i p16_1 = 
lasx_madd_h_b(__lasx_xvsub_b(q4_1, m32s), q8_1); + __m256i p16_2 = lasx_madd_h_b(__lasx_xvsub_b(q4_2, m32s), q8_2); + __m256i p16_3 = lasx_madd_h_b(__lasx_xvsub_b(q4_3, m32s), q8_3); + + p16_0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p16_0); + p16_1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p16_1); + p16_2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p16_2); + p16_3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p16_3); + + sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_1)); + sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_2, p16_3)); + } + + acc = __lasx_xvfmadd_s((__m256)__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); + } + + *s = hsum_float_8(acc); + +#else + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; + } + a += 128; + q4 += 64; + qh += 32; + } + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/16; ++j) { + int scale = x[i].scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +#if defined(__loongarch_asx) +static const int8_t keven_signs_q2xs[1024] = { + 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, + 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, + 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, + 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, + 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, + 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, + 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, + 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, + 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, + 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, + 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, + 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, + 1, 1, 1, 1, -1, -1, 1, 1, 
-1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, + 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, + 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, + 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, + 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, + 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, + 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, + 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, + 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, + 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, + 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, + 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, + 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, + 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, + 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, + 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, + 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, + 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, + 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, + 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, +}; +#endif + +void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__loongarch_asx) + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[4]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + __m256 accumf = (__m256)__lasx_xvldi(0); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m256i sumi1 = __lasx_xvldi(0); + __m256i sumi2 = __lasx_xvldi(0); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; + + const __m256i q2_1 = lasx_set_d(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); + const __m256i q2_2 = 
lasx_set_d(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); + const __m256i s2_1 = lasx_set_d(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], + signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m256i s2_2 = lasx_set_d(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127], + signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); + const __m256i q8s_1 = __lasx_xvsigncov_b(s2_1, q8_1); + const __m256i q8s_2 = __lasx_xvsigncov_b(s2_2, q8_2); + const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); + const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); + const uint16_t ls1 = aux32[1] >> 28; + const uint16_t ls2 = aux32[3] >> 28; + const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2*ls1+1)); + const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2*ls2+1)); + sumi1 = __lasx_xvadd_w(sumi1, p1); + sumi2 = __lasx_xvadd_w(sumi2, p2); + } + + accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); + } + + *s = 0.125f * hsum_float_8(accumf); + +#else + + uint32_t aux32[2]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + memcpy(aux32, q2, 2*sizeof(uint32_t)); + q2 += 4; + const uint32_t ls = 2*(aux32[1] >> 28) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); + const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? 
-1 : 1); + } + q8 += 8; + } + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +#endif +} + +void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__loongarch_asx) + + const __m256i mone = __lasx_xvreplgr2vr_b(1); + static const char block_sign_shuffle_mask_1[32] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + }; + static const char block_sign_shuffle_mask_2[32] = { + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, + }; + static const uint8_t bit_selector_mask_bytes[32] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m256i bit_selector_mask = __lasx_xvld((const __m256i*)bit_selector_mask_bytes, 0); + const __m256i block_sign_shuffle_1 = __lasx_xvld((const __m256i*)block_sign_shuffle_mask_1, 0); + const __m256i block_sign_shuffle_2 = __lasx_xvld((const __m256i*)block_sign_shuffle_mask_2, 0); + + static const uint8_t k_bit_helper[32] = { + 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, + 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, + }; + const __m256i bit_helper = __lasx_xvld((const __m256i*)k_bit_helper, 0); + const __m256i m511 = __lasx_xvreplgr2vr_h(511); + const __m128i m4 = __lsx_vreplgr2vr_b(0xf); + const __m128i m1 = __lsx_vreplgr2vr_b(1); + + uint64_t aux64; + + // somewhat hacky, but gives a significant boost in performance + __m256i aux_gindex; + const uint16_t * gindex = (const uint16_t *)&aux_gindex; + + __m256 accumf = (__m256)__lasx_xvldi(0); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(&aux64, x[i].scales, 8); + __m128i stmp = __lsx_vreplgr2vr_d(aux64); + stmp = __lsx_vilvl_b( __lsx_vand_v(__lsx_vsrli_h(stmp, 4), m4), __lsx_vand_v(stmp, m4)); + const __m128i scales = __lsx_vadd_b(__lsx_vslli_h(stmp, 1), m1); + + __m256i sumi1 = __lasx_xvldi(0); + __m256i sumi2 = __lasx_xvldi(0); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { + + const __m256i q2_data = __lasx_xvld((const __m256i*)q2, 0); q2 += 16; + aux_gindex = __lasx_xvand_v(q2_data, m511); + + const __m256i partial_sign_bits = __lasx_xvsrli_h(q2_data, 9); + const __m256i partial_sign_bits_upper = __lasx_xvsrli_h(q2_data, 13); + const __m256i partial_sign_bits_for_counting = __lasx_xvxor_v(partial_sign_bits, partial_sign_bits_upper); + + const __m256i odd_bits = lasx_shuffle_b(bit_helper, partial_sign_bits_for_counting); + const __m256i full_sign_bits = __lasx_xvor_v(partial_sign_bits, odd_bits); + + const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + const __m256i q8_3 = 
__lasx_xvld((const __m256i *)q8, 0); q8 += 32; + const __m256i q8_4 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + + const __m256i q2_1 = lasx_set_d(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]], + iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]); + const __m256i q2_2 = lasx_set_d(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]], + iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]); + const __m256i q2_3 = lasx_set_d(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]], + iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]); + const __m256i q2_4 = lasx_set_d(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]], + iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); + + const __m128i full_signs_l = lasx_extracti128(full_sign_bits, 0); + const __m128i full_signs_h = lasx_extracti128(full_sign_bits, 1); + const __m256i full_signs_1 = lasx_insertf128(full_signs_l, full_signs_l); + const __m256i full_signs_2 = lasx_insertf128(full_signs_h, full_signs_h); + + __m256i signs; + signs = lasx_shuffle_b(full_signs_1, block_sign_shuffle_1); + signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); + const __m256i q8s_1 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_1); + + signs = lasx_shuffle_b(full_signs_1, block_sign_shuffle_2); + signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); + const __m256i q8s_2 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_2); + + signs = lasx_shuffle_b(full_signs_2, block_sign_shuffle_1); + signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); + const __m256i q8s_3 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_3); + + signs = lasx_shuffle_b(full_signs_2, block_sign_shuffle_2); + signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); + const __m256i q8s_4 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_4); + + const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); + const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); + const __m256i dot3 = lasx_maddubs_h(q2_3, q8s_3); + const __m256i dot4 = lasx_maddubs_h(q2_4, q8s_4); + + const __m256i sc1 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+0))); + const __m256i sc2 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+1))); + const __m256i sc3 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+2))); + const __m256i sc4 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+3))); + + sumi1 = __lasx_xvadd_w(sumi1, lasx_madd_h(dot1, sc1)); + sumi2 = __lasx_xvadd_w(sumi2, lasx_madd_h(dot2, sc2)); + sumi1 = __lasx_xvadd_w(sumi1, lasx_madd_h(dot3, sc3)); + sumi2 = __lasx_xvadd_w(sumi2, lasx_madd_h(dot4, sc4)); + } + + accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#else + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const uint8_t * GGML_RESTRICT sc = x[i].scales; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1; + const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1; + int32_t sumi = 0; + for (int l = 0; l < 2; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); + const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? 
-1 : 1); + } + q8 += 8; + } + bsum += sumi * ls1; + sumi = 0; + for (int l = 2; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); + const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += sumi * ls2; + q2 += 4; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +#endif +} + +void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__loongarch_asx) + + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + + const __m128i m4 = __lsx_vreplgr2vr_b(0xf); + const __m128i m1 = __lsx_vreplgr2vr_b(1); + + const __m256i mask1 = __lasx_xvld((const __m256i*)k_mask1, 0); + const __m256i mask2 = __lasx_xvld((const __m256i*)k_mask2, 0); + uint64_t aux64; + + __m256 accumf = (__m256)__lasx_xvldi(0); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + __m128i tmp1; + memcpy(&aux64, x[i].scales, 8); + tmp1 = __lsx_vinsgr2vr_d(tmp1, aux64, 0); + tmp1 = __lsx_vinsgr2vr_d(tmp1, aux64 >> 4, 1); + const __m128i scales8 = __lsx_vadd_b(__lsx_vslli_h(__lsx_vand_v(tmp1, m4), 1), m1); + const __m256i scales16 = lasx_ext8_16(scales8); // 0 2 4 6 8 10 12 14 1 3 5 7 9 11 13 15 + + __m256i sumi1 = __lasx_xvldi(0); + __m256i sumi2 = __lasx_xvldi(0); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + const __m256i q2_1 = lasx_set_d(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], + iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)], + iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], + iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); + const __m256i q2_2 = lasx_set_d(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], + iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)], + iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], + iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); + qs += 8; + + __m256i aux256 = __lasx_xvreplgr2vr_w(signs[0] | ((uint32_t) signs[1] << 16)); + aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); + const __m256i s2_1 = __lasx_xvseq_b(aux256, mask2); + const __m256i q8s_1 = __lasx_xvsub_b(__lasx_xvxor_v(s2_1, q8_1), s2_1); + + aux256 = __lasx_xvreplgr2vr_w(signs[2] | ((uint32_t) signs[3] << 16)); + aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); + const __m256i s2_2 = __lasx_xvseq_b(aux256, mask2); + const __m256i q8s_2 = __lasx_xvsub_b(__lasx_xvxor_v(s2_2, q8_2), s2_2); + + signs += 4; + + const __m256i 
dot1 = lasx_maddubs_h(q2_1, q8s_1); // blocks 2*ib32+0, 2*ib32+1 + const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); // blocks 2*ib32+2, 2*ib32+3 + + const __m256i p1 = lasx_madd_h(dot1, lasx_shuffle_b(scales16, get_scale_shuffle_k4(ib32+0))); + const __m256i p2 = lasx_madd_h(dot2, lasx_shuffle_b(scales16, get_scale_shuffle_k4(ib32+1))); + sumi1 = __lasx_xvadd_w(sumi1, p1); + sumi2 = __lasx_xvadd_w(sumi2, p2); + } + + accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); + } + + *s = 0.125f * hsum_float_8(accumf); + +#else + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint8_t * signs = qs + QK_K/8; + + int bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf); + int ls2 = 1 + 2*(x[i].scales[ib32] >> 4); + int sumi1 = 0, sumi2 = 0; + for (int l = 0; l < 2; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); + for (int j = 0; j < 8; ++j) { + sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + for (int l = 2; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); + for (int j = 0; j < 8; ++j) { + sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += ls1 * sumi1 + ls2 * sumi2; + qs += 4; + signs += 4; + } + + sumf += d * bsum; + } + + *s = 0.125f * sumf; + +#endif + +} + +void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__loongarch_asx) + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[2]; + + __m256 accumf = (__m256)__lasx_xvldi(0); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m256i sumi1 = __lasx_xvldi(0); + __m256i sumi2 = __lasx_xvldi(0); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + const __m256i q2_1 = lasx_set_w(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], + iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); + q3 += 8; + const __m256i q2_2 = lasx_set_w(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], + iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); + q3 += 8; + memcpy(aux32, gas, 8); gas += 8; + + const __m256i s2_1 = lasx_set_d(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127], + signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); + const __m256i s2_2 = lasx_set_d(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], + signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m256i q8s_1 = __lasx_xvsigncov_b(s2_1, q8_1); + const __m256i q8s_2 
= __lasx_xvsigncov_b(s2_2, q8_2); + const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); + const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); + const uint16_t ls1 = aux32[0] >> 28; + const uint16_t ls2 = aux32[1] >> 28; + + const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2*ls1+1)); + const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2*ls2+1)); + sumi1 = __lasx_xvadd_w(sumi1, p1); + sumi2 = __lasx_xvadd_w(sumi2, p2); + } + + accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); + } + + *s = 0.25f * hsum_float_8(accumf); + +#else + + uint32_t aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t); + const uint32_t ls = 2*(aux32 >> 28) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]); + const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]); + const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + q3 += 8; + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.25f * sumf; +#endif +} + +void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__loongarch_asx) + + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m256i mask1 = __lasx_xvld((const __m256i*)k_mask1, 0); + const __m256i mask2 = __lasx_xvld((const __m256i*)k_mask2, 0); + + __m256i idx_shift = lasx_set_w(1, 2, 3, 4, 5, 6, 7, 8); + const __m256i idx_mask = __lasx_xvreplgr2vr_w(256); + + typedef union { + __m256i vec[2]; + uint32_t index[16]; + } index_t; + + index_t idx; + + __m256 accumf = (__m256)__lasx_xvldi(0); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m256i sumi1 = __lasx_xvldi(0); + __m256i sumi2 = __lasx_xvldi(0); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + const __m256i idx_l = lasx_extu8_16(__lsx_vld(qs, 0)); qs += 16; + idx.vec[0] = __lasx_xvreplgr2vr_w(qh[ib32+0]); + idx.vec[1] = 
__lasx_xvreplgr2vr_w(qh[ib32+1]); + idx.vec[0] = __lasx_xvand_v(__lasx_xvsll_w(idx.vec[0], idx_shift), idx_mask); + idx.vec[1] = __lasx_xvand_v(__lasx_xvsll_w(idx.vec[1], idx_shift), idx_mask); + idx.vec[0] = __lasx_xvor_v(idx.vec[0], lasx_ext16_32(lasx_extracti128(idx_l, 0))); + idx.vec[1] = __lasx_xvor_v(idx.vec[1], lasx_ext16_32(lasx_extracti128(idx_l, 1))); + + // At least on my CPU (Ryzen 7950X), using _mm256_i32gather_epi32 is slower than _mm256_set_epi32. Strange. + //const __m256i q2_1 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[0], 4); + //const __m256i q2_2 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[1], 4); + const __m256i q2_1 = lasx_set_w( + iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]], + iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]] + ); + const __m256i q2_2 = lasx_set_w( + iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]], + iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[ 9]], iq3s_grid[idx.index[ 8]] + ); + + __m256i aux256 = __lasx_xvreplgr2vr_w(signs[0] | (signs[1] << 16)); + aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); + const __m256i s2_1 = __lasx_xvseq_b(aux256, mask2); + const __m256i q8s_1 = __lasx_xvsub_b(__lasx_xvxor_v(s2_1, q8_1), s2_1); + + aux256 = __lasx_xvreplgr2vr_w(signs[2] | (signs[3] << 16)); + aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); + const __m256i s2_2 = __lasx_xvseq_b(aux256, mask2); + const __m256i q8s_2 = __lasx_xvsub_b(__lasx_xvxor_v(s2_2, q8_2), s2_2); + + signs += 4; + + const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); + const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); + const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; + const uint16_t ls2 = x[i].scales[ib32/2] >> 4; + const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2*ls1+1)); + const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2*ls2+1)); + sumi1 = __lasx_xvadd_w(sumi1, p1); + sumi2 = __lasx_xvadd_w(sumi2, p2); + } + + accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); + } + + *s = hsum_float_8(accumf); + +#else + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint8_t * GGML_RESTRICT signs = x[i].signs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1; + const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256))); + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ?
-1 : 1); + } + q8 += 8; + } + qs += 8; + signs += 4; + bsum += sumi * ls1; + sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256))); + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + qs += 8; + signs += 4; + bsum += sumi * ls2; + } + sumf += d * bsum; + } + *s = sumf; +#endif +} + +#if defined(__loongarch_asx) +static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) { + const __m256i a = __lasx_xvmulwev_h_b(x, y); + const __m256i b = __lasx_xvmulwod_h_b(x, y); + return __lasx_xvadd_h(a, b); +} +#endif + +void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__loongarch_asx) + + __m256 accum = (__m256)__lasx_xvldi(0); + float accum1 = 0; + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + __m256i sumi = __lasx_xvldi(0); + int sumi1 = 0; + for (int ib = 0; ib < QK_K/32; ib += 2) { + __m256i q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)], 0); + q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], 1); + q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)], 2); + q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], 3); + + __m256i q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)], 0); + q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], 1); + q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)], 2); + q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], 3); + + qs += 8; + const __m256i q8b_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + const __m256i q8b_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; + + const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); + const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); + const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; + const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; + + __m256i tmp1, tmp5, tmp6; + tmp1 = __lasx_xvreplgr2vr_h(ls1); + tmp5 = __lasx_xvmulwev_w_h(dot1, tmp1); + tmp6 = __lasx_xvmulwod_w_h(dot1, tmp1); + const __m256i p1 = __lasx_xvadd_w(tmp5, tmp6); + + tmp1 = __lasx_xvreplgr2vr_h(ls2); + tmp5 = __lasx_xvmulwev_w_h(dot2, tmp1); + tmp6 = __lasx_xvmulwod_w_h(dot2, tmp1); + const __m256i p2 = __lasx_xvadd_w(tmp5, tmp6); + + sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p1, p2)); + sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 + + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? 
-1 : 1) * ls2; + } + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + accum = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), accum); + accum1 += d * sumi1; + } + + *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; + +#else + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + int sumi = 0, sumi1 = 0; + for (int ib = 0; ib < QK_K/32; ++ib) { + const int ls = 2*((qh[ib] >> 12) & 7) + 1; + const int delta = qh[ib] & 0x8000 ? -1 : 1; + int lsum = 0; + for (int l = 0; l < 4; ++l) { + const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8))); + for (int j = 0; j < 8; ++j) { + lsum += q8[j] * grid[j]; + } + q8 += 8; + } + sumi += ls * lsum; + sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]); + qs += 4; + } + + sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); + } + + *s = sumf; + +#endif +} + +void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK4_NL == 0); + static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); + + const block_iq4_nl * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + const int nb = n / QK4_NL; + + int ib = 0; + float sumf = 0; + +#if defined (__loongarch_asx) + + const __m128i values128 = __lsx_vld((const __m128i*)kvalues_iq4nl, 0); + const __m128i m4b = __lsx_vreplgr2vr_b(0x0f); + const __m256i mone = __lasx_xvreplgr2vr_h(1); + + __m256 accum1 = (__m256)__lasx_xvldi(0); + __m256 accum2 = (__m256)__lasx_xvldi(0); + for (; ib + 1 < nb; ib += 2) { + const __m128i q4bits_1 = __lsx_vld((const __m128i*)x[ib + 0].qs, 0); + const __m128i q4bits_2 = __lsx_vld((const __m128i*)x[ib + 1].qs, 0); + const __m256i q8b_1 = __lasx_xvld((const __m256i *)y[ib + 0].qs, 0); + const __m256i q8b_2 = __lasx_xvld((const __m256i *)y[ib + 1].qs, 0); + const __m256i q4b_1 = lasx_insertf128(lsx_shuffle_b(values128, __lsx_vand_v(__lsx_vsrli_h(q4bits_1, 4), m4b)), + lsx_shuffle_b(values128, __lsx_vand_v(q4bits_1, m4b))); + const __m256i q4b_2 = lasx_insertf128(lsx_shuffle_b(values128, __lsx_vand_v(__lsx_vsrli_h(q4bits_2, 4), m4b)), + lsx_shuffle_b(values128, __lsx_vand_v(q4bits_2, m4b))); + const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); + const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); + const __m256i p_1 = lasx_madd_h(p16_1, mone); + const __m256i p_2 = lasx_madd_h(p16_2, mone); + accum1 = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(y[ib + 0].d)*GGML_FP16_TO_FP32(x[ib + 0].d)), + __lasx_xvffint_s_w(p_1), accum1); + accum2 = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(y[ib + 1].d)*GGML_FP16_TO_FP32(x[ib + 1].d)), + __lasx_xvffint_s_w(p_2), accum2); + } + + sumf = hsum_float_8(__lasx_xvfadd_s(accum1, accum2)); + +#endif + for (; ib < nb; ++ib) { + const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; + sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; + } + sumf += d * (sumi1 + sumi2); + } + *s = sumf; +} + +void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 
1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_K == 0); + + const block_iq4_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__loongarch_asx) + + const __m128i values128 = __lsx_vld((const __m128i*)kvalues_iq4nl, 0); + + __m256 accum = (__m256)__lasx_xvldi(0); + + for (int ibl = 0; ibl < nb; ++ibl) { + const uint8_t * qs = x[ibl].qs; + const int8_t * q8 = y[ibl].qs; + uint16_t sh = x[ibl].scales_h; + __m256i sumi1 = __lasx_xvldi(0); + __m256i sumi2 = __lasx_xvldi(0); + for (int ib = 0; ib < QK_K/32; ib += 2) { + const __m128i q4bits_1 = __lsx_vld((const __m128i*)qs, 0); qs += 16; + const __m128i q4bits_2 = __lsx_vld((const __m128i*)qs, 0); qs += 16; + const __m256i q8b_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + const __m256i q8b_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; + const __m256i q4b_1 = lasx_insertf128(__lsx_vshuf_b(values128, values128, __lsx_vsrli_b(q4bits_1, 4)), + __lsx_vshuf_b(values128, values128, __lsx_vandi_b(q4bits_1, 0xf))); + const __m256i q4b_2 = lasx_insertf128(__lsx_vshuf_b(values128, values128, __lsx_vsrli_b(q4bits_2, 4)), + __lsx_vshuf_b(values128, values128, __lsx_vandi_b(q4bits_2, 0xf))); + const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); + const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); + const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; + const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; + sh >>= 4; + const __m256i p_1 = lasx_madd_h(p16_1, __lasx_xvreplgr2vr_h(ls1)); + const __m256i p_2 = lasx_madd_h(p16_2, __lasx_xvreplgr2vr_h(ls2)); + sumi1 = __lasx_xvadd_w(p_1, sumi1); + sumi2 = __lasx_xvadd_w(p_2, sumi2); + } + accum = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d), + __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accum); + } + + *s = hsum_float_8(accum); + +#else + float sumf = 0; + for (int ibl = 0; ibl < nb; ++ibl) { + const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + uint16_t h = x[ibl].scales_h; + const uint8_t * qs = x[ibl].qs; + const int8_t * q8 = y[ibl].qs; + for (int ib = 0; ib < QK_K/32; ib += 2) { + const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30); + const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30); + h >>= 4; + const float d1 = d4d8*(ls1 - 32); + const float d2 = d4d8*(ls2 - 32); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d1 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + sumi1 = sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d2 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + } + } + *s = sumf; +#endif +} + diff --git a/ggml/src/ggml-cpu/arch/powerpc/quants.c b/ggml/src/ggml-cpu/arch/powerpc/quants.c new file mode 100644 index 0000000000000..ce4e47a863994 --- /dev/null +++ b/ggml/src/ggml-cpu/arch/powerpc/quants.c @@ -0,0 +1,2731 @@ +#define GGML_COMMON_IMPL_C +#include "ggml-common.h" +#include "ggml-quants.h" +#include "ggml-impl.h" +#include "ggml-cpu.h" + +#include "../../quants.h" +#include "../../ggml-cpu-impl.h" + +#include <math.h> +#include <string.h> +#include <assert.h> +#include <float.h> +#include <stdlib.h> // for qsort +#include <stdio.h> // for GGML_ASSERT + +#define GROUP_MAX_EPS 1e-15f +#define GROUP_MAX_EPS_IQ3_XXS 1e-8f +#define GROUP_MAX_EPS_IQ2_S 1e-8f +#define GROUP_MAX_EPS_IQ1_M 1e-7f +#define
GROUP_MAX_EPS_IQ1_S 1e-12f + +#define UNUSED GGML_UNUSED + +#if defined(__POWER9_VECTOR__) +#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s +#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s) +#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s) +#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s) +#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s) +#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s) +#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s) +#define B8(c,s ) B7(c,s, c), B7(c,s, s) + +// precomputed tables for expanding 8bits to 8 bytes: +static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4 +static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 +#endif + +void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__POWER9_VECTOR__) + for (int i = 0; i < nb; i++) { + vector float srcv [8]; + vector float asrcv[8]; + vector float amaxv[8]; + vector signed int vi[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); + + const float amax = MAX(MAX(vec_extract(amaxv[0], 0), + vec_extract(amaxv[0], 1)), + MAX(vec_extract(amaxv[0], 2), + vec_extract(amaxv[0], 3))); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + const vector float vid = vec_splats(id); + + y[i].d = GGML_FP32_TO_FP16(d); + + for (int j = 0; j < 8; j++) { + const vector float v = vec_round(vec_mul(srcv[j], vid)); + vi[j] = vec_cts(v, 0); + } + vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); + vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_0_ref(x, y, k); +#endif +} + +void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK8_1 == 0); + const int nb = k / QK8_1; + + block_q8_1 * GGML_RESTRICT y = vy; + +#if defined(__POWER9_VECTOR__) + for (int i = 0; i < nb; i++) { + vector float srcv [8]; + vector float asrcv[8]; + vector float amaxv[8]; + vector signed int vi[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); + + const float amax = MAX(MAX(vec_extract(amaxv[0], 0), + vec_extract(amaxv[0], 1)), + MAX(vec_extract(amaxv[0], 2), + vec_extract(amaxv[0], 3))); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 
1.0f/d : 0.0f; + const vector float vid = vec_splats(id); + + y[i].d = GGML_FP32_TO_FP16(d); + + vector int accv = vec_splats(0); + + for (int j = 0; j < 8; j++) { + const vector float v = vec_round(vec_mul(srcv[j], vid)); + vi[j] = vec_cts(v, 0); + + accv = vec_add(accv, vi[j]); + } + vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); + vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); + + accv = vec_add(accv, vec_sld(accv, accv, 4)); + accv = vec_add(accv, vec_sld(accv, accv, 8)); + y[i].s = GGML_FP32_TO_FP16(d * vec_extract(accv, 0)); + } + +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_1_ref(x, y, k); +#endif +} + + +//===================================== Dot products ================================= + +void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector signed int v0 = vec_splats((int32_t)0); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector signed char v8 = vec_splats((signed char)0x8); + + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 8 + for (; ib < nb; ++ib) { + __builtin_prefetch(x[ib].qs, 0, 1); + __builtin_prefetch(y[ib].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vd = vec_mul(vxd, vyd); + + vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); + vector signed char q8y0 = vec_xl( 0, y[ib].qs); + vector signed char q8y1 = vec_xl(16, y[ib].qs); + + vector signed char q4x0 = vec_and(qxs, lowMask); + vector signed char q4x1 = vec_sr(qxs, v4); + + q4x0 = vec_sub(q4x0, v8); + q4x1 = vec_sub(q4x1, v8); + + vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); + + vector signed int vsumi0 = v0; + + vsumi0 = vec_sum4s(qv0, vsumi0); + vsumi0 = vec_sum4s(qv1, vsumi0); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + sumf = vec_extract(vsumf0, 0); + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F) - 8; + const int v1 = (x[ib].qs[j] >> 4) - 8; + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + } + + *s = sumf; +} + +void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__POWER9_VECTOR__) + const vector signed char lowMask 
= vec_splats((signed char)0xF); + const vector signed int v0 = vec_splats((int32_t)0); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (; ib < nb; ++ib) { + __builtin_prefetch(x[ib].qs, 0, 1); + __builtin_prefetch(y[ib].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[ib].m)); + vector float vys = {GGML_FP16_TO_FP32(y[ib].s), 0.0f, 0.0f, 0.0f}; + vsumf0 = vec_madd(vxmin, vys, vsumf0); + + vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); + vector signed char q8y0 = vec_xl( 0, y[ib].qs); + vector signed char q8y1 = vec_xl(16, y[ib].qs); + + vector unsigned char q4x0 = (vector unsigned char)vec_and(qxs, lowMask); + vector unsigned char q4x1 = (vector unsigned char)vec_sr(qxs, v4); + + vector signed int vsumi0 = v0; + + vsumi0 = vec_msum(q8y0, q4x0, vsumi0); + vsumi0 = vec_msum(q8y1, q4x1, vsumi0); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + sumf = vec_extract(vsumf0, 0); + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F); + const int v1 = (x[ib].qs[j] >> 4); + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)4); + + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (; ib < nb; ++ib) { + __builtin_prefetch(x[ib].qs, 0, 1); + __builtin_prefetch(y[ib].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vd = vec_mul(vxd, vyd); + + vector signed long long aux64x2_0 = {(uint64_t)(table_b2b_1[x[ib].qh[0]]), (uint64_t)(table_b2b_1[x[ib].qh[1]])}; + vector signed long long aux64x2_1 = {(uint64_t)(table_b2b_1[x[ib].qh[2]]), (uint64_t)(table_b2b_1[x[ib].qh[3]])}; + + vector signed char qh0 = (vector signed char)aux64x2_0; + vector signed char qh1 = (vector signed char)aux64x2_1; + + vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); + + vector signed char q5x0 = vec_sub(vec_and (qxs, lowMask), qh0); + vector signed char q5x1 = vec_sub(vec_sr(qxs, v4), qh1); + + vector signed char q8y0 = vec_xl( 0, y[ib].qs); + vector signed char q8y1 = vec_xl( 16, y[ib].qs); + + vector signed short qv0 = vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1)); + + qv0 = vec_add(qv0, qv1); + 
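+ // sign-extend both halves of the combined 16-bit products to 32-bit lanes and sum them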
+ vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + sumf = vec_extract(vsumf0, 0); + +#endif + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; + const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); + + const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); + const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + } + + *s = sumf; +} + +void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_1); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + +#if defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector signed int v0 = vec_splats((int32_t)0); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (; ib < nb; ++ib) { + __builtin_prefetch(x[ib].qs, 0, 1); + __builtin_prefetch(y[ib].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[ib].m)); + vector float vys = {GGML_FP16_TO_FP32(y[ib].s), 0.f, 0.f, 0.f}; + vsumf0 = vec_madd(vxmin, vys, vsumf0); + + vector unsigned long long aux64x2_0 = {(uint64_t)(table_b2b_0[x[ib].qh[0]]), (uint64_t)(table_b2b_0[x[ib].qh[1]])}; + vector unsigned long long aux64x2_1 = {(uint64_t)(table_b2b_0[x[ib].qh[2]]), (uint64_t)(table_b2b_0[x[ib].qh[3]])}; + + vector signed char qh0 = (vector signed char)aux64x2_0; + vector signed char qh1 = (vector signed char)aux64x2_1; + + vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); + + vector unsigned char q5x0 = (vector unsigned char)vec_or(vec_and(qxs, lowMask), qh0); + vector unsigned char q5x1 = (vector unsigned char)vec_or(vec_sr(qxs, v4), qh1); + + vector signed char q8y0 = vec_xl( 0, y[ib].qs); + vector signed char q8y1 = vec_xl( 16, y[ib].qs); + + vector signed int vsumi0 = v0; + + vsumi0 = vec_msum(q8y0, q5x0, vsumi0); + vsumi0 = vec_msum(q8y1, q5x1, vsumi0); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + sumf = vec_extract(vsumf0, 0); + +#endif + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; + const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; + + const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; + const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j 
+ qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q8_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__POWER9_VECTOR__) + const vector signed int v0 = vec_splats((int32_t)0); + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 8 + for (; ib < nb; ++ib) { + __builtin_prefetch(x[ib].qs, 0, 1); + __builtin_prefetch(y[ib].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vd = vec_mul(vxd, vyd); + + vector signed char q8x0 = vec_xl( 0, x[ib].qs); + vector signed char q8x1 = vec_xl(16, x[ib].qs); + vector signed char q8y0 = vec_xl( 0, y[ib].qs); + vector signed char q8y1 = vec_xl(16, y[ib].qs); + + vector signed short qv0 = vec_mule(q8x0, q8y0); + vector signed short qv1 = vec_mulo(q8x0, q8y0); + vector signed short qv2 = vec_mule(q8x1, q8y1); + vector signed short qv3 = vec_mulo(q8x1, q8y1); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + + vsumi0 = vec_sum4s(qv0, vsumi0); + vsumi1 = vec_sum4s(qv1, vsumi1); + vsumi0 = vec_sum4s(qv2, vsumi0); + vsumi1 = vec_sum4s(qv3, vsumi1); + + vsumi0 = vec_add(vsumi0, vsumi1); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + sumf = vec_extract(vsumf0, 0); + +#endif + for (; ib < nb; ++ib) { + int sumi = 0; + + for (int j = 0; j < qk; j++) { + sumi += x[ib].qs[j]*y[ib].qs[j]; + } + + sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + } + + *s = sumf; +} + +void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q2_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0x3); + const vector signed char lowScaleMask = vec_splats((signed char)0xF); + const vector int v0 = vec_splats((int32_t)0); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vdmin = vec_mul(vxmin, vyd); + + vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); + vector signed short q8ysums1 = vec_xl(16, y[i].bsums); + + vector signed char q2xmins = (vector signed char)vec_xl( 
0, x[i].scales); + vector signed char vscales = vec_and(q2xmins, lowScaleMask); + + q2xmins = vec_sr(q2xmins, v4); + vector signed short q2xmins0 = vec_unpackh(q2xmins); + vector signed short q2xmins1 = vec_unpackl(q2xmins); + + vector signed int prod0 = vec_mule(q2xmins0, q8ysums0); + vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0); + vector signed int prod2 = vec_mule(q2xmins1, q8ysums1); + vector signed int prod3 = vec_mulo(q2xmins1, q8ysums1); + + vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); + vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); + vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + vector signed int vsumi4 = v0; + vector signed int vsumi5 = v0; + vector signed int vsumi6 = v0; + vector signed int vsumi7 = v0; + + const uint8_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + for (int j = 0; j < QK_K/128; ++j) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q2); + vector signed char qxs1 = (vector signed char)vec_xl(16, q2); + q2 += 32; + + vector unsigned char q2x00 = (vector unsigned char)vec_and(qxs0, lowMask); + vector unsigned char q2x01 = (vector unsigned char)vec_and(vec_sr(qxs0, v2), lowMask); + vector unsigned char q2x02 = (vector unsigned char)vec_and(vec_sr(qxs0, v4), lowMask); + vector unsigned char q2x03 = (vector unsigned char)vec_and(vec_sr(qxs0, v6), lowMask); + vector unsigned char q2x10 = (vector unsigned char)vec_and(qxs1, lowMask); + vector unsigned char q2x11 = (vector unsigned char)vec_and(vec_sr(qxs1, v2), lowMask); + vector unsigned char q2x12 = (vector unsigned char)vec_and(vec_sr(qxs1, v4), lowMask); + vector unsigned char q2x13 = (vector unsigned char)vec_and(vec_sr(qxs1, v6), lowMask); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl( 16, q8); + vector signed char q8y01 = vec_xl( 32, q8); + vector signed char q8y11 = vec_xl( 48, q8); + vector signed char q8y02 = vec_xl( 64, q8); + vector signed char q8y12 = vec_xl( 80, q8); + vector signed char q8y03 = vec_xl( 96, q8); + vector signed char q8y13 = vec_xl(112, q8); + q8 += 128; + + vector signed int qv0 = vec_msum(q8y00, q2x00, v0); + vector signed int qv1 = vec_msum(q8y01, q2x01, v0); + vector signed int qv2 = vec_msum(q8y02, q2x02, v0); + vector signed int qv3 = vec_msum(q8y03, q2x03, v0); + vector signed int qv4 = vec_msum(q8y10, q2x10, v0); + vector signed int qv5 = vec_msum(q8y11, q2x11, v0); + vector signed int qv6 = vec_msum(q8y12, q2x12, v0); + vector signed int qv7 = vec_msum(q8y13, q2x13, v0); + + vector signed short vscales_07 = vec_unpackh(vscales); + vector signed int vscales_03 = vec_unpackh(vscales_07); + vector signed int vscales_47 = vec_unpackl(vscales_07); + vector signed int vs0 = vec_splat(vscales_03, 0); + vector signed int vs1 = vec_splat(vscales_03, 1); + vector signed int vs2 = vec_splat(vscales_03, 2); + vector signed int vs3 = vec_splat(vscales_03, 3); + vector signed int vs4 = vec_splat(vscales_47, 0); + vector signed int vs5 = vec_splat(vscales_47, 1); + vector signed int vs6 = vec_splat(vscales_47, 2); + vector signed int vs7 = vec_splat(vscales_47, 3); + vscales = vec_sld(vscales, vscales, 8); + + vsumi0 = vec_add(vec_mul(qv0, vs0), vsumi0); + vsumi1 = vec_add(vec_mul(qv1, vs2), vsumi1); + vsumi2 = 
vec_add(vec_mul(qv2, vs4), vsumi2); + vsumi3 = vec_add(vec_mul(qv3, vs6), vsumi3); + vsumi4 = vec_add(vec_mul(qv4, vs1), vsumi4); + vsumi5 = vec_add(vec_mul(qv5, vs3), vsumi5); + vsumi6 = vec_add(vec_mul(qv6, vs5), vsumi6); + vsumi7 = vec_add(vec_mul(qv7, vs7), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + +#else + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const uint8_t * q2 = x[i].qs; + const int8_t * q8 = y[i].qs; + const uint8_t * sc = x[i].scales; + + int summs = 0; + for (int j = 0; j < 16; ++j) { + summs += y[i].bsums[j] * (sc[j] >> 4); + } + + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + int isum = 0; + int is = 0; + int d; + for (int k = 0; k < QK_K/128; ++k) { + int shift = 0; + for (int j = 0; j < 4; ++j) { + d = sc[is++] & 0xF; + int isuml = 0; + for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + d = sc[is++] & 0xF; + isuml = 0; + for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + shift += 2; + q8 += 32; + } + q2 += 32; + } + sumf += dall * isum - dmin * summs; + } + *s = sumf; +#endif +} + +void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const uint32_t kmask1 = 0x03030303; + const uint32_t kmask2 = 0x0f0f0f0f; + + const block_q3_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0x3); + const vector signed char lowMask1 = vec_splats((int8_t)0xf); + const vector signed char lowMask2 = vec_splats((int8_t)0x30); + const vector int v0 = vec_splats((int32_t)0); + const vector signed char v1 = vec_splats((signed char)0x1); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v3 = vec_splats((unsigned char)0x3); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector signed char off = vec_splats((signed char)0x20); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + UNUSED(kmask1); + UNUSED(kmask2); + + vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); + vector signed char u1 = vec_and(u0, lowMask1); + vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); + vector signed char u3 = (vector signed 
char)vec_mergeh((vector signed int)u2, (vector signed int)vec_sr(u2, v2)); + vector signed char u30 = vec_sl(vec_and(u3, lowMask), v4); + vector signed char u31 = vec_and(u3, lowMask2); + + u1 = vec_or(u1, u30); + u2 = vec_or(vec_sr(u0, v4), u31); + + vector signed char vscales = (vector signed char)vec_mergeh((vector signed long long)u1, (vector signed long long)u2); + vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].hmask); + vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].hmask); + + vscales = vec_sub(vscales, off); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + vector signed int vsumi4 = v0; + vector signed int vsumi5 = v0; + vector signed int vsumi6 = v0; + vector signed int vsumi7 = v0; + + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + for (int j = 0; j < QK_K/128; ++j) { + __builtin_prefetch(q3, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q3); + vector signed char qxs1 = (vector signed char)vec_xl(16, q3); + q3 += 32; + + //the low 2 bits + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask); + vector signed char qxs02 = vec_and(vec_sr(qxs0, v4), lowMask); + vector signed char qxs03 = vec_and(vec_sr(qxs0, v6), lowMask); + vector signed char qxs10 = vec_and(qxs1, lowMask); + vector signed char qxs11 = vec_and(vec_sr(qxs1, v2), lowMask); + vector signed char qxs12 = vec_and(vec_sr(qxs1, v4), lowMask); + vector signed char qxs13 = vec_and(vec_sr(qxs1, v6), lowMask); + + //the 3rd bit + vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2); + vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, (vector unsigned char)v1)), v2); + vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2); + vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v3)), v2); + vector signed char qxh10 = vec_sl(vec_andc(v1, qxhs1), v2); + vector signed char qxh11 = vec_sl(vec_andc(v1, vec_sr(qxhs1, (vector unsigned char)v1)), v2); + vector signed char qxh12 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v2)), v2); + vector signed char qxh13 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v3)), v2); + qxhs0 = vec_sr(qxhs0, v4); + qxhs1 = vec_sr(qxhs1, v4); + + vector signed char q3x00 = vec_sub(qxs00, qxh00); + vector signed char q3x01 = vec_sub(qxs01, qxh01); + vector signed char q3x02 = vec_sub(qxs02, qxh02); + vector signed char q3x03 = vec_sub(qxs03, qxh03); + vector signed char q3x10 = vec_sub(qxs10, qxh10); + vector signed char q3x11 = vec_sub(qxs11, qxh11); + vector signed char q3x12 = vec_sub(qxs12, qxh12); + vector signed char q3x13 = vec_sub(qxs13, qxh13); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl( 16, q8); + vector signed char q8y01 = vec_xl( 32, q8); + vector signed char q8y11 = vec_xl( 48, q8); + vector signed char q8y02 = vec_xl( 64, q8); + vector signed char q8y12 = vec_xl( 80, q8); + vector signed char q8y03 = vec_xl( 96, q8); + vector signed char q8y13 = vec_xl(112, q8); + q8 += 128; + + vector signed short vscales_h = vec_unpackh(vscales); + vector signed short vs0 = vec_splat(vscales_h, 0); + vector signed short vs1 = vec_splat(vscales_h, 1); + vector signed short vs2 = vec_splat(vscales_h, 2); + vector signed short vs3 = vec_splat(vscales_h, 3); + vector signed short vs4 = vec_splat(vscales_h, 4); + vector signed short vs5 = vec_splat(vscales_h, 5); + vector 
signed short vs6 = vec_splat(vscales_h, 6); + vector signed short vs7 = vec_splat(vscales_h, 7); + vscales = vec_sld(vscales, vscales, 8); + + vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00)); + vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01)); + vector signed short qv02 = vec_add(vec_mule(q3x02, q8y02), vec_mulo(q3x02, q8y02)); + vector signed short qv03 = vec_add(vec_mule(q3x03, q8y03), vec_mulo(q3x03, q8y03)); + vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10)); + vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11)); + vector signed short qv12 = vec_add(vec_mule(q3x12, q8y12), vec_mulo(q3x12, q8y12)); + vector signed short qv13 = vec_add(vec_mule(q3x13, q8y13), vec_mulo(q3x13, q8y13)); + + vsumi0 = vec_msum(qv00, vs0, vsumi0); + vsumi1 = vec_msum(qv01, vs2, vsumi1); + vsumi2 = vec_msum(qv02, vs4, vsumi2); + vsumi3 = vec_msum(qv03, vs6, vsumi3); + vsumi4 = vec_msum(qv10, vs1, vsumi4); + vsumi5 = vec_msum(qv11, vs3, vsumi5); + vsumi6 = vec_msum(qv12, vs5, vsumi6); + vsumi7 = vec_msum(qv13, vs7, vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + +#else + // scalar version + // This function is written like this so the compiler can manage to vectorize most of it + // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the + // manually vectorized version above. Every other version I tried would run at least 4 times slower. + // The ideal situation would be if we could just write the code once, and the compiler would + // automatically produce the best possible set of machine instructions, instead of us having to manually + // write vectorized versions for AVX, ARM_NEON, etc. + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + uint32_t auxs[4]; + const int8_t * scales = (const int8_t*)auxs; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].hmask; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 
0 : 4); + a += 32; m <<= 1; + q3 += 32; + } + a = aux8; + + memcpy(auxs, x[i].scales, 12); + uint32_t tmp = auxs[2]; + auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + for (int j = 0; j < QK_K/16; ++j) { + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; + +#endif + +} + +void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector signed char lowMask1 = vec_splats((int8_t)0x3f); + const vector signed char lowMask2 = vec_splats((int8_t)0x30); + const vector int v0 = vec_splats((int32_t)0); + const vector unsigned char v2 = vec_splats((uint8_t)2); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vdmin = vec_mul(vxmin, vyd); + + vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); + vector signed short q8ysums1 = vec_xl(16, y[i].bsums); + + UNUSED(kmask1); + UNUSED(kmask2); + UNUSED(kmask3); + UNUSED(utmp); + + vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); + vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2); + vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); + vector signed char u3 = vec_sr(u2, v4); + + vector signed char u30 = u1; + vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3); + + u1 = vec_and(u0, lowMask1); + u2 = vec_or(u30, u31); + + vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2); + + vector signed short vscales = vec_unpackh(utmps); + vector signed short q4xmins = vec_unpackl(utmps); + vector signed short q4xmins0 = vec_mergeh(q4xmins, q4xmins); + vector signed short q4xmins1 = vec_mergel(q4xmins, q4xmins); + + vector signed int prod0 = vec_mule(q4xmins0, q8ysums0); + vector signed int prod1 = vec_mule(q4xmins1, q8ysums1); + vector signed int prod2 = vec_mulo(q4xmins0, q8ysums0); + vector signed int prod3 = vec_mulo(q4xmins1, q8ysums1); + + vsumf0 = 
vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); + vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); + vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + for (int j = 0; j < QK_K/64; j+=2) { + __builtin_prefetch(q4, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q4); + vector signed char qxs1 = (vector signed char)vec_xl(16, q4); + vector signed char qxs2 = (vector signed char)vec_xl(32, q4); + vector signed char qxs3 = (vector signed char)vec_xl(48, q4); + q4 += 64; + + vector unsigned char q4x00 = (vector unsigned char)vec_and(qxs0, lowMask); + vector unsigned char q4x01 = (vector unsigned char)vec_sr(qxs0, v4); + vector unsigned char q4x10 = (vector unsigned char)vec_and(qxs1, lowMask); + vector unsigned char q4x11 = (vector unsigned char)vec_sr(qxs1, v4); + vector unsigned char q4x20 = (vector unsigned char)vec_and(qxs2, lowMask); + vector unsigned char q4x21 = (vector unsigned char)vec_sr(qxs2, v4); + vector unsigned char q4x30 = (vector unsigned char)vec_and(qxs3, lowMask); + vector unsigned char q4x31 = (vector unsigned char)vec_sr(qxs3, v4); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl( 16, q8); + vector signed char q8y01 = vec_xl( 32, q8); + vector signed char q8y11 = vec_xl( 48, q8); + vector signed char q8y20 = vec_xl( 64, q8); + vector signed char q8y30 = vec_xl( 80, q8); + vector signed char q8y21 = vec_xl( 96, q8); + vector signed char q8y31 = vec_xl(112, q8); + q8 += 128; + + vector signed int qv00 = vec_msum(q8y00, q4x00, v0); + vector signed int qv01 = vec_msum(q8y01, q4x01, v0); + vector signed int qv10 = vec_msum(q8y10, q4x10, v0); + vector signed int qv11 = vec_msum(q8y11, q4x11, v0); + vector signed int qv20 = vec_msum(q8y20, q4x20, v0); + vector signed int qv21 = vec_msum(q8y21, q4x21, v0); + vector signed int qv30 = vec_msum(q8y30, q4x30, v0); + vector signed int qv31 = vec_msum(q8y31, q4x31, v0); + + vector signed int vscales_h = vec_unpackh(vscales); + vector signed int vs0 = vec_splat(vscales_h, 0); + vector signed int vs1 = vec_splat(vscales_h, 1); + vector signed int vs2 = vec_splat(vscales_h, 2); + vector signed int vs3 = vec_splat(vscales_h, 3); + vscales = vec_sld(vscales, vscales, 8); + + vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0); + vsumi1 = vec_add(vec_mul(qv01, vs1), vsumi1); + vsumi2 = vec_add(vec_mul(qv20, vs2), vsumi2); + vsumi3 = vec_add(vec_mul(qv21, vs3), vsumi3); + + vsumi0 = vec_add(vec_mul(qv10, vs0), vsumi0); + vsumi1 = vec_add(vec_mul(qv11, vs1), vsumi1); + vsumi2 = vec_add(vec_mul(qv30, vs2), vsumi2); + vsumi3 = vec_add(vec_mul(qv31, vs3), vsumi3); + } + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t 
aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + a += 32; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + a += 32; q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector signed char lowMask1 = vec_splats((int8_t)0x3f); + const vector signed char lowMask2 = vec_splats((int8_t)0x30); + const vector int v0 = vec_splats((int32_t)0); + const vector unsigned char v1 = vec_splats((unsigned char)0x1); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v3 = vec_splats((unsigned char)0x3); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vdmin = vec_mul(vxmin, vyd); + + UNUSED(kmask1); + UNUSED(kmask2); + UNUSED(kmask3); + UNUSED(utmp); + + vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); + vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2); + vector signed char u2 = (vector signed 
char)vec_xl_len(x[i].scales + 8, 4); + vector signed char u3 = vec_sr(u2, v4); + + vector signed char u30 = u1; + vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3); + + u1 = vec_and(u0, lowMask1); + u2 = vec_or(u30, u31); + + vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2); + + vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); + vector signed short q8ysums1 = vec_xl(16, y[i].bsums); + + vector signed short vscales = vec_unpackh(utmps); + + vector signed short q5xmins = vec_unpackl(utmps); + vector signed short q5xmins0 = vec_mergeh(q5xmins, q5xmins); + vector signed short q5xmins1 = vec_mergel(q5xmins, q5xmins); + + vector signed int prod0 = vec_mule(q5xmins0, q8ysums0); + vector signed int prod1 = vec_mule(q5xmins1, q8ysums1); + vector signed int prod2 = vec_mulo(q5xmins0, q8ysums0); + vector signed int prod3 = vec_mulo(q5xmins1, q8ysums1); + + vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); + vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); + vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); + + vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh); + vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].qh); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + + const uint8_t * GGML_RESTRICT q5 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + for (int j = 0; j < QK_K/64; ++j) { + __builtin_prefetch(q5, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q5); + vector signed char qxs1 = (vector signed char)vec_xl(16, q5); + q5 += 32; + + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_sr(qxs0, v4); + vector signed char qxs10 = vec_and(qxs1, lowMask); + vector signed char qxs11 = vec_sr(qxs1, v4); + + vector signed char q5h00 = vec_sl(vec_and((vector signed char)v1, qxhs0), v4); + vector signed char q5h01 = vec_sl(vec_and((vector signed char)v2, qxhs0), v3); + vector signed char q5h10 = vec_sl(vec_and((vector signed char)v1, qxhs1), v4); + vector signed char q5h11 = vec_sl(vec_and((vector signed char)v2, qxhs1), v3); + qxhs0 = vec_sr(qxhs0, v2); + qxhs1 = vec_sr(qxhs1, v2); + + vector unsigned char q5x00 = (vector unsigned char)vec_or(q5h00, qxs00); + vector unsigned char q5x01 = (vector unsigned char)vec_or(q5h01, qxs01); + vector unsigned char q5x10 = (vector unsigned char)vec_or(q5h10, qxs10); + vector unsigned char q5x11 = (vector unsigned char)vec_or(q5h11, qxs11); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl(16, q8); + vector signed char q8y01 = vec_xl(32, q8); + vector signed char q8y11 = vec_xl(48, q8); + q8 += 64; + + vector signed int qv00 = vec_msum(q8y00, q5x00, v0); + vector signed int qv01 = vec_msum(q8y01, q5x01, v0); + vector signed int qv10 = vec_msum(q8y10, q5x10, v0); + vector signed int qv11 = vec_msum(q8y11, q5x11, v0); + + vector signed int vscales_h = vec_unpackh(vscales); + vector signed int vs0 = vec_splat(vscales_h, 0); + vector signed int vs1 = vec_splat(vscales_h, 1); + vscales = vec_sld(vscales, vscales, 12); + + vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0); + vsumi1 = vec_add(vec_mul(qv10, vs0), vsumi1); + vsumi2 = vec_add(vec_mul(qv01, vs1), vsumi2); + vsumi3 = vec_add(vec_mul(qv11, vs1), vsumi3); + } + + vsumf0 = 
vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q6_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector int v0 = vec_splats((int32_t)0); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v3 = vec_splats((unsigned char)0x3); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector signed char off = vec_splats((signed char)0x20); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); 
+ vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + vector signed int vsumi4 = v0; + vector signed int vsumi5 = v0; + vector signed int vsumi6 = v0; + vector signed int vsumi7 = v0; + + const uint8_t * GGML_RESTRICT q6 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT qs = x[i].scales; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + for (int j = 0; j < QK_K/128; ++j) { + __builtin_prefetch(q6, 0, 0); + __builtin_prefetch(qh, 0, 0); + __builtin_prefetch(q8, 0, 0); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q6); + vector signed char qxs1 = (vector signed char)vec_xl(16, q6); + vector signed char qxs2 = (vector signed char)vec_xl(32, q6); + vector signed char qxs3 = (vector signed char)vec_xl(48, q6); + q6 += 64; + + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_sr(qxs0, v4); + vector signed char qxs10 = vec_and(qxs1, lowMask); + vector signed char qxs11 = vec_sr(qxs1, v4); + vector signed char qxs20 = vec_and(qxs2, lowMask); + vector signed char qxs21 = vec_sr(qxs2, v4); + vector signed char qxs30 = vec_and(qxs3, lowMask); + vector signed char qxs31 = vec_sr(qxs3, v4); + + vector signed char qxhs0 = (vector signed char)vec_xl( 0, qh); + vector signed char qxhs1 = (vector signed char)vec_xl(16, qh); + qh += 32; + + vector signed char qxh00 = vec_sl(vec_and((vector signed char)v3, qxhs0), v4); + vector signed char qxh01 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v4)), v4); + vector signed char qxh10 = vec_sl(vec_and((vector signed char)v3, qxhs1), v4); + vector signed char qxh11 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v4)), v4); + vector signed char qxh20 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v2)), v4); + vector signed char qxh21 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v6)), v4); + vector signed char qxh30 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v2)), v4); + vector signed char qxh31 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v6)), v4); + + vector signed char q6x00 = vec_sub(vec_or(qxh00, qxs00), off); + vector signed char q6x01 = vec_sub(vec_or(qxh01, qxs01), off); + vector signed char q6x10 = vec_sub(vec_or(qxh10, qxs10), off); + vector signed char q6x11 = vec_sub(vec_or(qxh11, qxs11), off); + vector signed char q6x20 = vec_sub(vec_or(qxh20, qxs20), off); + vector signed char q6x21 = vec_sub(vec_or(qxh21, qxs21), off); + vector signed char q6x30 = vec_sub(vec_or(qxh30, qxs30), off); + vector signed char q6x31 = vec_sub(vec_or(qxh31, qxs31), off); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl( 16, q8); + vector signed char q8y20 = vec_xl( 32, q8); + vector signed char q8y30 = vec_xl( 48, q8); + vector signed char q8y01 = vec_xl( 64, q8); + vector signed char q8y11 = vec_xl( 80, q8); + vector signed char q8y21 = vec_xl( 96, q8); + vector signed char q8y31 = vec_xl(112, q8); + q8 += 128; + + vector signed short qv00 = vec_add(vec_mule(q6x00, q8y00), vec_mulo(q6x00, q8y00)); + vector signed short qv10 = vec_add(vec_mule(q6x10, q8y10), vec_mulo(q6x10, q8y10)); + vector signed short qv20 = vec_add(vec_mule(q6x20, q8y20), vec_mulo(q6x20, q8y20)); + vector signed short qv30 = 
vec_add(vec_mule(q6x30, q8y30), vec_mulo(q6x30, q8y30)); + vector signed short qv01 = vec_add(vec_mule(q6x01, q8y01), vec_mulo(q6x01, q8y01)); + vector signed short qv11 = vec_add(vec_mule(q6x11, q8y11), vec_mulo(q6x11, q8y11)); + vector signed short qv21 = vec_add(vec_mule(q6x21, q8y21), vec_mulo(q6x21, q8y21)); + vector signed short qv31 = vec_add(vec_mule(q6x31, q8y31), vec_mulo(q6x31, q8y31)); + + vector signed short vscales = vec_unpackh(vec_xl_len(qs, 8)); + qs += 8; + + vector signed short vs0 = vec_splat(vscales, 0); + vector signed short vs1 = vec_splat(vscales, 1); + vector signed short vs2 = vec_splat(vscales, 2); + vector signed short vs3 = vec_splat(vscales, 3); + vector signed short vs4 = vec_splat(vscales, 4); + vector signed short vs5 = vec_splat(vscales, 5); + vector signed short vs6 = vec_splat(vscales, 6); + vector signed short vs7 = vec_splat(vscales, 7); + + vsumi0 = vec_msum(qv00, vs0, vsumi0); + vsumi1 = vec_msum(qv01, vs4, vsumi1); + vsumi2 = vec_msum(qv10, vs1, vsumi2); + vsumi3 = vec_msum(qv11, vs5, vsumi3); + vsumi4 = vec_msum(qv20, vs2, vsumi4); + vsumi5 = vec_msum(qv21, vs6, vsumi5); + vsumi6 = vec_msum(qv30, vs3, vsumi6); + vsumi7 = vec_msum(qv31, vs7, vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + +#else + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; + } + a += 128; + q4 += 64; + qh += 32; + } + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/16; ++j) { + int scale = x[i].scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +#if defined (__POWER9_VECTOR__) +static const int8_t keven_signs_q2xs[1024] = { + 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, + 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, + 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 
-1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, + 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, + 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, + 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, + 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, + 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, + 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, + 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, + 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, + 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, + 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, + 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, + 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, + 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, + 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, + 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, + 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, + 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, + 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, + 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, + 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, + 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, + 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, + 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, + 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, + 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, + 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, + 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, + 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, + 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, +}; +#endif + +void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, 
int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__POWER9_VECTOR__) + const vector int v0 = vec_splats((int32_t)0); + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + uint32_t aux32[4]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + memcpy(aux32, q2, 4*sizeof(uint32_t)); + q2 += 8; + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1])}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3])}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9])}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11])}; + + vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127))}; + vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127))}; + vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127))}; + vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127))}; + + vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); + vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); + vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); + vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); + + const uint16_t ls0 = aux32[1] >> 28; + const uint16_t ls1 = aux32[3] >> 28; + + vector signed short vscales01 = vec_splats((int16_t)(2*ls0+1)); + vector signed short vscales23 = vec_splats((int16_t)(2*ls1+1)); + + vsumi0 = vec_msum(qv0, vscales01, vsumi0); + vsumi1 
= vec_msum(qv1, vscales01, vsumi1); + vsumi2 = vec_msum(qv2, vscales23, vsumi2); + vsumi3 = vec_msum(qv3, vscales23, vsumi3); + } + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.125f * vec_extract(vsumf0, 0); + +#else + + uint32_t aux32[2]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + memcpy(aux32, q2, 2*sizeof(uint32_t)); + q2 += 4; + const uint32_t ls = 2*(aux32[1] >> 28) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); + const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +#endif +} + +void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__POWER9_VECTOR__) + const vector int v0 = vec_splats((int32_t)0); + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const uint8_t * GGML_RESTRICT sc = x[i].scales; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + for (int j = 0; j < QK_K/64; ++j) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xs_grid + (q2[0] & 511)), *(const int64_t *)(iq2xs_grid + (q2[1] & 511))}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xs_grid + (q2[2] & 511)), *(const int64_t *)(iq2xs_grid + (q2[3] & 511))}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xs_grid + (q2[4] & 511)), *(const int64_t *)(iq2xs_grid + (q2[5] & 511))}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xs_grid + (q2[6] & 511)), *(const int64_t *)(iq2xs_grid + (q2[7] & 511))}; + + vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((q2[0] >> 9))), *(const int64_t *)(signs64 + ((q2[1] >> 9)))}; + vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((q2[2] >> 9))), *(const int64_t *)(signs64 + ((q2[3] >> 9)))}; + vector signed long long vsigns2 = 
{*(const int64_t *)(signs64 + ((q2[4] >> 9))), *(const int64_t *)(signs64 + ((q2[5] >> 9)))}; + vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((q2[6] >> 9))), *(const int64_t *)(signs64 + ((q2[7] >> 9)))}; + q2 += 8; + + vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); + vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); + vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); + vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); + + const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); + const uint16_t ls1 = (uint16_t)(sc[0] >> 4); + const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); + const uint16_t ls3 = (uint16_t)(sc[1] >> 4); + sc += 2; + + vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1)); + vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); + vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); + vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); + + vsumi0 = vec_msum(qv0, vscales0, vsumi0); + vsumi1 = vec_msum(qv1, vscales1, vsumi1); + vsumi2 = vec_msum(qv2, vscales2, vsumi2); + vsumi3 = vec_msum(qv3, vscales3, vsumi3); + } + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.125f * vec_extract(vsumf0, 0); + +#else + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const uint8_t * GGML_RESTRICT sc = x[i].scales; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1; + const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1; + int32_t sumi = 0; + for (int l = 0; l < 2; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); + const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += sumi * ls1; + sumi = 0; + for (int l = 2; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); + const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? 
-1 : 1); + } + q8 += 8; + } + bsum += sumi * ls2; + q2 += 4; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +#endif +} + +void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__POWER9_VECTOR__) + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; + + const vector int v0 = vec_splats((int32_t)0); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const vector unsigned char mask0 = vec_xl( 0, k_mask1); + const vector unsigned char mask1 = vec_xl(16, k_mask1); + const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + + const uint8_t * GGML_RESTRICT q2 = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); + const uint8_t * GGML_RESTRICT sc = x[i].scales; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq2s_grid + (q2[0] | ((qh[0] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[1] | ((qh[0] << 6) & 0x300)))}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq2s_grid + (q2[2] | ((qh[0] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[3] | ((qh[0] << 2) & 0x300)))}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq2s_grid + (q2[4] | ((qh[1] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[5] | ((qh[1] << 6) & 0x300)))}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq2s_grid + (q2[6] | ((qh[1] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[7] | ((qh[1] << 2) & 0x300)))}; + q2 += 8; + qh += 2; + + vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); + vector signed char vsigns23 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); + signs += 4; + + vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); + vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); + vector signed char vsigns2 = vec_perm(vsigns23, vsigns23, mask0); + vector signed char vsigns3 = vec_perm(vsigns23, vsigns23, mask1); + + vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); + vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); + vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); + vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); + + vector signed char q2x0 = 
vec_sub(vec_xor(vsigns0, (vector signed char)aux64x2_0), vsigns0); + vector signed char q2x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux64x2_1), vsigns1); + vector signed char q2x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux64x2_2), vsigns2); + vector signed char q2x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux64x2_3), vsigns3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); + + const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); + const uint16_t ls1 = (uint16_t)(sc[0] >> 4); + const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); + const uint16_t ls3 = (uint16_t)(sc[1] >> 4); + sc += 2; + + vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1)); + vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); + vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); + vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); + + vsumi0 = vec_msum(qv0, vscales0, vsumi0); + vsumi1 = vec_msum(qv1, vscales1, vsumi1); + vsumi2 = vec_msum(qv2, vscales2, vsumi2); + vsumi3 = vec_msum(qv3, vscales3, vsumi3); + } + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.125f * vec_extract(vsumf0, 0); + +#else + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint8_t * signs = qs + QK_K/8; + + int bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf); + int ls2 = 1 + 2*(x[i].scales[ib32] >> 4); + int sumi1 = 0, sumi2 = 0; + for (int l = 0; l < 2; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); + for (int j = 0; j < 8; ++j) { + sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + for (int l = 2; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); + for (int j = 0; j < 8; ++j) { + sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? 
-1 : 1); + } + q8 += 8; + } + bsum += ls1 * sumi1 + ls2 * sumi2; + qs += 4; + signs += 4; + } + + sumf += d * bsum; + } + + *s = 0.125f * sumf; + +#endif + +} + +void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__POWER9_VECTOR__) + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + const vector int v0 = vec_splats((int32_t)0); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint32_t * GGML_RESTRICT signs = (const uint32_t *)(x[i].qs + QK_K/4); + const int8_t * GGML_RESTRICT q8 = y[i].qs; + +#pragma GCC unroll 1 + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q3, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector unsigned int aux32x4_0 = {iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]}; + vector unsigned int aux32x4_1 = {iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]}; + vector unsigned int aux32x4_2 = {iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]}; + vector unsigned int aux32x4_3 = {iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]}; + q3 += 16; + + vector unsigned long long aux64x2_0 = {(uint64_t)(signs64[(signs[0] >> 0) & 127]), (uint64_t)(signs64[(signs[0] >> 7) & 127])}; + vector unsigned long long aux64x2_1 = {(uint64_t)(signs64[(signs[0] >> 14) & 127]), (uint64_t)(signs64[(signs[0] >> 21) & 127])}; + vector unsigned long long aux64x2_2 = {(uint64_t)(signs64[(signs[1] >> 0) & 127]), (uint64_t)(signs64[(signs[1] >> 7) & 127])}; + vector unsigned long long aux64x2_3 = {(uint64_t)(signs64[(signs[1] >> 14) & 127]), (uint64_t)(signs64[(signs[1] >> 21) & 127])}; + + vector signed char q3x0 = vec_mul((vector signed char)aux64x2_0, (vector signed char)aux32x4_0); + vector signed char q3x1 = vec_mul((vector signed char)aux64x2_1, (vector signed char)aux32x4_1); + vector signed char q3x2 = vec_mul((vector signed char)aux64x2_2, (vector signed char)aux32x4_2); + vector signed char q3x3 = vec_mul((vector signed char)aux64x2_3, (vector signed char)aux32x4_3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); + + const uint16_t ls0 = (uint16_t)(signs[0] >> 28); + const uint16_t ls1 = (uint16_t)(signs[1] >> 28); + signs += 2; + + vector signed 
short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); + vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); + + vsumi0 = vec_msum(qv0, vscales01, vsumi0); + vsumi1 = vec_msum(qv1, vscales01, vsumi1); + vsumi2 = vec_msum(qv2, vscales23, vsumi2); + vsumi3 = vec_msum(qv3, vscales23, vsumi3); + } + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.25f * vec_extract(vsumf0, 0); + +#else + + uint32_t aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t); + const uint32_t ls = 2*(aux32 >> 28) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]); + const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]); + const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + q3 += 8; + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.25f * sumf; +#endif +} + +void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__POWER9_VECTOR__) + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; + + const vector int v0 = vec_splats((int32_t)0); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const vector unsigned char mask0 = vec_xl( 0, k_mask1); + const vector unsigned char mask1 = vec_xl(16, k_mask1); + const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].signs); + const uint8_t * GGML_RESTRICT sc = x[i].scales; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector 
signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q3, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector unsigned int aux32x4_0 = {iq3s_grid[q3[ 0] | ((qh[0] << 8) & 256)], iq3s_grid[q3[ 1] | ((qh[0] << 7) & 256)], + iq3s_grid[q3[ 2] | ((qh[0] << 6) & 256)], iq3s_grid[q3[ 3] | ((qh[0] << 5) & 256)]}; + vector unsigned int aux32x4_1 = {iq3s_grid[q3[ 4] | ((qh[0] << 4) & 256)], iq3s_grid[q3[ 5] | ((qh[0] << 3) & 256)], + iq3s_grid[q3[ 6] | ((qh[0] << 2) & 256)], iq3s_grid[q3[ 7] | ((qh[0] << 1) & 256)]}; + vector unsigned int aux32x4_2 = {iq3s_grid[q3[ 8] | ((qh[1] << 8) & 256)], iq3s_grid[q3[ 9] | ((qh[1] << 7) & 256)], + iq3s_grid[q3[10] | ((qh[1] << 6) & 256)], iq3s_grid[q3[11] | ((qh[1] << 5) & 256)]}; + vector unsigned int aux32x4_3 = {iq3s_grid[q3[12] | ((qh[1] << 4) & 256)], iq3s_grid[q3[13] | ((qh[1] << 3) & 256)], + iq3s_grid[q3[14] | ((qh[1] << 2) & 256)], iq3s_grid[q3[15] | ((qh[1] << 1) & 256)]}; + q3 += 16; + qh += 2; + + vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); + vector signed char vsigns02 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); + signs += 4; + + vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); + vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); + vector signed char vsigns2 = vec_perm(vsigns02, vsigns02, mask0); + vector signed char vsigns3 = vec_perm(vsigns02, vsigns02, mask1); + + vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); + vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); + vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); + vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); + + vector signed char q3x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux32x4_0), vsigns0); + vector signed char q3x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux32x4_1), vsigns1); + vector signed char q3x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux32x4_2), vsigns2); + vector signed char q3x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux32x4_3), vsigns3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); + + const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); + const uint16_t ls1 = (uint16_t)(sc[0] >> 4); + sc ++; + + vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); + vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); + + vsumi0 = vec_msum(qv0, vscales01, vsumi0); + vsumi1 = vec_msum(qv1, vscales01, vsumi1); + vsumi2 = vec_msum(qv2, vscales23, vsumi2); + vsumi3 = vec_msum(qv3, vscales23, vsumi3); + } + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + 
vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + +#else + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint8_t * GGML_RESTRICT signs = x[i].signs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1; + const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256))); + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + qs += 8; + signs += 4; + bsum += sumi * ls1; + sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256))); + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + qs += 8; + signs += 4; + bsum += sumi * ls2; + } + sumf += d * bsum; + } + *s = sumf; +#endif +} + +void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__POWER9_VECTOR__) + const vector unsigned char v0 = vec_splats((unsigned char)0x0); + const vector unsigned short vsign = vec_splats((unsigned short)0x8000); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi8 = vec_splats((int32_t)0); + + const uint8_t * GGML_RESTRICT q1 = x[i].qs; + const uint16_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + const int16_t * GGML_RESTRICT qs = y[i].bsums; + + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q1, 0, 1); + __builtin_prefetch(qh, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq1s_grid + (q1[0] | ((qh[0] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[1] | ((qh[0] << 5) & 0x700)))}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq1s_grid + (q1[2] | ((qh[0] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[3] | ((qh[0] >> 1) & 0x700)))}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq1s_grid + (q1[4] | 
((qh[1] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[5] | ((qh[1] << 5) & 0x700)))}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq1s_grid + (q1[6] | ((qh[1] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[7] | ((qh[1] >> 1) & 0x700)))}; + q1 += 8; + + vector signed char q1x0 = (vector signed char)aux64x2_0; + vector signed char q1x1 = (vector signed char)aux64x2_1; + vector signed char q1x2 = (vector signed char)aux64x2_2; + vector signed char q1x3 = (vector signed char)aux64x2_3; + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q1x0, q8y0), vec_mulo(q1x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q1x1, q8y1), vec_mulo(q1x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q1x2, q8y2), vec_mulo(q1x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q1x3, q8y3), vec_mulo(q1x3, q8y3)); + + const uint16_t ls0 = (uint16_t)((qh[0] >> 12) & 7); + const uint16_t ls1 = (uint16_t)((qh[1] >> 12) & 7); + + vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); + vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); + vector signed short vscales = vec_sld(vscales23, vscales01, 8); + + vsumi0 = vec_msum(qv0, vscales01, vsumi0); + vsumi1 = vec_msum(qv1, vscales01, vsumi1); + vsumi2 = vec_msum(qv2, vscales23, vsumi2); + vsumi3 = vec_msum(qv3, vscales23, vsumi3); + + vector signed short q8ysums = vec_xl_len(qs, 8); + qs += 4; + q8ysums = vec_mergeh(q8ysums, (vector signed short)v0); + + vector signed short qxh = (vector signed short)vec_sld(vec_splats(qh[1]), vec_splats(qh[0]), 8); + qh += 2; + vector __bool short vsel = vec_cmpge(qxh, (vector signed short)v0); + + vector signed short q8ysum = vec_sel((vector signed short)vec_xor((vector unsigned short)q8ysums, vsign), q8ysums, vsel); + + vsumi8 = vec_add(vec_mule(q8ysum, vscales), vsumi8); + } + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + + vsumf0 = vec_madd(vec_ctf(vsumi8, 0), vec_mul(vd, vec_splats(IQ1S_DELTA)), vsumf0); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + +#else + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + int sumi = 0, sumi1 = 0; + for (int ib = 0; ib < QK_K/32; ++ib) { + const int ls = 2*((qh[ib] >> 12) & 7) + 1; + const int delta = qh[ib] & 0x8000 ? 
-1 : 1; + int lsum = 0; + for (int l = 0; l < 4; ++l) { + const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8))); + for (int j = 0; j < 8; ++j) { + lsum += q8[j] * grid[j]; + } + q8 += 8; + } + sumi += ls * lsum; + sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]); + qs += 4; + } + + sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); + } + + *s = sumf; + +#endif +} + +void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK4_NL == 0); + static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); + + const block_iq4_nl * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + const int nb = n / QK4_NL; + + int ib = 0; + float sumf = 0; + +#if defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector signed int v0 = vec_splats((int32_t)0); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + + const vector signed char values = vec_xl( 0, kvalues_iq4nl); + +#pragma GCC unroll 4 + for (; ib < nb; ++ib) { + __builtin_prefetch(x[ib].qs, 0, 1); + __builtin_prefetch(y[ib].qs, 0, 1); + + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vd = vec_mul(vxd, vyd); + + vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); + vector signed char q4x0 = vec_and(qxs, lowMask); + vector signed char q4x1 = vec_sr(qxs, v4); + + q4x0 = vec_perm(values, values, (vector unsigned char)q4x0); + q4x1 = vec_perm(values, values, (vector unsigned char)q4x1); + + vector signed char q8y0 = vec_xl( 0, y[ib].qs); + vector signed char q8y1 = vec_xl(16, y[ib].qs); + + vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + + vsumi0 = vec_sum4s(qv0, vsumi0); + vsumi1 = vec_sum4s(qv1, vsumi1); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + } + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + sumf = vec_extract(vsumf0, 0); + +#endif + for (; ib < nb; ++ib) { + const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; + sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; + } + sumf += d * (sumi1 + sumi2); + } + *s = sumf; +} + +void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_K == 0); + + const block_iq4_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector int v0 = vec_splats((int32_t)0); + const vector unsigned char v4 = 
vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const vector signed char values = vec_xl( 0, kvalues_iq4nl); + + for (int ibl = 0; ibl < nb; ++ibl) { + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ibl].d)); + vector float vyd = vec_splats(y[ibl].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = v0; + vector signed int vsumi1 = v0; + vector signed int vsumi2 = v0; + vector signed int vsumi3 = v0; + + uint16_t h = x[ibl].scales_h; + + const uint8_t * GGML_RESTRICT q4 = x[ibl].qs; + const uint8_t * GGML_RESTRICT sc = x[ibl].scales_l; + const int8_t * GGML_RESTRICT q8 = y[ibl].qs; + + for (int ib = 0; ib < QK_K/64; ib ++ ) { + __builtin_prefetch(q4, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q4); + vector signed char qxs1 = (vector signed char)vec_xl(16, q4); + q4 += 32; + + vector signed char q4x00 = (vector signed char)vec_and(qxs0, lowMask); + vector signed char q4x01 = (vector signed char)vec_sr(qxs0, v4); + vector signed char q4x10 = (vector signed char)vec_and(qxs1, lowMask); + vector signed char q4x11 = (vector signed char)vec_sr(qxs1, v4); + + q4x00 = vec_perm(values, values, (vector unsigned char)q4x00); + q4x01 = vec_perm(values, values, (vector unsigned char)q4x01); + q4x10 = vec_perm(values, values, (vector unsigned char)q4x10); + q4x11 = vec_perm(values, values, (vector unsigned char)q4x11); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q4x00, q8y0), vec_mulo(q4x00, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q4x01, q8y1), vec_mulo(q4x01, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q4x10, q8y2), vec_mulo(q4x10, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q4x11, q8y3), vec_mulo(q4x11, q8y3)); + + const uint16_t ls0 = (uint16_t)(((sc[0] & 0xf) | ((h << 4) & 0x30)) - 32); + const uint16_t ls1 = (uint16_t)(((sc[0] >> 4) | ((h << 2) & 0x30)) - 32); + h >>= 4; + sc ++; + + vector signed short vscales01 = vec_splats((int16_t)ls0); + vector signed short vscales23 = vec_splats((int16_t)ls1); + + vsumi0 = vec_msum(qv0, vscales01, vsumi0); + vsumi1 = vec_msum(qv1, vscales01, vsumi1); + vsumi2 = vec_msum(qv2, vscales23, vsumi2); + vsumi3 = vec_msum(qv3, vscales23, vsumi3); + } + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + +#else + float sumf = 0; + for (int ibl = 0; ibl < nb; ++ibl) { + const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + uint16_t h = x[ibl].scales_h; + const uint8_t * qs = x[ibl].qs; + const int8_t * q8 = y[ibl].qs; + for (int ib = 0; ib < QK_K/32; ib += 2) { + const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30); + const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30); + h >>= 4; + const float d1 = d4d8*(ls1 - 32); + const float d2 = d4d8*(ls2 - 32); + 
int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d1 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + sumi1 = sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d2 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + } + } + *s = sumf; +#endif +} + diff --git a/ggml/src/ggml-cpu/arch/riscv/quants.c b/ggml/src/ggml-cpu/arch/riscv/quants.c new file mode 100644 index 0000000000000..6f3aa94fbbe98 --- /dev/null +++ b/ggml/src/ggml-cpu/arch/riscv/quants.c @@ -0,0 +1,2068 @@ +#define GGML_COMMON_IMPL_C +#include "ggml-common.h" +#include "ggml-quants.h" +#include "ggml-impl.h" +#include "ggml-cpu.h" + +#include "../../quants.h" +#include "../../ggml-cpu-impl.h" + +#include <math.h> +#include <string.h> +#include <assert.h> +#include <float.h> +#include <stdlib.h> // for qsort +#include <stdio.h> // for GGML_ASSERT + +#define GROUP_MAX_EPS 1e-15f +#define GROUP_MAX_EPS_IQ3_XXS 1e-8f +#define GROUP_MAX_EPS_IQ2_S 1e-8f +#define GROUP_MAX_EPS_IQ1_M 1e-7f +#define GROUP_MAX_EPS_IQ1_S 1e-12f + +#define UNUSED GGML_UNUSED + +void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__riscv_v) + + size_t vl = QK8_0; + + for (int i = 0; i < nb; i++) { + // load elements + vfloat32m8_t v_x = __riscv_vle32_v_f32m8(x+i*QK8_0, vl); + + vfloat32m8_t vfabs = __riscv_vfabs_v_f32m8(v_x, vl); + vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl); + vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m8_f32m1(vfabs, tmp, vl); + float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = GGML_FP32_TO_FP16(d); + + vfloat32m8_t x0 = __riscv_vfmul_vf_f32m8(v_x, id, vl); + + // convert to integer + vint16m4_t vi = __riscv_vfncvt_x_f_w_i16m4(x0, vl); + vint8m2_t vs = __riscv_vncvt_x_x_w_i8m2(vi, vl); + + // store result + __riscv_vse8_v_i8m2(y[i].qs , vs, vl); + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_0_ref(x, y, k); +#endif +} + +void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK8_1 == 0); + const int nb = k / QK8_1; + + block_q8_1 * GGML_RESTRICT y = vy; + +#if defined(__riscv_v) + + size_t vl = QK8_1; + + for (int i = 0; i < nb; i++) { + // load elements + vfloat32m8_t v_x = __riscv_vle32_v_f32m8(x+i*QK8_1, vl); + + vfloat32m8_t vfabs = __riscv_vfabs_v_f32m8(v_x, vl); + vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl); + vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m8_f32m1(vfabs, tmp, vl); + float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); + + const float d = amax / ((1 << 7) - 1); + const float id = d ?
1.0f/d : 0.0f; + + y[i].d = GGML_FP32_TO_FP16(d); + + vfloat32m8_t x0 = __riscv_vfmul_vf_f32m8(v_x, id, vl); + + // convert to integer + vint16m4_t vi = __riscv_vfncvt_x_f_w_i16m4(x0, vl); + vint8m2_t vs = __riscv_vncvt_x_x_w_i8m2(vi, vl); + + // store result + __riscv_vse8_v_i8m2(y[i].qs , vs, vl); + + // compute sum for y[i].s + vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl); + vint16m1_t vwrs = __riscv_vwredsum_vs_i8m2_i16m1(vs, tmp2, vl); + + // set y[i].s + int sum = __riscv_vmv_x_s_i16m1_i16(vwrs); + y[i].s = GGML_FP32_TO_FP16(sum*d); + } + +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_1_ref(x, y, k); +#endif +} + +//===================================== Dot products ================================= + +void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__riscv_v) + size_t vl = qk / 2; + + for (; ib < nb; ++ib) { + // load elements + vuint8m1_t tx = __riscv_vle8_v_u8m1(x[ib].qs, vl); + + vint8m1_t y0 = __riscv_vle8_v_i8m1(y[ib].qs, vl); + vint8m1_t y1 = __riscv_vle8_v_i8m1(y[ib].qs+16, vl); + + // mask and store lower part of x, and then upper part + vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl); + vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl); + + vint8m1_t x_ai = __riscv_vreinterpret_v_u8m1_i8m1(x_a); + vint8m1_t x_li = __riscv_vreinterpret_v_u8m1_i8m1(x_l); + + // subtract offset + vint8m1_t v0 = __riscv_vsub_vx_i8m1(x_ai, 8, vl); + vint8m1_t v1 = __riscv_vsub_vx_i8m1(x_li, 8, vl); + + vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl); + vint16m2_t vec_mul2 = __riscv_vwmacc_vv_i16m2(vec_mul1, v1, y1, vl); + + vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); + vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl); + + int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); + + sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + } + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F) - 8; + const int v1 = (x[ib].qs[j] >> 4) - 8; + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + } + + *s = sumf; +} + +void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__riscv_v) + size_t vl = qk / 2; + + for (; ib < nb; ++ib) { + // load elements + vuint8m1_t tx = __riscv_vle8_v_u8m1(x[ib].qs, vl); + + vint8m1_t y0 = __riscv_vle8_v_i8m1(y[ib].qs, vl); + vint8m1_t y1 = __riscv_vle8_v_i8m1(y[ib].qs+16, vl); + + // mask and store lower part of x, and then upper part + vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl); + vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl); + + vint8m1_t v0 = __riscv_vreinterpret_v_u8m1_i8m1(x_a); + vint8m1_t 
v1 = __riscv_vreinterpret_v_u8m1_i8m1(x_l); + + vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl); + vint16m2_t vec_mul2 = __riscv_vwmacc_vv_i16m2(vec_mul1, v1, y1, vl); + + vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); + vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl); + + int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); + + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F); + const int v1 = (x[ib].qs[j] >> 4); + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__riscv_v) + size_t vl; + size_t vlenb = __riscv_vlenb(); + + for (; ib < nb; ++ib) { + vl = qk / 2; + vuint8m1_t v0 = __riscv_vle8_v_u8m1(x[ib].qs, vl); + vint8m1_t v0l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(v0, 0x0F, vl)); + vint8m1_t v0h = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(v0, 4, vl)); + vint8m2_t v0c; + if (vlenb == 16) { + v0c = __riscv_vcreate_v_i8m1_i8m2(v0l, v0h); + } else { + v0l = __riscv_vslideup_vx_i8m1(v0l, v0h, 16, 32); + v0c = __riscv_vlmul_ext_v_i8m1_i8m2(v0l); + } + + vl = qk; + vbool4_t qh = __riscv_vlm_v_b4(x[ib].qh, vl); + qh = __riscv_vmnand_mm_b4(qh, qh, vl); + vint8m2_t v0f = __riscv_vsub_vx_i8m2_mu(qh, v0c, v0c, 0x10, vl); + vint8m2_t v1 = __riscv_vle8_v_i8m2(y[ib].qs, vl); + vint16m4_t mul = __riscv_vwmul_vv_i16m4(v0f, v1, vl); + vint32m1_t zero = __riscv_vmv_v_x_i32m1(0, vl); + vint32m1_t sum = __riscv_vwredsum_vs_i16m4_i32m1(mul, zero, vl); + int32_t sumi = __riscv_vmv_x_s_i32m1_i32(sum); + + sumf += (GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)) * sumi; + } + +#endif + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; + const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); + + const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); + const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + } + + *s = sumf; +} + +void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_1); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + +#if 
defined(__riscv_v) + size_t vl; + size_t vlenb = __riscv_vlenb(); + + for (; ib < nb; ++ib) { + vl = qk / 2; + vuint8m1_t v0 = __riscv_vle8_v_u8m1(x[ib].qs, vl); + vint8m1_t v0l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(v0, 0x0F, vl)); + vint8m1_t v0h = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(v0, 4, vl)); + vint8m2_t v0c; + if (vlenb == 16) { + v0c = __riscv_vcreate_v_i8m1_i8m2(v0l, v0h); + } else { + v0l = __riscv_vslideup_vx_i8m1(v0l, v0h, 16, 32); + v0c = __riscv_vlmul_ext_v_i8m1_i8m2(v0l); + } + + vl = qk; + vbool4_t qh = __riscv_vlm_v_b4(x[ib].qh, vl); + vint8m2_t v0f = __riscv_vor_vx_i8m2_mu(qh, v0c, v0c, 0x10, vl); + vint8m2_t v1 = __riscv_vle8_v_i8m2(y[ib].qs, vl); + vint16m4_t mul = __riscv_vwmul_vv_i16m4(v0f, v1, vl); + vint32m1_t zero = __riscv_vmv_v_x_i32m1(0, vl); + vint32m1_t sum = __riscv_vwredsum_vs_i16m4_i32m1(mul, zero, vl); + int32_t sumi = __riscv_vmv_x_s_i32m1_i32(sum); + + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + +#endif + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; + const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; + + const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; + const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q8_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__riscv_v) + size_t vl = qk; + + for (; ib < nb; ++ib) { + // load elements + vint8m2_t bx_0 = __riscv_vle8_v_i8m2(x[ib].qs, vl); + vint8m2_t by_0 = __riscv_vle8_v_i8m2(y[ib].qs, vl); + + vint16m4_t vw_mul = __riscv_vwmul_vv_i16m4(bx_0, by_0, vl); + + vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl); + vint32m1_t v_sum = __riscv_vwredsum_vs_i16m4_i32m1(vw_mul, v_zero, vl); + + int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum); + + sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + } + +#endif + for (; ib < nb; ++ib) { + int sumi = 0; + + for (int j = 0; j < qk; j++) { + sumi += x[ib].qs[j]*y[ib].qs[j]; + } + + sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + } + + *s = sumf; +} + +void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q2_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __riscv_xtheadvector + + float sumf = 0; + uint8_t atmp[16]; + + for (int i = 0; i < nb; ++i) { + const uint8_t * q2 = x[i].qs; + const int8_t * q8 = y[i].qs; + const uint8_t * sc = x[i].scales; + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * 
GGML_FP16_TO_FP32(x[i].dmin); + uint8_t *patmp = atmp; + int vsums; + int tmp; + __asm__ __volatile__( + "th.vsetvli zero, %[vl16], e8, m1\n\t" + "th.vmv.v.x v8, zero\n\t" + "th.vlb.v v1, (%[sc])\n\t" + "th.vand.vi v0, v1, 0xF\n\t" + "th.vsrl.vi v1, v1, 4\n\t" + "th.vsb.v v0, (%[scale])\n\t" + "th.vwaddu.vx v16, v1, zero\n\t" + "th.vsetvli zero, %[vl16], e16, m2\n\t" + "th.vlh.v v2, (%[bsums])\n\t" + "th.vwmul.vv v4, v16, v2\n\t" + "th.vsetvli zero, %[vl16], e32, m4\n\t" + "th.vredsum.vs v8, v4, v8\n\t" + "th.vmv.x.s %[vsums], v8" + : [tmp] "=&r" (tmp), [vsums] "=&r" (vsums) + : [sc] "r" (sc), [scale] "r" (atmp), [bsums] "r" (y[i].bsums) + , [vl16] "r" (16) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + sumf += dmin * vsums; + int isum = 0; + + for (int j = 0; j < QK_K/128; ++j) { + __asm__ __volatile__( + "th.vsetvli zero, %[vl32], e8, m2\n\t" + "th.vlb.v v0, (%[q2])\n\t" + "th.vsrl.vi v2, v0, 2\n\t" + "th.vsrl.vi v4, v0, 4\n\t" + "th.vsrl.vi v6, v0, 6\n\t" + "th.vand.vi v0, v0, 0x3\n\t" + "th.vand.vi v2, v2, 0x3\n\t" + "th.vand.vi v4, v4, 0x3\n\t" + "th.vsetvli zero, %[vl128], e8, m8\n\t" + "th.vlb.v v8, (%[q8])\n\t" + "th.vsetvli zero, %[vl64], e8, m4\n\t" + "th.vwmul.vv v16, v0, v8\n\t" + "th.vwmul.vv v24, v4, v12\n\t" + "th.vsetvli zero, %[vl16], e16, m2\n\t" + "th.vmv.v.x v0, zero\n\t" + "th.vwredsum.vs v10, v16, v0\n\t" + "th.vwredsum.vs v9, v18, v0\n\t" + "th.vwredsum.vs v8, v20, v0\n\t" + "th.vwredsum.vs v7, v22, v0\n\t" + "th.vwredsum.vs v11, v24, v0\n\t" + "th.vwredsum.vs v12, v26, v0\n\t" + "th.vwredsum.vs v13, v28, v0\n\t" + "th.vwredsum.vs v14, v30, v0\n\t" + "li %[tmp], 4\n\t" + "th.vsetvli zero, %[tmp], e32, m1\n\t" + "th.vslideup.vi v10, v9, 1\n\t" + "th.vslideup.vi v8, v7, 1\n\t" + "th.vslideup.vi v11, v12, 1\n\t" + "th.vslideup.vi v13, v14, 1\n\t" + "th.vslideup.vi v10, v8, 2\n\t" + "th.vslideup.vi v11, v13, 2\n\t" + "li %[tmp], 8\n\t" + "th.vsetvli zero, %[tmp], e32, m2\n\t" + "th.vlbu.v v12, (%[scale])\n\t" + "th.vmul.vv v10, v10, v12\n\t" + "th.vredsum.vs v0, v10, v0\n\t" + "th.vmv.x.s %[tmp], v0\n\t" + "add %[isum], %[isum], %[tmp]" + : [tmp] "=&r" (tmp), [isum] "+&r" (isum) + : [q2] "r" (q2), [scale] "r" (patmp), [q8] "r" (q8) + , [vl16] "r" (16), [vl32] "r" (32), [vl64] "r" (64), [vl128] "r" (128) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + q2 += 32; q8 += 128; patmp += 8; + } + + sumf += dall * isum; + } + + *s = sumf; + +#elif defined __riscv_v + + float sumf = 0; + uint8_t atmp[16]; + + const int vector_length = __riscv_vlenb() * 8; + uint8_t temp_01[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; + + switch (vector_length) { + case 256: + for (int i = 0; i < nb; ++i) { + const uint8_t * q2 = x[i].qs; + const int8_t * q8 = y[i].qs; + const uint8_t * sc = x[i].scales; + + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + size_t vl = 16; + + vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl); + vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl); + + vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl); + + vuint8mf2_t scales_2 = 
__riscv_vle8_v_u8mf2(sc, vl); + vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl); + vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl)); + vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl); + vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); + + sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums); + + vl = 32; + + vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); + vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl); + + uint8_t is = 0; + int isum = 0; + + for (int j = 0; j < QK_K / 128; ++j) { + // load Q2 + vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl); + + vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl); + vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03, vl); + vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03, vl); + vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03, vl); + + // duplicate scale elements for product + vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0 + is, vl), vl); + vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2 + is, vl), vl); + vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4 + is, vl), vl); + vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6 + is, vl), vl); + + vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl)); + vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl)); + vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl)); + vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl)); + + // load Q8 + vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl); + vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8 + 32, vl); + vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8 + 64, vl); + vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8 + 96, vl); + + vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl); + vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl); + vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl); + vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl); + + vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl); + vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl); + + isum += __riscv_vmv_x_s_i32m1_i32(isum1); + + q2 += 32; + q8 += 128; + is = 8; + } + + sumf += dall * isum; + } + break; + case 128: + for (int i = 0; i < nb; ++i) { + const uint8_t * q2 = x[i].qs; + const int8_t * q8 = y[i].qs; + const uint8_t * sc = x[i].scales; + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + uint8_t *patmp = atmp; + int vsums; + int tmp; + __asm__ __volatile__( + "vsetivli zero, 16, e8, m1\n\t" + "vmv.v.x v8, zero\n\t" + "vle8.v v1, (%[sc])\n\t" + "vand.vi v0, v1, 0xF\n\t" + "vsrl.vi v1, v1, 4\n\t" + "vse8.v v0, (%[scale])\n\t" + "vsetivli zero, 16, e16, m2\n\t" + "vle16.v v2, (%[bsums])\n\t" + "vzext.vf2 v0, v1\n\t" + "vwmul.vv v4, v0, v2\n\t" + "vsetivli zero, 16, e32, m4\n\t" + "vredsum.vs v8, v4, v8\n\t" + "vmv.x.s %[vsums], v8" + : [tmp] "=&r" (tmp), [vsums] "=&r" (vsums) + : [sc] "r" (sc), [scale] "r" (atmp), [bsums] "r" (y[i].bsums) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", 
"v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + sumf += dmin * vsums; + int isum = 0; + + for (int j = 0; j < QK_K/128; ++j) { + __asm__ __volatile__( + "vsetvli zero, %[vl32], e8, m2\n\t" + "vle8.v v0, (%[q2])\n\t" + "vsrl.vi v2, v0, 2\n\t" + "vsrl.vi v4, v0, 4\n\t" + "vsrl.vi v6, v0, 6\n\t" + "vand.vi v0, v0, 0x3\n\t" + "vand.vi v2, v2, 0x3\n\t" + "vand.vi v4, v4, 0x3\n\t" + "vsetvli zero, %[vl128], e8, m8\n\t" + "vle8.v v8, (%[q8])\n\t" + "vsetvli zero, %[vl64], e8, m4\n\t" + "vwmul.vv v16, v0, v8\n\t" + "vwmul.vv v24, v4, v12\n\t" + "vsetivli zero, 16, e16, m2\n\t" + "vmv.v.x v0, zero\n\t" + "vwredsum.vs v10, v16, v0\n\t" + "vwredsum.vs v9, v18, v0\n\t" + "vwredsum.vs v8, v20, v0\n\t" + "vwredsum.vs v7, v22, v0\n\t" + "vwredsum.vs v11, v24, v0\n\t" + "vwredsum.vs v12, v26, v0\n\t" + "vwredsum.vs v13, v28, v0\n\t" + "vwredsum.vs v14, v30, v0\n\t" + "vsetivli zero, 4, e32, m1\n\t" + "vslideup.vi v10, v9, 1\n\t" + "vslideup.vi v8, v7, 1\n\t" + "vslideup.vi v11, v12, 1\n\t" + "vslideup.vi v13, v14, 1\n\t" + "vslideup.vi v10, v8, 2\n\t" + "vslideup.vi v11, v13, 2\n\t" + "vsetivli zero, 8, e32, m2\n\t" + "vle8.v v15, (%[scale])\n\t" + "vzext.vf4 v12, v15\n\t" + "vmul.vv v10, v10, v12\n\t" + "vredsum.vs v0, v10, v0\n\t" + "vmv.x.s %[tmp], v0\n\t" + "add %[isum], %[isum], %[tmp]" + : [tmp] "=&r" (tmp), [isum] "+&r" (isum) + : [q2] "r" (q2), [scale] "r" (patmp), [q8] "r" (q8) + , [vl32] "r" (32), [vl64] "r" (64), [vl128] "r" (128) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + q2 += 32; q8 += 128; patmp += 8; + } + + sumf += dall * isum; + } + break; + default: + assert(false && "Unsupported vector length"); + break; + } + + *s = sumf; + +#else + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const uint8_t * q2 = x[i].qs; + const int8_t * q8 = y[i].qs; + const uint8_t * sc = x[i].scales; + + int summs = 0; + for (int j = 0; j < 16; ++j) { + summs += y[i].bsums[j] * (sc[j] >> 4); + } + + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + int isum = 0; + int is = 0; + int d; + for (int k = 0; k < QK_K/128; ++k) { + int shift = 0; + for (int j = 0; j < 4; ++j) { + d = sc[is++] & 0xF; + int isuml = 0; + for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + d = sc[is++] & 0xF; + isuml = 0; + for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + shift += 2; + q8 += 32; + } + q2 += 32; + } + sumf += dall * isum - dmin * summs; + } + *s = sumf; +#endif +} + +void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const uint32_t kmask1 = 0x03030303; + const uint32_t kmask2 = 0x0f0f0f0f; + + const block_q3_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __riscv_xtheadvector + + uint32_t utmp[4]; + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + const uint8_t * restrict q3 = x[i].qs; + const uint8_t * restrict qh = x[i].hmask; + const 
int8_t * restrict q8 = y[i].qs; + + int8_t * scale = (int8_t *)utmp; + int tmp; + __asm__ __volatile__( + "li %[tmp], 12\n\t" + "th.vsetvli zero, %[tmp], e8, m1\n\t" + "th.vlb.v v0, (%[s6b])\n\t" + "th.vmv.v.v v2, v0\n\t" + "li %[tmp], 2\n\t" + "th.vsetvli zero, %[tmp], e64, m1\n\t" + "th.vmv.v.x v9, %[sh]\n\t"\ + "th.vslidedown.vi v1, v0, 1\n\t" + "th.vslide1up.vx v8, v9, zero\n\t" // {0, 0, 4, 4} + "th.vslideup.vi v0, v2, 1\n\t" // {aux[0], aux[1], aux[0], aux[1]} + "li %[tmp], 4\n\t" + "th.vsetvli zero, %[tmp], e32, m1\n\t" + "th.vid.v v9\n\t" + "th.vmv.x.s %[tmp], v1\n\t" + "th.vsll.vi v9, v9, 1\n\t" // {0, 2, 4, 6} + "th.vmv.v.x v1, %[tmp]\n\t" // {aux[2], aux[2], aux[2], aux[2]} + "th.vsrl.vv v4, v1, v9\n\t" + "th.vsrl.vv v2, v0, v8\n\t" + "th.vand.vx v5, v4, %[kmask1]\n\t" + "th.vand.vx v3, v2, %[kmask2]\n\t" + "th.vsll.vi v6, v5, 4\n\t" + "th.vor.vv v7, v6, v3\n\t" + "li %[tmp], 16\n\t" + "th.vsetvli zero, %[tmp], e8, m1\n\t" + "th.vsub.vx v0, v7, %[c]\n\t" + "th.vsb.v v0, (%[scale])" + : [tmp] "=&r" (tmp) + : [sh] "r" (0x0000000400000004), [s6b] "r" (x[i].scales), [c] "r" (32) + , [scale] "r" (scale), [kmask1] "r" (kmask1), [kmask2] "r" (kmask2) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + + uint8_t m = 1; + int isum = 0; + for (int j = 0; j < QK_K; j += 128) { + __asm__ __volatile__( + // fixme: use v0p7 mask layout directly + "th.vsetvli zero, %[vl32], e8, m2\n\t" + "th.vlb.v v8, (%[q3])\n\t" + "th.vsrl.vi v10, v8, 2\n\t" + "th.vsrl.vi v12, v8, 4\n\t" + "th.vsrl.vi v14, v8, 6\n\t" + "th.vand.vi v8, v8, 3\n\t" + "th.vand.vi v10, v10, 3\n\t" + "th.vand.vi v12, v12, 3\n\t" + "th.vlb.v v2, (%[qh])\n\t" + "th.vand.vx v4, v2, %[m]\n\t" + "slli %[m], %[m], 1\n\t" + "th.vmseq.vx v0, v4, zero\n\t" + "th.vadd.vi v8, v8, -4, v0.t\n\t" + "th.vand.vx v4, v2, %[m]\n\t" + "slli %[m], %[m], 1\n\t" + "th.vmseq.vx v0, v4, zero\n\t" + "th.vadd.vi v10, v10, -4, v0.t\n\t" + "th.vand.vx v4, v2, %[m]\n\t" + "slli %[m], %[m], 1\n\t" + "th.vmseq.vx v0, v4, zero\n\t" + "th.vadd.vi v12, v12, -4, v0.t\n\t" + "th.vand.vx v4, v2, %[m]\n\t" + "slli %[m], %[m], 1\n\t" + "th.vmseq.vx v0, v4, zero\n\t" + "th.vadd.vi v14, v14, -4, v0.t\n\t" + "th.vsetvli zero, %[vl128], e8, m8\n\t" + "th.vlb.v v0, (%[q8])\n\t" + "th.vsetvli zero, %[vl64], e8, m4\n\t" + "th.vwmul.vv v16, v0, v8\n\t" + "th.vwmul.vv v24, v4, v12\n\t" + "li %[tmp], 16\n\t" + "th.vsetvli zero, %[tmp], e16, m2\n\t" + "th.vmv.v.x v0, zero\n\t" + "th.vwredsum.vs v10, v16, v0\n\t" + "th.vwredsum.vs v9, v18, v0\n\t" + "th.vwredsum.vs v8, v20, v0\n\t" + "th.vwredsum.vs v7, v22, v0\n\t" + "th.vwredsum.vs v11, v24, v0\n\t" + "th.vwredsum.vs v12, v26, v0\n\t" + "th.vwredsum.vs v13, v28, v0\n\t" + "th.vwredsum.vs v14, v30, v0\n\t" + "li %[tmp], 4\n\t" + "th.vsetvli zero, %[tmp], e32, m1\n\t" + "th.vslideup.vi v10, v9, 1\n\t" + "th.vslideup.vi v8, v7, 1\n\t" + "th.vslideup.vi v11, v12, 1\n\t" + "th.vslideup.vi v13, v14, 1\n\t" + "th.vslideup.vi v10, v8, 2\n\t" + "th.vslideup.vi v11, v13, 2\n\t" + "li %[tmp], 8\n\t" + "th.vsetvli zero, %[tmp], e32, m2\n\t" + "th.vlb.v v12, (%[scale])\n\t" + "th.vmul.vv v10, v10, v12\n\t" + "th.vredsum.vs v0, v10, v0\n\t" + "th.vmv.x.s %[tmp], v0\n\t" + "add %[isum], %[isum], %[tmp]" + : [tmp] "=&r" (tmp), [m] "+&r" (m), [isum] "+&r" (isum) + : [vl128] "r" (128), [vl64] "r" (64), [vl32] "r" (32) + , [q3] "r" (q3), [qh] "r" (qh), 
[scale] "r" (scale), [q8] "r" (q8) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + q3 += 32; q8 += 128; scale += 8; + } + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + sumf += d * isum; + } + + *s = sumf; + +#elif defined __riscv_v + + uint32_t utmp[4]; + float sumf = 0; + uint32_t aux[3]; + const int vector_length = __riscv_vlenb() * 8; + + switch (vector_length) { + case 256: + for (int i = 0; i < nb; ++i) { + + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].hmask; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(aux, x[i].scales, 12); + utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); + utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); + utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); + utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); + + int8_t * scale = (int8_t *)utmp; + for (int j = 0; j < 16; ++j) scale[j] -= 32; + + + size_t vl = 32; + uint8_t m = 1; + + vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); + vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl); + + int sum_t = 0; + + for (int j = 0; j < QK_K; j += 128) { + + vl = 32; + + // load Q3 + vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl); + + vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl)); + vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl)); + vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl)); + vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl)); + + // compute mask for subtraction + vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl); + vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl); + vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_mu(vmask_0, q3_0, q3_0, 0x4, vl); + m <<= 1; + + vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl); + vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl); + vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_mu(vmask_1, q3_1, q3_1, 0x4, vl); + m <<= 1; + + vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl); + vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl); + vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_mu(vmask_2, q3_2, q3_2, 0x4, vl); + m <<= 1; + + vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl); + vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl); + vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_mu(vmask_3, q3_3, q3_3, 0x4, vl); + m <<= 1; + + // load Q8 and take product with Q3 + vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl); + vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl); + vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl); + vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl); + + vl = 16; + + // retrieve lane to multiply with scale + vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl); + vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl); + vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl); + vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), 
(scale[3]), vl); + vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl); + vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl); + vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl); + vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl); + + vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl); + vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl); + vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl); + vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl); + + sum_t += __riscv_vmv_x_s_i32m1_i32(isum3); + + q3 += 32; q8 += 128; scale += 8; + + } + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + + sumf += d*sum_t; + + } + break; + case 128: + for (int i = 0; i < nb; ++i) { + const uint8_t * restrict q3 = x[i].qs; + const uint8_t * restrict qh = x[i].hmask; + const int8_t * restrict q8 = y[i].qs; + + int8_t * scale = (int8_t *)utmp; + int tmp; + __asm__ __volatile__( + "vsetivli zero, 12, e8, m1\n\t" + "vle8.v v0, (%[s6b])\n\t" + "vmv1r.v v2, v0\n\t" + "vsetivli zero, 2, e64, m1\n\t" + "vmv.v.x v9, %[sh]\n\t"\ + "vslidedown.vi v1, v0, 1\n\t" + "vslide1up.vx v8, v9, zero\n\t" // {0, 0, 4, 4} + "vslideup.vi v0, v2, 1\n\t" // {aux[0], aux[1], aux[0], aux[1]} + "vsetivli zero, 4, e32, m1\n\t" + "vid.v v9\n\t" + "vmv.x.s %[tmp], v1\n\t" + "vsll.vi v9, v9, 1\n\t" // {0, 2, 4, 6} + "vmv.v.x v1, %[tmp]\n\t" // {aux[2], aux[2], aux[2], aux[2]} + "vsrl.vv v4, v1, v9\n\t" + "vsrl.vv v2, v0, v8\n\t" + "vand.vx v5, v4, %[kmask1]\n\t" + "vand.vx v3, v2, %[kmask2]\n\t" + "vsll.vi v6, v5, 4\n\t" + "vor.vv v7, v6, v3\n\t" + "vsetivli zero, 16, e8, m1\n\t" + "vsub.vx v0, v7, %[c]\n\t" + "vse8.v v0, (%[scale])" + : [tmp] "=&r" (tmp) + : [sh] "r" (0x0000000400000004), [s6b] "r" (x[i].scales), [c] "r" (32) + , [scale] "r" (scale), [kmask1] "r" (kmask1), [kmask2] "r" (kmask2) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + + uint8_t m = 1; + int isum = 0; + for (int j = 0; j < QK_K; j += 128) { + __asm__ __volatile__( + "vsetvli zero, %[vl32], e8, m2, ta, mu\n\t" + "vle8.v v8, (%[q3])\n\t" + "vsrl.vi v10, v8, 2\n\t" + "vsrl.vi v12, v8, 4\n\t" + "vsrl.vi v14, v8, 6\n\t" + "vand.vi v8, v8, 3\n\t" + "vand.vi v10, v10, 3\n\t" + "vand.vi v12, v12, 3\n\t" + "vle8.v v2, (%[qh])\n\t" + "vand.vx v4, v2, %[m]\n\t" + "slli %[m], %[m], 1\n\t" + "vmseq.vx v0, v4, zero\n\t" + "vadd.vi v8, v8, -4, v0.t\n\t" + "vand.vx v4, v2, %[m]\n\t" + "slli %[m], %[m], 1\n\t" + "vmseq.vx v0, v4, zero\n\t" + "vadd.vi v10, v10, -4, v0.t\n\t" + "vand.vx v4, v2, %[m]\n\t" + "slli %[m], %[m], 1\n\t" + "vmseq.vx v0, v4, zero\n\t" + "vadd.vi v12, v12, -4, v0.t\n\t" + "vand.vx v4, v2, %[m]\n\t" + "slli %[m], %[m], 1\n\t" + "vmseq.vx v0, v4, zero\n\t" + "vadd.vi v14, v14, -4, v0.t\n\t" + "vsetvli zero, %[vl128], e8, m8\n\t" + "vle8.v v0, (%[q8])\n\t" + "vsetvli zero, %[vl64], e8, m4\n\t" + "vwmul.vv v16, v0, v8\n\t" + "vwmul.vv v24, v4, v12\n\t" + "vsetivli zero, 16, e16, m2\n\t" + "vmv.v.x v0, zero\n\t" + "vwredsum.vs v10, v16, v0\n\t" + "vwredsum.vs v9, v18, v0\n\t" + "vwredsum.vs 
v8, v20, v0\n\t" + "vwredsum.vs v7, v22, v0\n\t" + "vwredsum.vs v11, v24, v0\n\t" + "vwredsum.vs v12, v26, v0\n\t" + "vwredsum.vs v13, v28, v0\n\t" + "vwredsum.vs v14, v30, v0\n\t" + "vsetivli zero, 4, e32, m1\n\t" + "vslideup.vi v10, v9, 1\n\t" + "vslideup.vi v8, v7, 1\n\t" + "vslideup.vi v11, v12, 1\n\t" + "vslideup.vi v13, v14, 1\n\t" + "vslideup.vi v10, v8, 2\n\t" + "vslideup.vi v11, v13, 2\n\t" + "vsetivli zero, 8, e32, m2\n\t" + "vle8.v v15, (%[scale])\n\t" + "vsext.vf4 v12, v15\n\t" + "vmul.vv v10, v10, v12\n\t" + "vredsum.vs v0, v10, v0\n\t" + "vmv.x.s %[tmp], v0\n\t" + "add %[isum], %[isum], %[tmp]" + : [tmp] "=&r" (tmp), [m] "+&r" (m), [isum] "+&r" (isum) + : [vl128] "r" (128), [vl64] "r" (64), [vl32] "r" (32) + , [q3] "r" (q3), [qh] "r" (qh), [scale] "r" (scale), [q8] "r" (q8) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + q3 += 32; q8 += 128; scale += 8; + } + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + sumf += d * isum; + } + break; + default: + assert(false && "Unsupported vector length"); + break; + } + + *s = sumf; + +#else + // scalar version + // This function is written like this so the compiler can manage to vectorize most of it + // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the + // manually vectorized version above. Every other version I tried would run at least 4 times slower. + // The ideal situation would be if we could just write the code once, and the compiler would + // automatically produce the best possible set of machine instructions, instead of us having to manually + // write vectorized versions for AVX, ARM_NEON, etc. + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + uint32_t auxs[4]; + const int8_t * scales = (const int8_t*)auxs; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].hmask; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 
0 : 4); + a += 32; m <<= 1; + q3 += 32; + } + a = aux8; + + memcpy(auxs, x[i].scales, 12); + uint32_t tmp = auxs[2]; + auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + for (int j = 0; j < QK_K/16; ++j) { + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; + +#endif + +} + +void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined __riscv_xtheadvector + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + int tmp, tmp2, sumi; + __asm__ __volatile__( + "li %[t1], 12\n\t" + "th.vsetvli zero, %[t1], e8, m1\n\t" + "th.vlb.v v1, (%[s6b])\n\t" // {aux[0], aux[1], aux[2]} + "li %[t1], 4\n\t" + "th.vsetvli zero, %[t1], e32, m1\n\t" + "th.vslidedown.vi v2, v1, 2\n\t" + "th.vmv.v.v v3, v2\n\t" + "th.vslideup.vi v2, v3, 1\n\t" // {aux[2], aux[2]} + "li %[t1], 2\n\t" + "th.vsetvli zero, %[t1], e32, m1\n\t" + "th.vmv.v.i v4, 4\n\t" + "th.vand.vx v8, v1, %[kmask1]\n\t" + "th.vslide1up.vx v5, v4, zero\n\t" // {0, 4} + "th.vsrl.vi v6, v1, 6\n\t" + "th.vsrl.vv v7, v2, v5\n\t" + "th.vand.vx v0, v6, %[kmask3]\n\t" + "th.vand.vx v2, v7, %[kmask2]\n\t" + "th.vsll.vi v6, v0, 4\n\t" + "li %[t2], 8\n\t" + "addi %[t1], %[utmp], 4\n\t" + "th.vor.vv v1, v6, v2\n\t" + "th.vssw.v v8, (%[utmp]), %[t2]\n\t" + "th.vssw.v v1, (%[t1]), %[t2]\n\t" + "th.vsetvli zero, zero, e32, m2\n\t" // vl == 8 + "th.vlw.v v2, (%[bsums])\n\t" + "th.vsetvli zero, %[t2], e16, m1\n\t" + "th.vnsrl.vi v0, v2, 0\n\t" + "th.vnsrl.vi v1, v2, 16\n\t" + "th.vadd.vv v2, v0, v1\n\t" + "th.vlbu.v v4, (%[mins])\n\t" + "th.vwmul.vv v6, v4, v2\n\t" + "th.vmv.v.x v0, zero\n\t" + "th.vsetvli zero, %[t2], e32, m2\n\t" + "th.vredsum.vs v0, v6, v0\n\t" + "th.vmv.x.s %[sumi], v0" + : [t1] "=&r" (tmp), [t2] "=&r" (tmp2), [sumi] "=&r" (sumi) + : [bsums] "r" (y[i].bsums), [mins] "r" (mins), [utmp] "r" (utmp) + , [s6b] "r" (x[i].scales), [kmask1] "r" (kmask1) + , [kmask2] "r" (kmask2), [kmask3] "r" (kmask3) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + sumf -= dmin * sumi; + + const uint8_t * restrict q4 = x[i].qs; + const int8_t * 
restrict q8 = y[i].qs; + + sumi = 0; + const uint8_t * scale = scales; + + for (int j = 0; j < QK_K/128; ++j) { + int vl128 = 128, vl64 = 64, vl32 = 32; + __asm__ __volatile__( + "th.vsetvli zero, %[vl128], e8, m8\n\t" + "th.vlb.v v8, (%[q8])\n\t" + "th.vsetvli zero, %[vl64], e8, m4\n\t" + "th.vlb.v v0, (%[q4])\n\t" + "th.vsrl.vi v4, v0, 4\n\t" + "th.vand.vi v0, v0, 0xF\n\t" + "th.vsetvli zero, %[vl32], e8, m2\n\t" + "th.vwmul.vv v28, v6, v14\n\t" + "th.vwmul.vv v20, v4, v10\n\t" + "th.vwmul.vv v24, v2, v12\n\t" + "th.vwmul.vv v16, v0, v8\n\t" + "li %[tmp], 4\n\t" + "th.vsetvli zero, %[tmp], e32, m1\n\t" + "th.vlbu.v v1, (%[scale])\n\t" + "th.vmv.v.x v0, zero\n\t" + "th.vsetvli zero, %[vl32], e16, m4\n\t" + "th.vwredsum.vs v6, v24, v0\n\t" + "th.vwredsum.vs v7, v28, v0\n\t" + "th.vwredsum.vs v4, v16, v0\n\t" + "th.vwredsum.vs v5, v20, v0\n\t" + "th.vsetvli zero, %[tmp], e32, m1\n\t" + "th.vslideup.vi v6, v7, 1\n\t" + "th.vslideup.vi v4, v5, 1\n\t" + "th.vslideup.vi v4, v6, 2\n\t" + "th.vmul.vv v8, v4, v1\n\t" + "th.vredsum.vs v0, v8, v0\n\t" + "th.vmv.x.s %[tmp], v0\n\t" + "add %[sumi], %[sumi], %[tmp]" + : [tmp] "=&r" (tmp), [sumi] "+&r" (sumi) + : [vl128] "r" (vl128), [vl64] "r" (vl64), [vl32] "r" (vl32) + , [q4] "r" (q4), [q8] "r" (q8), [scale] "r" (scale) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + + q4 += 64; q8 += 128; scale += 4; + } + + sumf += d * sumi; + + } + + *s = sumf; + +#elif defined __riscv_v + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + float sumf = 0; + const int vector_length = __riscv_vlenb() * 8; + + switch (vector_length) { + case 256: + for (int i = 0; i < nb; ++i) { + + size_t vl = 8; + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl); + vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl); + vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl); + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl); + vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl)); + vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl); + + vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); + sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi); + + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + vl = 32; + + int32_t sum_1 = 0; + int32_t sum_2 = 0; + + vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1); + + for (int j = 0; j < QK_K/64; ++j) { + // load Q4 + vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl); + + // load Q8 and multiply it with lower Q4 nibble + vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl); + vint8m1_t q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl)); + vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl); + vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl); + + sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0]; + + // load Q8 and 
multiply it with upper Q4 nibble + vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl); + vint8m1_t q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl)); + vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl); + vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl); + + sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1]; + + q4 += 32; q8 += 64; + + } + + sumf += d*(sum_1 + sum_2); + + } + break; + case 128: + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + int tmp, tmp2, sumi; + __asm__ __volatile__( + "vsetivli zero, 12, e8, m1\n\t" + "vle8.v v1, (%[s6b])\n\t" // {aux[0], aux[1], aux[2]} + "vsetivli zero, 4, e32, m1\n\t" + "vslidedown.vi v2, v1, 2\n\t" + "vmv1r.v v3, v2\n\t" + "vslideup.vi v2, v3, 1\n\t" // {aux[2], aux[2]} + "vsetivli zero, 2, e32, m1\n\t" + "vmv.v.i v4, 4\n\t" + "vand.vx v8, v1, %[kmask1]\n\t" + "vslide1up.vx v5, v4, zero\n\t" // {0, 4} + "vsrl.vi v6, v1, 6\n\t" + "vsrl.vv v7, v2, v5\n\t" + "vand.vx v0, v6, %[kmask3]\n\t" + "vand.vx v2, v7, %[kmask2]\n\t" + "vsll.vi v6, v0, 4\n\t" + "li %[t2], 8\n\t" + "addi %[t1], %[utmp], 4\n\t" + "vor.vv v1, v6, v2\n\t" + "vsse32.v v8, (%[utmp]), %[t2]\n\t" + "vsse32.v v1, (%[t1]), %[t2]\n\t" + "vsetivli zero, 8, e16, m1\n\t" + "vle32.v v2, (%[bsums])\n\t" + "vnsrl.wi v0, v2, 0\n\t" + "vnsrl.wi v1, v2, 16\n\t" + "vadd.vv v2, v0, v1\n\t" + "vle8.v v3, (%[mins])\n\t" + "vzext.vf2 v4, v3\n\t" + "vwmul.vv v6, v4, v2\n\t" + "vmv.v.x v0, zero\n\t" + "vsetivli zero, 8, e32, m2\n\t" + "vredsum.vs v0, v6, v0\n\t" + "vmv.x.s %[sumi], v0" + : [t1] "=&r" (tmp), [t2] "=&r" (tmp2), [sumi] "=&r" (sumi) + : [bsums] "r" (y[i].bsums), [mins] "r" (mins), [utmp] "r" (utmp) + , [s6b] "r" (x[i].scales), [kmask1] "r" (kmask1) + , [kmask2] "r" (kmask2), [kmask3] "r" (kmask3) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + sumf -= dmin * sumi; + + const uint8_t * restrict q4 = x[i].qs; + const int8_t * restrict q8 = y[i].qs; + + sumi = 0; + const uint8_t * scale = scales; + + for (int j = 0; j < QK_K/128; ++j) { + int vl128 = 128, vl64 = 64, vl32 = 32; + __asm__ __volatile__( + "vsetvli zero, %[vl128], e8, m8\n\t" + "vle8.v v8, (%[q8])\n\t" + "vsetvli zero, %[vl64], e8, m4\n\t" + "vle8.v v0, (%[q4])\n\t" + "vsrl.vi v4, v0, 4\n\t" + "vand.vi v0, v0, 0xF\n\t" + "vsetvli zero, %[vl32], e8, m2\n\t" + "vwmul.vv v28, v6, v14\n\t" + "vwmul.vv v20, v4, v10\n\t" + "vwmul.vv v24, v2, v12\n\t" + "vwmul.vv v16, v0, v8\n\t" + "vsetivli zero, 4, e32, m1\n\t" + "vle8.v v2, (%[scale])\n\t" + "vmv.v.x v0, zero\n\t" + "vzext.vf4 v1, v2\n\t" + "vsetvli zero, %[vl32], e16, m4\n\t" + "vwredsum.vs v6, v24, v0\n\t" + "vwredsum.vs v7, v28, v0\n\t" + "vwredsum.vs v4, v16, v0\n\t" + "vwredsum.vs v5, v20, v0\n\t" + "vsetivli zero, 4, e32, m1\n\t" + "vslideup.vi v6, v7, 1\n\t" + "vslideup.vi v4, v5, 1\n\t" + "vslideup.vi v4, v6, 2\n\t" + "vmul.vv v8, v4, v1\n\t" + "vredsum.vs v0, v8, v0\n\t" + "vmv.x.s %[tmp], v0\n\t" + "add %[sumi], %[sumi], %[tmp]" + : [tmp] "=&r" (tmp), [sumi] "+&r" (sumi) + : [vl128] "r" (vl128), [vl64] "r" (vl64), [vl32] "r" (vl32) + , [q4] "r" (q4), [q8] "r" (q8), [scale] "r" (scale) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", 
"v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + + q4 += 64; q8 += 128; scale += 4; + } + + sumf += d * sumi; + } + break; + default: + assert(false && "Unsupported vector length"); + break; + } + + *s = sumf; + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + a += 32; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + a += 32; q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined __riscv_v + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + float sumf = 0; + float sums = 0.0; + + size_t vl; + + for (int i = 0; i < nb; ++i) { + + vl = 8; + + const uint8_t * GGML_RESTRICT q5 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + + vint16m1_t q8sums_0 = __riscv_vlse16_v_i16m1(y[i].bsums, 4, vl); + vint16m1_t q8sums_1 = __riscv_vlse16_v_i16m1(y[i].bsums+1, 4, vl); + vint16m1_t q8sums = __riscv_vadd_vv_i16m1(q8sums_0, q8sums_1, vl); + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + 
utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + vuint8mf2_t mins8 = __riscv_vle8_v_u8mf2(mins, vl); + vint16m1_t v_mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl)); + vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, v_mins, vl); + + vint32m1_t sumi = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); + sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi); + + vl = 32; + int32_t aux32 = 0; + int is = 0; + + uint8_t m = 1; + vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); + vuint8m2_t vqh = __riscv_vle8_v_u8m2(hm, vl); + + for (int j = 0; j < QK_K/64; ++j) { + // load Q5 and Q8 + vuint8m2_t q5_x = __riscv_vle8_v_u8m2(q5, vl); + vint8m2_t q8_y1 = __riscv_vle8_v_i8m2(q8, vl); + vint8m2_t q8_y2 = __riscv_vle8_v_i8m2(q8+32, vl); + + // compute mask for addition + vint8m2_t q5_a = __riscv_vreinterpret_v_u8m2_i8m2(__riscv_vand_vx_u8m2(q5_x, 0x0F, vl)); + vuint8m2_t qh_m1 = __riscv_vand_vx_u8m2(vqh, m, vl); + vbool4_t vmask_1 = __riscv_vmsne_vx_u8m2_b4(qh_m1, 0, vl); + vint8m2_t q5_m1 = __riscv_vadd_vx_i8m2_mu(vmask_1, q5_a, q5_a, 16, vl); + m <<= 1; + + vint8m2_t q5_l = __riscv_vreinterpret_v_u8m2_i8m2(__riscv_vsrl_vx_u8m2(q5_x, 0x04, vl)); + vuint8m2_t qh_m2 = __riscv_vand_vx_u8m2(vqh, m, vl); + vbool4_t vmask_2 = __riscv_vmsne_vx_u8m2_b4(qh_m2, 0, vl); + vint8m2_t q5_m2 = __riscv_vadd_vx_i8m2_mu(vmask_2, q5_l, q5_l, 16, vl); + m <<= 1; + + vint16m4_t v0 = __riscv_vwmul_vv_i16m4(q5_m1, q8_y1, vl); + vint16m4_t v1 = __riscv_vwmul_vv_i16m4(q5_m2, q8_y2, vl); + + vint32m8_t vs1 = __riscv_vwmul_vx_i32m8(v0, scales[is++], vl); + vint32m8_t vs2 = __riscv_vwmul_vx_i32m8(v1, scales[is++], vl); + + vint32m1_t vacc1 = __riscv_vredsum_vs_i32m8_i32m1(vs1, vzero, vl); + vint32m1_t vacc2 = __riscv_vredsum_vs_i32m8_i32m1(vs2, vacc1, vl); + + aux32 += __riscv_vmv_x_s_i32m1_i32(vacc2); + q5 += 32; q8 += 64; + + } + + sums += aux32 * d; + + } + + *s = sumf+sums; + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 
16 : 0); + a += 32; m <<= 1; + q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q6_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __riscv_xtheadvector + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + + const uint8_t * restrict q6 = x[i].ql; + const uint8_t * restrict qh = x[i].qh; + const int8_t * restrict q8 = y[i].qs; + + const int8_t * restrict scale = x[i].scales; + + int sum_t = 0; + int t0; + + for (int j = 0; j < QK_K/128; ++j) { + __asm__ __volatile__( + "th.vsetvli zero, %[vl32], e8, m2\n\t" // vl == 32 + "th.vlb.v v4, (%[qh])\n\t" + "th.vsll.vi v0, v4, 4\n\t" + "th.vsll.vi v2, v4, 2\n\t" + "th.vsrl.vi v6, v4, 2\n\t" + "th.vsetvli zero, %[vl64], e8, m4\n\t" // vl == 64 + "th.vlb.v v8, (%[q6])\n\t" + "th.vsrl.vi v12, v8, 4\n\t" + "th.vand.vi v8, v8, 0xF\n\t" + "th.vsetvli zero, %[vl128], e8, m8\n\t" // vl == 128 + "th.vand.vx v0, v0, %[mask]\n\t" + "th.vor.vv v8, v8, v0\n\t" + "th.vlb.v v0, (%[q8])\n\t" + "th.vsub.vx v8, v8, %[vl32]\n\t" + "th.vsetvli zero, %[vl64], e8, m4\n\t" // vl == 64 + "th.vwmul.vv v16, v0, v8\n\t" + "th.vwmul.vv v24, v4, v12\n\t" + "li %[t0], 16\n\t" + "th.vsetvli zero, %[t0], e16, m2\n\t" // vl == 16 + "th.vmv.v.x v0, zero\n\t" + "th.vwredsum.vs v10, v16, v0\n\t" + "th.vwredsum.vs v9, v18, v0\n\t" + "th.vwredsum.vs v8, v20, v0\n\t" + "th.vwredsum.vs v7, v22, v0\n\t" + "th.vwredsum.vs v11, v24, v0\n\t" + "th.vwredsum.vs v12, v26, v0\n\t" + "th.vwredsum.vs v13, v28, v0\n\t" + "th.vwredsum.vs v14, v30, v0\n\t" + "li %[t0], 4\n\t" + "th.vsetvli zero, %[t0], e32, m1\n\t" // vl == 4 + "th.vslideup.vi v10, v9, 1\n\t" + "th.vslideup.vi v8, v7, 1\n\t" + "th.vslideup.vi v11, v12, 1\n\t" + "th.vslideup.vi v13, v14, 1\n\t" + "th.vslideup.vi v10, v8, 2\n\t" + "th.vslideup.vi v11, v13, 2\n\t" + "li %[t0], 8\n\t" + "th.vsetvli zero, %[t0], e32, m2\n\t" // vl == 8 + "th.vlb.v v4, (%[scale])\n\t" + "th.vmul.vv v2, v4, v10\n\t" + "th.vredsum.vs v0, v2, v0\n\t" + "th.vmv.x.s 
%[t0], v0\n\t" + "add %[sumi], %[sumi], %[t0]" + : [sumi] "+&r" (sum_t), [t0] "=&r" (t0) + : [qh] "r" (qh), [q6] "r" (q6), [q8] "r" (q8), [scale] "r" (scale) + , [vl32] "r" (32), [vl64] "r" (64), [vl128] "r" (128) + , [mask] "r" (0x30) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + q6 += 64; qh += 32; q8 += 128; scale += 8; + } + + sumf += d * sum_t; + + } + + *s = sumf; + +#elif defined __riscv_v + + float sumf = 0; + const int vector_length = __riscv_vlenb() * 8; + + switch (vector_length) { + case 256: + for (int i = 0; i < nb; ++i) { + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + + const uint8_t * GGML_RESTRICT q6 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const int8_t * GGML_RESTRICT scale = x[i].scales; + + size_t vl; + + vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); + + int sum_t = 0; + int is = 0; + + for (int j = 0; j < QK_K/128; ++j) { + + vl = 32; + + // load qh + vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl); + + // load Q6 + vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl); + vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl); + + vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl); + vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl); + vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl); + vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl); + + vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl); + vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl); + vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl); + vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl); + + vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl); + vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl); + vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl); + vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl); + + vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl); + vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl); + vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl); + vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl); + + // load Q8 and take product + vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl); + vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl); + vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, __riscv_vle8_v_i8m1(q8+64, vl), vl); + vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl); + + vl = 16; + + vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl); + vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl); + vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl); + vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl); + vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl); + vint32m2_t vaux_5 = 
__riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl); + vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl); + vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl); + + vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl); + vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl); + vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl); + vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl); + + sum_t += __riscv_vmv_x_s_i32m1_i32(isum3); + + q6 += 64; qh += 32; q8 += 128; is=8; + + } + + sumf += d * sum_t; + + } + break; + case 128: + for (int i = 0; i < nb; ++i) { + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + + const uint8_t * restrict q6 = x[i].ql; + const uint8_t * restrict qh = x[i].qh; + const int8_t * restrict q8 = y[i].qs; + + const int8_t * restrict scale = x[i].scales; + + int sum_t = 0; + int t0; + + for (int j = 0; j < QK_K/128; ++j) { + __asm__ __volatile__( + "vsetvli zero, %[vl32], e8, m2\n\t" + "vle8.v v4, (%[qh])\n\t" + "vsll.vi v0, v4, 4\n\t" + "vsll.vi v2, v4, 2\n\t" + "vsrl.vi v6, v4, 2\n\t" + "vsetvli zero, %[vl64], e8, m4\n\t" + "vle8.v v8, (%[q6])\n\t" + "vsrl.vi v12, v8, 4\n\t" + "vand.vi v8, v8, 0xF\n\t" + "vsetvli zero, %[vl128], e8, m8\n\t" + "vand.vx v0, v0, %[mask]\n\t" + "vor.vv v8, v8, v0\n\t" + "vle8.v v0, (%[q8])\n\t" + "vsub.vx v8, v8, %[vl32]\n\t" + "vsetvli zero, %[vl64], e8, m4\n\t" + "vwmul.vv v16, v0, v8\n\t" + "vwmul.vv v24, v4, v12\n\t" + "vsetivli zero, 16, e16, m2\n\t" + "vmv.v.x v0, zero\n\t" + "vwredsum.vs v10, v16, v0\n\t" + "vwredsum.vs v9, v18, v0\n\t" + "vwredsum.vs v8, v20, v0\n\t" + "vwredsum.vs v7, v22, v0\n\t" + "vwredsum.vs v11, v24, v0\n\t" + "vwredsum.vs v12, v26, v0\n\t" + "vwredsum.vs v13, v28, v0\n\t" + "vwredsum.vs v14, v30, v0\n\t" + "vsetivli zero, 4, e32, m1\n\t" + "vslideup.vi v10, v9, 1\n\t" + "vslideup.vi v8, v7, 1\n\t" + "vslideup.vi v11, v12, 1\n\t" + "vslideup.vi v13, v14, 1\n\t" + "vslideup.vi v10, v8, 2\n\t" + "vslideup.vi v11, v13, 2\n\t" + "vsetivli zero, 8, e32, m2\n\t" + "vle8.v v2, (%[scale])\n\t" + "vsext.vf4 v4, v2\n\t" + "vmul.vv v2, v4, v10\n\t" + "vredsum.vs v0, v2, v0\n\t" + "vmv.x.s %[t0], v0\n\t" + "add %[sumi], %[sumi], %[t0]" + : [sumi] "+&r" (sum_t), [t0] "=&r" (t0) + : [qh] "r" (qh), [q6] "r" (q6), [q8] "r" (q8), [scale] "r" (scale) + , [vl32] "r" (32), [vl64] "r" (64), [vl128] "r" (128) + , [mask] "r" (0x30) + : "memory" + , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" + , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" + , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" + , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" + ); + q6 += 64; qh += 32; q8 += 128; scale += 8; + } + + sumf += d * sum_t; + + } + break; + default: + assert(false && "Unsupported vector length"); + break; + } + + *s = sumf; + +#else + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + a[l 
+ 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
+                a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
+                a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
+                a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
+            }
+            a += 128;
+            q4 += 64;
+            qh += 32;
+        }
+        a = aux8;
+        int is = 0;
+        for (int j = 0; j < QK_K/16; ++j) {
+            int scale = x[i].scales[is++];
+            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+            q8 += 8; a += 8;
+            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+            q8 += 8; a += 8;
+        }
+        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+    }
+    for (int l = 0; l < 8; ++l) sumf += sums[l];
+    *s = sumf;
+#endif
+}
+
diff --git a/ggml/src/ggml-cpu/arch/riscv/repack.cpp b/ggml/src/ggml-cpu/arch/riscv/repack.cpp
new file mode 100644
index 0000000000000..0882b41024362
--- /dev/null
+++ b/ggml/src/ggml-cpu/arch/riscv/repack.cpp
@@ -0,0 +1,396 @@
+#define GGML_COMMON_IMPL_CPP
+#define GGML_COMMON_DECL_CPP
+#include "ggml-common.h"
+#include "ggml-backend-impl.h"
+
+#include "ggml-impl.h"
+#include "ggml-cpu.h"
+#include "ggml-cpu-impl.h"
+#include "traits.h"
+
+#include <cmath>
+#include <cstring>
+#include <cassert>
+#include <cstdlib> // for qsort
+#include <cstdio>  // for GGML_ASSERT
+
+#define GGML_CPU_CLANG_WORKAROUND
+#include "../../repack.h"
+
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Woverlength-strings"
+#endif
+
+#define UNUSED GGML_UNUSED
+
+void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
+    const int qk = QK8_0;
+    const int nb = n / qk;
+    const int ncols_interleaved = 8;
+    const int blocklen = 8;
+
+    assert (n % qk == 0);
+    assert (nc % ncols_interleaved == 0);
+
+    UNUSED(s);
+    UNUSED(bs);
+    UNUSED(vx);
+    UNUSED(vy);
+    UNUSED(nr);
+    UNUSED(nc);
+    UNUSED(nb);
+    UNUSED(ncols_interleaved);
+    UNUSED(blocklen);
+
+#if defined __riscv_v
+    if (__riscv_vlenb() >= QK4_0) {
+        const size_t vl = QK4_0;
+
+        const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
+        for (int x = 0; x < nc / ncols_interleaved; x++) {
+            const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb);
+
+            vfloat32m1_t sumf = __riscv_vfmv_v_f_f32m1(0.0, vl / 4);
+            for (int l = 0; l < nb; l++) {
+                const int64_t a0 = *(const int64_t *)&a_ptr[l].qs[0];
+                const int64_t a1 = *(const int64_t *)&a_ptr[l].qs[8];
+                const int64_t a2 = *(const int64_t *)&a_ptr[l].qs[16];
+                const int64_t a3 = *(const int64_t *)&a_ptr[l].qs[24];
+                __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment constraints
+                const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a0, vl / 4));
+                const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a1, vl / 4));
+                const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a2, vl / 4));
+                const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a3, vl / 4));
+
+                const vint8m4_t rhs_raw_vec = __riscv_vle8_v_i8m4((const int8_t *)b_ptr[l].qs, vl * 4);
+                const vint8m4_t rhs_vec_lo = __riscv_vsra_vx_i8m4(__riscv_vsll_vx_i8m4(rhs_raw_vec, 4, vl * 4), 4, vl * 4);
+                const vint8m4_t rhs_vec_hi = __riscv_vsra_vx_i8m4(rhs_raw_vec, 4, vl * 4);
+                const vint8m2_t rhs_vec_lo_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 0);
+                const vint8m2_t 
rhs_vec_lo_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 1); + const vint8m2_t rhs_vec_hi_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 0); + const vint8m2_t rhs_vec_hi_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 1); + + const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); + const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); + const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); + const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); + + const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_hi_m)); + const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); + const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); + const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); + const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); + const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); + const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); + const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); + const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); + const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); + const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); + const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); + const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); + + // vector version needs Zvfhmin extension + const float a_scale = GGML_FP16_TO_FP32(a_ptr[l].d); + const float b_scales[8] = { + GGML_FP16_TO_FP32(b_ptr[l].d[0]), + GGML_FP16_TO_FP32(b_ptr[l].d[1]), + GGML_FP16_TO_FP32(b_ptr[l].d[2]), + GGML_FP16_TO_FP32(b_ptr[l].d[3]), + GGML_FP16_TO_FP32(b_ptr[l].d[4]), + GGML_FP16_TO_FP32(b_ptr[l].d[5]), + GGML_FP16_TO_FP32(b_ptr[l].d[6]), + GGML_FP16_TO_FP32(b_ptr[l].d[7]) + }; + const vfloat32m1_t b_scales_vec = __riscv_vle32_v_f32m1(b_scales, vl / 4); + const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scale, vl / 4); + sumf = __riscv_vfmacc_vv_f32m1(sumf, tmp1, b_scales_vec, vl / 4); + } + __riscv_vse32_v_f32m1(s + x * ncols_interleaved, sumf, vl / 4); + } + return; + } + +#endif + { + float sumf[8]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } + } +} + +void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * 
GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if defined __riscv_v + if (__riscv_vlenb() >= QK4_0) { + const size_t vl = QK4_0; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); + vfloat32m1_t sumf0 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); + vfloat32m1_t sumf1 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); + vfloat32m1_t sumf2 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); + vfloat32m1_t sumf3 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); + for (int l = 0; l < nb; l++) { + const vint8m4_t rhs_raw_vec = __riscv_vle8_v_i8m4((const int8_t *)b_ptr[l].qs, vl * 4); + const vint8m4_t rhs_vec_lo = __riscv_vsra_vx_i8m4(__riscv_vsll_vx_i8m4(rhs_raw_vec, 4, vl * 4), 4, vl * 4); + const vint8m4_t rhs_vec_hi = __riscv_vsra_vx_i8m4(rhs_raw_vec, 4, vl * 4); + const vint8m2_t rhs_vec_lo_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 0); + const vint8m2_t rhs_vec_lo_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 1); + const vint8m2_t rhs_vec_hi_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 0); + const vint8m2_t rhs_vec_hi_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 1); + + // vector version needs Zvfhmin extension + const float a_scales[4] = { + GGML_FP16_TO_FP32(a_ptr[l].d[0]), + GGML_FP16_TO_FP32(a_ptr[l].d[1]), + GGML_FP16_TO_FP32(a_ptr[l].d[2]), + GGML_FP16_TO_FP32(a_ptr[l].d[3]) + }; + const float b_scales[8] = { + GGML_FP16_TO_FP32(b_ptr[l].d[0]), + GGML_FP16_TO_FP32(b_ptr[l].d[1]), + GGML_FP16_TO_FP32(b_ptr[l].d[2]), + GGML_FP16_TO_FP32(b_ptr[l].d[3]), + GGML_FP16_TO_FP32(b_ptr[l].d[4]), + GGML_FP16_TO_FP32(b_ptr[l].d[5]), + GGML_FP16_TO_FP32(b_ptr[l].d[6]), + GGML_FP16_TO_FP32(b_ptr[l].d[7]) + }; + const vfloat32m1_t b_scales_vec = __riscv_vle32_v_f32m1(b_scales, vl / 4); + + const int64_t A0 = *(const int64_t *)&a_ptr[l].qs[0]; + const int64_t A4 = *(const int64_t *)&a_ptr[l].qs[32]; + const int64_t A8 = *(const int64_t *)&a_ptr[l].qs[64]; + const int64_t Ac = *(const int64_t *)&a_ptr[l].qs[96]; + __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment + vint16m4_t sumi_l0; + { + const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A0, vl / 4)); + const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A4, vl / 4)); + const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A8, vl / 4)); + const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ac, vl / 4)); + const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); + const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); + const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); + const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); + + sumi_l0 = sumi_hi_m; + } + + { + const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l0)); + const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); + const vuint16m2_t sumi_h2_1 = 
__riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); + const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); + const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); + const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); + const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); + const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); + const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); + const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); + const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); + const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); + const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); + + const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[0], vl / 4); + sumf0 = __riscv_vfmacc_vv_f32m1(sumf0, tmp1, b_scales_vec, vl / 4); + } + + const int64_t A1 = *(const int64_t *)&a_ptr[l].qs[8]; + const int64_t A5 = *(const int64_t *)&a_ptr[l].qs[40]; + const int64_t A9 = *(const int64_t *)&a_ptr[l].qs[72]; + const int64_t Ad = *(const int64_t *)&a_ptr[l].qs[104]; + __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment + vint16m4_t sumi_l1; + { + const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A1, vl / 4)); + const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A5, vl / 4)); + const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A9, vl / 4)); + const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ad, vl / 4)); + const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); + const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); + const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); + const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); + + sumi_l1 = sumi_hi_m; + } + + { + const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l1)); + const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); + const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); + const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); + const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); + const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); + const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); + const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); + const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); + const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); + const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); + const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); + const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); + + const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[1], vl / 4); + sumf1 = __riscv_vfmacc_vv_f32m1(sumf1, tmp1, b_scales_vec, vl / 4); + } + + const int64_t A2 = *(const 
int64_t *)&a_ptr[l].qs[16]; + const int64_t A6 = *(const int64_t *)&a_ptr[l].qs[48]; + const int64_t Aa = *(const int64_t *)&a_ptr[l].qs[80]; + const int64_t Ae = *(const int64_t *)&a_ptr[l].qs[112]; + __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment + vint16m4_t sumi_l2; + { + const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A2, vl / 4)); + const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A6, vl / 4)); + const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Aa, vl / 4)); + const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ae, vl / 4)); + const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); + const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); + const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); + const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); + + sumi_l2 = sumi_hi_m; + } + + { + const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l2)); + const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); + const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); + const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); + const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); + const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); + const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); + const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); + const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); + const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); + const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); + const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); + const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); + + const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[2], vl / 4); + sumf2 = __riscv_vfmacc_vv_f32m1(sumf2, tmp1, b_scales_vec, vl / 4); + } + + const int64_t A3 = *(const int64_t *)&a_ptr[l].qs[24]; + const int64_t A7 = *(const int64_t *)&a_ptr[l].qs[56]; + const int64_t Ab = *(const int64_t *)&a_ptr[l].qs[88]; + const int64_t Af = *(const int64_t *)&a_ptr[l].qs[120]; + __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment + vint16m4_t sumi_l3; + { + const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A3, vl / 4)); + const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A7, vl / 4)); + const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ab, vl / 4)); + const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Af, vl / 4)); + const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); + const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); + const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); + const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); + + 
sumi_l3 = sumi_hi_m; + } + + { + const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l3)); + const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); + const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); + const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); + const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); + const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); + const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); + const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); + const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); + const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); + const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); + const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); + const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); + + const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[3], vl / 4); + sumf3 = __riscv_vfmacc_vv_f32m1(sumf3, tmp1, b_scales_vec, vl / 4); + } + } + __riscv_vse32_v_f32m1(&s[(y * 4 + 0) * bs + x * ncols_interleaved], sumf0, vl / 4); + __riscv_vse32_v_f32m1(&s[(y * 4 + 1) * bs + x * ncols_interleaved], sumf1, vl / 4); + __riscv_vse32_v_f32m1(&s[(y * 4 + 2) * bs + x * ncols_interleaved], sumf2, vl / 4); + __riscv_vse32_v_f32m1(&s[(y * 4 + 3) * bs + x * ncols_interleaved], sumf3, vl / 4); + } + } + + return; + } + +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) + float sumf[4][8]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } +} diff --git a/ggml/src/ggml-cpu/arch/s390/quants.c b/ggml/src/ggml-cpu/arch/s390/quants.c new file mode 100644 index 0000000000000..26bd908757114 --- /dev/null +++ b/ggml/src/ggml-cpu/arch/s390/quants.c @@ -0,0 +1,1299 @@ +#define GGML_COMMON_IMPL_C +#include "ggml-common.h" +#include "ggml-quants.h" +#include "ggml-impl.h" +#include "ggml-cpu.h" + +#include "../../quants.h" +#include "../../ggml-cpu-impl.h" + +#include +#include +#include +#include +#include // for qsort +#include // for GGML_ASSERT + +#define GROUP_MAX_EPS 1e-15f +#define GROUP_MAX_EPS_IQ3_XXS 1e-8f 
+#define GROUP_MAX_EPS_IQ2_S 1e-8f +#define GROUP_MAX_EPS_IQ1_M 1e-7f +#define GROUP_MAX_EPS_IQ1_S 1e-12f + +#define UNUSED GGML_UNUSED + +void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__VXE__) || defined(__VXE2__) + for (int i = 0; i < nb; i++) { + __vector float srcv [8]; + __vector float asrcv[8]; + __vector float amaxv[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); + for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); + + const float amax = MAX(MAX(vec_extract(amaxv[0], 0), + vec_extract(amaxv[0], 1)), + MAX(vec_extract(amaxv[0], 2), + vec_extract(amaxv[0], 3))); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f / d : 0.0f; + + y[i].d = GGML_FP32_TO_FP16(d); + + for (int j = 0; j < 8; j++) { + const __vector float v = vec_mul(srcv[j], vec_splats(id)); + const __vector int32_t vi = vec_signed(v); + + y[i].qs[4*j + 0] = vec_extract(vi, 0); + y[i].qs[4*j + 1] = vec_extract(vi, 1); + y[i].qs[4*j + 2] = vec_extract(vi, 2); + y[i].qs[4*j + 3] = vec_extract(vi, 3); + } + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_0_ref(x, y, k); +#endif +} + +void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK8_1 == 0); + const int nb = k / QK8_1; + + block_q8_1 * GGML_RESTRICT y = vy; + +#if defined(__VXE__) || defined(__VXE2__) + for (int i = 0; i < nb; i++) { + __vector float srcv [8]; + __vector float asrcv[8]; + __vector float amaxv[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); + for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); + + const float amax = MAX(MAX(vec_extract(amaxv[0], 0), + vec_extract(amaxv[0], 1)), + MAX(vec_extract(amaxv[0], 2), + vec_extract(amaxv[0], 3))); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 
1.0f / d : 0.0f; + + y[i].d = GGML_FP32_TO_FP16(d); + + __vector int32_t acc = vec_splats(0); + + for (int j = 0; j < 8; j++) { + const __vector float v = vec_mul(srcv[j], vec_splats(id)); + const __vector int32_t vi = vec_signed(v); + + y[i].qs[4*j + 0] = vec_extract(vi, 0); + y[i].qs[4*j + 1] = vec_extract(vi, 1); + y[i].qs[4*j + 2] = vec_extract(vi, 2); + y[i].qs[4*j + 3] = vec_extract(vi, 3); + + acc = vec_add(acc, vi); + } + + y[i].s = GGML_FP32_TO_FP16(d * (acc[0] + acc[1] + acc[2] + acc[3])); + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_1_ref(x, y, k); +#endif +} + + +//===================================== Dot products ================================= + +void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__VXE__) || defined(__VXE2__) + __vector float acc = vec_splats(0.0f); + + const __vector uint8_t v_m = vec_splats((const uint8_t)0x0F); + const __vector int8_t v_s = vec_splats( (const int8_t)0x08); + + for (; ib < nb; ++ib) { + const __vector uint8_t v_x = vec_xl(0, x[ib].qs); + const __vector int8_t v_xl = (const __vector int8_t)(v_x & v_m); + const __vector int8_t v_xh = (const __vector int8_t)(v_x >> 4); + + const __vector int8_t v_xls = vec_sub(v_xl, v_s); + const __vector int8_t v_xhs = vec_sub(v_xh, v_s); + + const __vector int8_t v_yl = vec_xl(0 , y[ib].qs); + const __vector int8_t v_yh = vec_xl(QK8_0/2, y[ib].qs); + + const __vector int16_t v_xylso = vec_mulo(v_xls, v_yl); + const __vector int16_t v_xylse = vec_mule(v_xls, v_yl); + const __vector int16_t v_xyhso = vec_mulo(v_xhs, v_yh); + const __vector int16_t v_xyhse = vec_mule(v_xhs, v_yh); + + __vector int16_t v_xy_ = v_xylso + v_xylse + v_xyhso + v_xyhse; v_xy_ += vec_reve(v_xy_); + + const __vector float v_xy = vec_float(vec_unpackh(v_xy_)); + const __vector float v_d = vec_splats(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + + acc = vec_madd(v_xy, v_d, acc); + } + + sumf = acc[0] + acc[1] + acc[2] + acc[3]; + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F) - 8; + const int v1 = (x[ib].qs[j] >> 4) - 8; + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + } + + *s = sumf; +} + +void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__VXE__) || defined(__VXE2__) + float summs = 0; + float32x4_t acc = vec_splats(0.0f); + + const uint8x16_t v_m = vec_splat_u8(0x0F); + +#pragma GCC unroll 4 + for (; ib < nb; ++ib) { + __builtin_prefetch(x[ib].qs, 0, 1); + __builtin_prefetch(y[ib].qs, 0, 1); + + summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); + + const uint8x16_t v_x = 
vec_xl(0, x[ib].qs); + const int8x16_t v_xl = (const int8x16_t)(v_x & v_m); + const int8x16_t v_xh = (const int8x16_t)(v_x >> 4); + + const int8x16_t v_yl = vec_xl(0 , y[ib].qs); + const int8x16_t v_yh = vec_xl(QK8_1/2, y[ib].qs); + + const int32x4_t v_xy_ = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); + const float32x4_t v_xy = vec_float(v_xy_); + + const float32x4_t v_d = vec_splats(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + + acc = vec_madd(v_xy, v_d, acc); + } + + sumf = acc[0] + acc[1] + acc[2] + acc[3] + summs; + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F); + const int v1 = (x[ib].qs[j] >> 4); + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q8_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__VXE__) || defined(__VXE2__) + __vector float acc = vec_splats(0.0f); + +#pragma GCC unroll 8 + for (; ib < nb; ++ib) { + __builtin_prefetch(x[ib].qs, 0, 1); + __builtin_prefetch(y[ib].qs, 0, 1); + + const int8x16_t v_xl = vec_xl(0 , x[ib].qs); + const int8x16_t v_xh = vec_xl(QK8_0/2, x[ib].qs); + const int8x16_t v_yl = vec_xl(0 , y[ib].qs); + const int8x16_t v_yh = vec_xl(QK8_0/2, y[ib].qs); + + const int32x4_t v_xy_ = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); + const float32x4_t v_xy = vec_float(v_xy_); + const float32x4_t v_d = vec_splats(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + + acc = vec_madd(v_xy, v_d, acc); + } + + sumf = acc[0] + acc[1] + acc[2] + acc[3]; + +#endif + for (; ib < nb; ++ib) { + int sumi = 0; + + for (int j = 0; j < qk; j++) { + sumi += x[ib].qs[j]*y[ib].qs[j]; + } + + sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + } + + *s = sumf; +} + +void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const uint32_t kmask1 = 0x03030303; + const uint32_t kmask2 = 0x0f0f0f0f; + + const block_q3_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__VXE__) || defined(__VXE2__) + uint32_t aux[3]; + uint32_t utmp[4]; + + const int32x4_t v_z = vec_splat_s32(0); + const uint8x16_t v_3m = vec_splat_u8(0x03); + + const uint8x16_t v_0c = vec_splat_u8(1); + const uint8x16_t v_1c = vec_sl(v_0c, 1); + const uint8x16_t v_2c = vec_sl(v_0c, 2); + const uint8x16_t v_3c = vec_sl(v_0c, 3); + + uint8x16_t q3h[4]; + uint8x16_t q3b[2]; + int8x16_t q3bytes[4]; + int8x16_t q8bytes[4]; + uint8x16_t qhbits[2]; + + float sum = 0; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + + const uint8_t * restrict x0l = x[i].qs; + const uint8_t * restrict x0h = x[i].hmask; + const int8_t * restrict y0 = 
y[i].qs; + + qhbits[0] = vec_xl(0 , x0h); + qhbits[1] = vec_xl(16, x0h); + + int32_t isum = 0; + + memcpy(aux, x[i].scales, 12); + utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); + utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); + utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); + utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); + + int8_t * scale = (int8_t *)utmp; + for (int j = 0; j < 16; ++j) scale[j] -= 32; + + for (int j = 0; j < QK_K/128; ++j) { + int32x4_t isum0, isum1, isum2, isum3; + + q3b[0] = vec_xl(0 , x0l); + q3b[1] = vec_xl(16, x0l); + x0l += 32; + + q8bytes[0] = vec_xl(0 , y0); + q8bytes[1] = vec_xl(16 , y0); + q8bytes[2] = vec_xl(32 , y0); + q8bytes[3] = vec_xl(48 , y0); + q8bytes[4] = vec_xl(64 , y0); + q8bytes[5] = vec_xl(80 , y0); + q8bytes[6] = vec_xl(96 , y0); + q8bytes[7] = vec_xl(112, y0); + y0 += 128; + + q3h[0] = vec_sl(vec_andc(v_0c, qhbits[0]), 2); + q3h[1] = vec_sl(vec_andc(v_0c, qhbits[1]), 2); + q3h[2] = vec_sl(vec_andc(v_1c, qhbits[0]), 1); + q3h[3] = vec_sl(vec_andc(v_1c, qhbits[1]), 1); + + q3bytes[0] = vec_sub((int8x16_t)vec_and(q3b[0], v_3m), (int8x16_t)q3h[0]); + q3bytes[1] = vec_sub((int8x16_t)vec_and(q3b[1], v_3m), (int8x16_t)q3h[1]); + q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 2), v_3m), (int8x16_t)q3h[2]); + q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 2), v_3m), (int8x16_t)q3h[3]); + + isum0 = ggml_vec_dot(v_z, q3bytes[0], q8bytes[0]); + isum1 = ggml_vec_dot(v_z, q3bytes[1], q8bytes[1]); + isum2 = ggml_vec_dot(v_z, q3bytes[2], q8bytes[2]); + isum3 = ggml_vec_dot(v_z, q3bytes[3], q8bytes[3]); + + isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0]; + isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1]; + isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2]; + isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3]; + + scale += 4; + + q3h[0] = vec_andc(v_2c, qhbits[0]); + q3h[1] = vec_andc(v_2c, qhbits[1]); + q3h[2] = vec_sr(vec_andc(v_3c, qhbits[0]), 1); + q3h[3] = vec_sr(vec_andc(v_3c, qhbits[1]), 1); + + q3bytes[0] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 4), v_3m), (int8x16_t)q3h[0]); + q3bytes[1] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 4), v_3m), (int8x16_t)q3h[1]); + q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 6), v_3m), (int8x16_t)q3h[2]); + q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 6), v_3m), (int8x16_t)q3h[3]); + + isum0 = ggml_vec_dot(v_z, q3bytes[0], q8bytes[4]); + isum1 = ggml_vec_dot(v_z, q3bytes[1], q8bytes[5]); + isum2 = ggml_vec_dot(v_z, q3bytes[2], q8bytes[6]); + isum3 = ggml_vec_dot(v_z, q3bytes[3], q8bytes[7]); + + isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0]; + isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1]; + isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2]; + isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3]; + + scale += 4; + + if (j == 0) { + qhbits[0] = vec_sr(qhbits[0], 4); + qhbits[1] = vec_sr(qhbits[1], 4); + } + } + + sum += d * isum; + } + + *s = sum; + +#else + // scalar version + // This function is written like this so the compiler can manage to vectorize most of it + // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the + // manually vectorized version above. Every other version I tried would run at least 4 times slower. 
+ // The ideal situation would be if we could just write the code once, and the compiler would + // automatically produce the best possible set of machine instructions, instead of us having to manually + // write vectorized versions for AVX, ARM_NEON, etc. + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + uint32_t auxs[4]; + const int8_t * scales = (const int8_t*)auxs; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].hmask; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + q3 += 32; + } + a = aux8; + + memcpy(auxs, x[i].scales, 12); + uint32_t tmp = auxs[2]; + auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + for (int j = 0; j < QK_K/16; ++j) { + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; + +#endif + +} + +void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined(__VXE__) || defined(__VXE2__) + const uint8x16_t v_lm = vec_splat_u8(0x0F); + const int32x4_t v_z = vec_splat_s32(0); + + uint8x16_t v_x[2]; + int8x16_t v_xl[2]; + int8x16_t v_y[2]; + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums); + const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums); + const int16x8_t v_ysums = vec_padd_s16(v_ysumsl, v_ysumsh); + + memcpy(utmp, x[i].scales, 12); + + uint32x4_t v_mins8 = { 0 }; + v_mins8 = vec_insert(utmp[1] & kmask1, v_mins8, 0); + v_mins8 = vec_insert(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), v_mins8, 1); + + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + 
utmp[0] &= kmask1; + + const int16x8_t v_minsh = (int16x8_t)vec_unpackh((uint8x16_t)v_mins8); + + const int32x4_t v_minso = vec_mulo(v_ysums, v_minsh); + const int32x4_t v_minse = vec_mule(v_ysums, v_minsh); + const int32x4_t v_mins = v_minso + v_minse; + sumf -= dmin * (v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3]); + + const uint8_t * scales = (const uint8_t *)utmp; + const uint8_t * GGML_RESTRICT x0 = x[i].qs; + const int8_t * GGML_RESTRICT y0 = y[i].qs; + + int32_t sumi1 = 0; + int32_t sumi2 = 0; + + for (int j = 0; j < QK_K/64; ++j) { + v_x[0] = vec_xl(0 , x0); + v_x[1] = vec_xl(16, x0); + x0 += 32; + + v_y[0] = vec_xl(0 , y0); + v_y[1] = vec_xl(16, y0); + y0 += 32; + + v_xl[0] = (int8x16_t)vec_and(v_x[0], v_lm); + v_xl[1] = (int8x16_t)vec_and(v_x[1], v_lm); + + const int32x4_t p1 = ggml_vec_dot(ggml_vec_dot(v_z, v_xl[0], v_y[0]), v_xl[1], v_y[1]); + sumi1 += (p1[0] + p1[1] + p1[2] + p1[3]) * scales[2*j+0]; + + v_y[0] = vec_xl(0 , y0); + v_y[1] = vec_xl(16, y0); + y0 += 32; + + v_xl[0] = (int8x16_t)vec_sr(v_x[0], 4); + v_xl[1] = (int8x16_t)vec_sr(v_x[1], 4); + + const int32x4_t p2 = ggml_vec_dot(ggml_vec_dot(v_z, v_xl[0], v_y[0]), v_xl[1], v_y[1]); + sumi2 += (p2[0] + p2[1] + p2[2] + p2[3]) * scales[2*j+1]; + } + + sumf += d * (sumi1 + sumi2); + } + + *s = sumf; + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + a += 32; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + a += 32; q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int 
nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined(__VXE__) || defined(__VXE2__) + const uint8x16_t v_lm = vec_splat_u8(0x0F); + const uint8x16_t v_1m = vec_splat_u8(0x01); + const uint8x16_t v_2m = vec_splat_u8(0x02); + + const int32x4_t v_z = vec_splat_s32(0); + + const uchar8x16_t v_minsm = { + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF + }; + + int8x16_t q5b[4]; + uint8x16_t q5h[4]; + + uint8x16_t v_xl[2]; + uint8x16_t v_xh[2]; + int8x16_t v_y[4]; + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums); + const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums); + const int16x8_t v_ysums = vec_padd_s16(v_ysumsl, v_ysumsh); + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const uint8x16_t v_mins16 = vec_xl(0, (const uint8_t *)utmp); + const uint8x16_t v_mins8 = vec_perm(v_mins16, v_mins16, v_minsm); + const int16x8_t v_minsh = (int16x8_t)vec_unpackh(v_mins8); + + const int32x4_t v_minsho = vec_mulo(v_ysums, v_minsh); + const int32x4_t v_minshe = vec_mule(v_ysums, v_minsh); + const int32x4_t v_mins = vec_add(v_minsho, v_minshe); + const int32_t mins = v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3]; + + const uint8_t * scales = (const uint8_t *)utmp; + const uint8_t * GGML_RESTRICT x0l = x[i].qs; + const uint8_t * GGML_RESTRICT x0h = x[i].qh; + const int8_t * GGML_RESTRICT y0 = y[i].qs; + + v_xh[0] = vec_xl(0 , x0h); + v_xh[1] = vec_xl(16, x0h); + + int32_t sumi = 0; + for (int j = 0; j < QK_K/64; ++j) { + v_xl[0] = vec_xl(0 , x0l); + v_xl[1] = vec_xl(16, x0l); + x0l += 32; + + v_y[0] = vec_xl(0 , y0); + v_y[1] = vec_xl(16, y0); + v_y[2] = vec_xl(32, y0); + v_y[3] = vec_xl(48, y0); + y0 += 64; + + q5h[0] = vec_sl(vec_and(v_1m, v_xh[0]), 4); + q5h[1] = vec_sl(vec_and(v_1m, v_xh[1]), 4); + q5h[2] = vec_sl(vec_and(v_2m, v_xh[0]), 3); + q5h[3] = vec_sl(vec_and(v_2m, v_xh[1]), 3); + v_xh[0] = vec_sr(v_xh[0], 2); + v_xh[1] = vec_sr(v_xh[1], 2); + + q5b[0] = (int8x16_t)vec_or(vec_and(v_xl[0], v_lm), q5h[0]); + q5b[1] = (int8x16_t)vec_or(vec_and(v_xl[1], v_lm), q5h[1]); + q5b[2] = (int8x16_t)vec_or(vec_sr(v_xl[0], 4), q5h[2]); + q5b[3] = (int8x16_t)vec_or(vec_sr(v_xl[1], 4), q5h[3]); + + int32x4_t sumi0 = ggml_vec_dot(ggml_vec_dot(v_z, q5b[0], v_y[0]), q5b[1], v_y[1]); + int32x4_t sumi1 = ggml_vec_dot(ggml_vec_dot(v_z, q5b[2], v_y[2]), q5b[3], v_y[3]); + + sumi += (sumi0[0] + sumi0[1] + sumi0[2] + sumi0[3]) * *scales++; + sumi += (sumi1[0] + sumi1[1] + sumi1[2] + sumi1[3]) * *scales++; + } + + sumf += d * sumi - dmin * mins; + } + + *s = sumf; + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 
1; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q6_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__VXE__) || defined(__VXE2__) + float sum = 0; + + // Lower 4-bit and upper 2-bit masks + const uint8x16_t v_lm = vec_splat_u8(0x0F); + const uint8x16_t v_um = vec_splat_u8(0x03); + + const int32x4_t v_z = vec_splat_s32(0); + + int8x16_t q6b[4]; + uint8x16_t q6h[4]; + + uint8x16_t v_xl[4]; + uint8x16_t v_xh[2]; + int8x16_t v_y[4]; + + for (int i = 0; i < nb; ++i) { + const float d_all = GGML_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT x0l = x[i].ql; + const uint8_t * GGML_RESTRICT x0h = x[i].qh; + const int8_t * GGML_RESTRICT y0 = y[i].qs; + + const int8_t * GGML_RESTRICT scale = x[i].scales; + + const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums); + const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums); + + const int8x16_t v_scale = vec_xl(0, scale); + const int16x8_t v_scalel = vec_unpackh(v_scale); + const int16x8_t v_scaleh = vec_unpackl(v_scale); + + const int32x4_t v_minslo = vec_mulo(v_ysumsl, v_scalel); + const int32x4_t v_minsle = vec_mule(v_ysumsl, v_scalel); + const int32x4_t v_minsho = vec_mulo(v_ysumsh, v_scaleh); + const int32x4_t v_minshe = vec_mule(v_ysumsh, v_scaleh); + const int32x4_t v_mins = v_minslo + v_minsle + v_minsho + v_minshe; + + const int32_t mins = v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3]; + + int32_t isum = 0; + for (int j = 0; j < QK_K/128; ++j) { + // Load model upper 2 bits + v_xh[0] = vec_xl(0 , x0h); + v_xh[1] = vec_xl(16, x0h); + x0h += 32; + + // Load model lower 4 bits + v_xl[0] = vec_xl(0 , x0l); + v_xl[1] = vec_xl(16, x0l); + v_xl[2] = vec_xl(32, x0l); + v_xl[3] = vec_xl(48, x0l); + x0l 
+= 64; + + // Load activation quants + v_y[0] = vec_xl(0 , y0); + v_y[1] = vec_xl(16, y0); + v_y[2] = vec_xl(32, y0); + v_y[3] = vec_xl(48, y0); + y0 += 64; + + q6h[0] = vec_sl(vec_and(v_um, v_xh[0]), 4); + q6h[1] = vec_sl(vec_and(v_um, v_xh[1]), 4); + uint8x16_t shifted = vec_sr(v_xh[0], 2); + q6h[2] = vec_sl(vec_and(v_um, shifted), 4); + shifted = vec_sr(v_xh[1], 2); + q6h[3] = vec_sl(vec_and(v_um, shifted), 4); + + q6b[0] = (int8x16_t)(vec_or(vec_and(v_xl[0], v_lm), q6h[0])); + q6b[1] = (int8x16_t)(vec_or(vec_and(v_xl[1], v_lm), q6h[1])); + q6b[2] = (int8x16_t)(vec_or(vec_and(v_xl[2], v_lm), q6h[2])); + q6b[3] = (int8x16_t)(vec_or(vec_and(v_xl[3], v_lm), q6h[3])); + + int32x4_t summs0 = ggml_vec_dot(v_z, q6b[0], v_y[0]); + int32x4_t summs1 = ggml_vec_dot(v_z, q6b[1], v_y[1]); + int32x4_t summs2 = ggml_vec_dot(v_z, q6b[2], v_y[2]); + int32x4_t summs3 = ggml_vec_dot(v_z, q6b[3], v_y[3]); + + isum += (summs0[0] + summs0[1] + summs0[2] + summs0[3]) * scale[0] + + (summs1[0] + summs1[1] + summs1[2] + summs1[3]) * scale[1] + + (summs2[0] + summs2[1] + summs2[2] + summs2[3]) * scale[2] + + (summs3[0] + summs3[1] + summs3[2] + summs3[3]) * scale[3]; + + scale += 4; + + + // Load activation quants + v_y[0] = vec_xl(0 , y0); + v_y[1] = vec_xl(16, y0); + v_y[2] = vec_xl(32, y0); + v_y[3] = vec_xl(48, y0); + y0 += 64; + + shifted = vec_sr(v_xh[0], 4); + q6h[0] = vec_sl(vec_and(v_um, shifted), 4); + shifted = vec_sr(v_xh[1], 4); + q6h[1] = vec_sl(vec_and(v_um, shifted), 4); + shifted = vec_sr(v_xh[0], 6); + q6h[2] = vec_sl(vec_and(v_um, shifted), 4); + shifted = vec_sr(v_xh[1], 6); + q6h[3] = vec_sl(vec_and(v_um, shifted), 4); + + q6b[0] = (int8x16_t)(vec_or(vec_sr(v_xl[0], 4), q6h[0])); + q6b[1] = (int8x16_t)(vec_or(vec_sr(v_xl[1], 4), q6h[1])); + q6b[2] = (int8x16_t)(vec_or(vec_sr(v_xl[2], 4), q6h[2])); + q6b[3] = (int8x16_t)(vec_or(vec_sr(v_xl[3], 4), q6h[3])); + + summs0 = ggml_vec_dot(v_z, q6b[0], v_y[0]); + summs1 = ggml_vec_dot(v_z, q6b[1], v_y[1]); + summs2 = ggml_vec_dot(v_z, q6b[2], v_y[2]); + summs3 = ggml_vec_dot(v_z, q6b[3], v_y[3]); + + isum += (summs0[0] + summs0[1] + summs0[2] + summs0[3]) * scale[0] + + (summs1[0] + summs1[1] + summs1[2] + summs1[3]) * scale[1] + + (summs2[0] + summs2[1] + summs2[2] + summs2[3]) * scale[2] + + (summs3[0] + summs3[1] + summs3[2] + summs3[3]) * scale[3]; + + scale += 4; + } + + sum += d_all * y[i].d * (isum - 32 * mins); + } + + *s = sum; + +#else + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; + } + a += 128; + q4 += 64; + qh += 32; + } + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/16; ++j) { + int scale = x[i].scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] 
+= scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +// #if defined(__VXE__) || defined(__VXE2__) +// static const int8_t keven_signs_q2xs[1024] = { +// 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, +// 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, +// 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, +// 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, +// 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, +// 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, +// 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, +// 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, +// 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, +// 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, +// 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, +// 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, +// 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, +// 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, +// 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, +// 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, +// 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, +// 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, +// 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, +// 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, +// 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, +// 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, +// 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, +// 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, +// 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, +// 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, +// 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, +// 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, +// 
1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, +// 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, +// 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, +// 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, +// }; +// #endif + +// void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { +// assert(n % QK_K == 0); +// assert(nrc == 1); +// UNUSED(nrc); +// UNUSED(bx); +// UNUSED(by); +// UNUSED(bs); + +// const block_iq2_xxs * GGML_RESTRICT x = vx; +// const block_q8_K * GGML_RESTRICT y = vy; + +// const int nb = n / QK_K; + +// #if defined(__VXE__) || defined(__VXE2__) +// const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + +// uint32_t aux32[4]; +// const uint8_t * aux8 = (const uint8_t *)aux32; + +// float sumf = 0; + +// for (int i = 0; i < nb; ++i) { +// const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; +// const uint16_t * GGML_RESTRICT q2 = x[i].qs; +// const int8_t * GGML_RESTRICT q8 = y[i].qs; + +// float sumf1 = 0, sumf2 = 0; + +// for (int ib32 = 0; ib32 < QK_K/32; ib += 2) { +// int8x16_t q8b0 = vec_xl( 0, q8); +// int8x16_t qb81 = vec_xl(16, q8); +// int8x16_t q8b2 = vec_xl(32, q8); +// int8x16_t q8b3 = vec_xl(48, q8); +// q8 += 64; + +// memcpy(aux32, q2, 4 * sizeof(uint32_t)); +// q2 += 8; + +// int8x16_t q2u0 = { *(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1]) }; +// int8x16_t q2u1 = { *(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3]) }; +// int8x16_t q2u2 = { *(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9]) }; +// int8x16_t q2u3 = { *(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11]) }; + +// int8x16_t q2s0 = { *(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127)) }; +// int8x16_t q2s1 = { *(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127)) }; +// int8x16_t q2s2 = { *(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127)) }; +// int8x16_t q2s3 = { *(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127)) }; + +// q2u0 = vec_mul(q2u0, q2s0); +// q2u1 = vec_mul(q2u1, q2s1); +// q2u2 = vec_mul(q2u2, q2s2); +// q2u3 = vec_mul(q2u3, q2s3); + +// const int32x4_t p1 = ggml_vec_dot(ggml_vec_dot(vec_splat_s32(0), q2u0, q8b0), q2u1, q8b1); +// const int32x4_t p2 = ggml_vec_dot(ggml_vec_dot(vec_splat_s32(0), q2u2, q8b2), q2u3, q8b3); + +// sumf1 += (p1[0] + p1[1] + p1[2] + p1[3]) * (0.5f + (aux32[1] >> 28)); +// sumf2 += (p2[0] + p2[1] + p2[2] + p2[3]) * (0.5f + (aux32[3] >> 28)); +// } + +// sumf += d * (sumf1 + sumf2); +// } + +// *s = 0.25f * sumf; + +// #else + +// uint32_t aux32[2]; +// const uint8_t * aux8 = (const uint8_t *)aux32; + +// float sumf = 0.f; +// for (int i = 0; i < nb; ++i) { +// const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; +// const uint16_t * GGML_RESTRICT q2 = x[i].qs; +// const int8_t * GGML_RESTRICT q8 = y[i].qs; +// int32_t bsum = 0; +// for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { +// 
memcpy(aux32, q2, 2*sizeof(uint32_t)); +// q2 += 4; +// const uint32_t ls = 2*(aux32[1] >> 28) + 1; +// int32_t sumi = 0; +// for (int l = 0; l < 4; ++l) { +// const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); +// const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; +// for (int j = 0; j < 8; ++j) { +// sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); +// } +// q8 += 8; +// } +// bsum += sumi * ls; +// } +// sumf += d * bsum; +// } +// *s = 0.125f * sumf; +// #endif +// } + +void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK4_NL == 0); + static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); + + const block_iq4_nl * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + const int nb = n / QK4_NL; + + int ib = 0; + float sumf = 0; + +#if defined(__VXE__) || defined(__VXE2__) + const int8x16_t v_k = vec_xl(0, kvalues_iq4nl); + const uint8x16_t v_m = vec_splat_u8(0x0F); + + for (; ib < nb; ++ib) { + const block_iq4_nl * GGML_RESTRICT x0 = &x[ib]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; + + const uint8x16_t v_x = vec_xl(0, x0->qs); + int8x16_t v_xl = (int8x16_t)vec_and(v_x, v_m); + int8x16_t v_xh = (int8x16_t)vec_sr(v_x, 4); + + v_xl = vec_perm(v_k, v_k, (uchar8x16_t)v_xl); + v_xh = vec_perm(v_k, v_k, (uchar8x16_t)v_xh); + + const int8x16_t v_yl = vec_xl(0 , y0->qs); + const int8x16_t v_yh = vec_xl(QK8_0/2, y0->qs); + const int32x4_t v_xy = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); + + sumf += GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d) * (v_xy[0] + v_xy[1] + v_xy[2] + v_xy[3]); + } + +#endif + for (; ib < nb; ++ib) { + const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; + sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; + } + sumf += d * (sumi1 + sumi2); + } + *s = sumf; +} + +void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_K == 0); + + const block_iq4_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__VXE__) || defined(__VXE2__) + const int8x16_t v_k = vec_xl(0, kvalues_iq4nl); + const uint8x16_t v_m = vec_splat_u8(0x0F); + + float sumf = 0; + + for (int ibl = 0; ibl < nb; ++ibl) { + const uint8_t * GGML_RESTRICT q4 = x[ibl].qs; + const int8_t * GGML_RESTRICT q8 = y[ibl].qs; + + uint16_t h = x[ibl].scales_h; + + int sumi1 = 0, sumi2 = 0; + for (int ib = 0; ib < QK_K/64; ++ib) { + const uint8x16_t v_x0 = vec_xl(0 , q4); + const uint8x16_t v_x1 = vec_xl(QK4_NL/2, q4); + q4 += 32; + + int8x16_t v_x0l = (int8x16_t)vec_and(v_x0, v_m); + int8x16_t v_x0h = (int8x16_t)vec_sr(v_x0, 4); + int8x16_t v_x1l = (int8x16_t)vec_and(v_x1, v_m); + int8x16_t v_x1h = (int8x16_t)vec_sr(v_x1, 4); + + v_x0l = vec_perm(v_k, v_k, (uchar8x16_t)v_x0l); + v_x0h = vec_perm(v_k, v_k, (uchar8x16_t)v_x0h); + v_x1l = vec_perm(v_k, v_k, (uchar8x16_t)v_x1l); + v_x1h = vec_perm(v_k, v_k, (uchar8x16_t)v_x1h); + + const int8x16_t v_y0 = vec_xl( 0, q8); + const int8x16_t v_y1 = vec_xl(16, 
q8); + const int8x16_t v_y2 = vec_xl(32, q8); + const int8x16_t v_y3 = vec_xl(48, q8); + q8 += 64; + + int32x4_t vsumi0 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x0l, v_y0), v_x0h, v_y1); + int32x4_t vsumi1 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x1l, v_y2), v_x1h, v_y3); + + int ls1 = ((x[ibl].scales_l[ib] & 0xF) | ((h << 4) & 0x30)) - 32; + int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32; + + h >>= 4; + + sumi1 += (vsumi0[0] + vsumi0[1] + vsumi0[2] + vsumi0[3]) * ls1; + sumi2 += (vsumi1[0] + vsumi1[1] + vsumi1[2] + vsumi1[3]) * ls2; + } + + sumf += GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2); + } + + *s = sumf; + +#else + float sumf = 0; + for (int ibl = 0; ibl < nb; ++ibl) { + const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + uint16_t h = x[ibl].scales_h; + const uint8_t * qs = x[ibl].qs; + const int8_t * q8 = y[ibl].qs; + for (int ib = 0; ib < QK_K/32; ib += 2) { + const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30); + const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30); + h >>= 4; + const float d1 = d4d8*(ls1 - 32); + const float d2 = d4d8*(ls2 - 32); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d1 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + sumi1 = sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d2 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + } + } + *s = sumf; +#endif +} + diff --git a/ggml/src/ggml-cpu/arch/wasm/quants.c b/ggml/src/ggml-cpu/arch/wasm/quants.c new file mode 100644 index 0000000000000..4ec97f533f1e4 --- /dev/null +++ b/ggml/src/ggml-cpu/arch/wasm/quants.c @@ -0,0 +1,1480 @@ +#define GGML_COMMON_IMPL_C +#include "ggml-common.h" +#include "ggml-quants.h" +#include "ggml-impl.h" +#include "ggml-cpu.h" + +#include "../../quants.h" +#include "../../ggml-cpu-impl.h" + +#include +#include +#include +#include +#include // for qsort +#include // for GGML_ASSERT + +#define GROUP_MAX_EPS 1e-15f +#define GROUP_MAX_EPS_IQ3_XXS 1e-8f +#define GROUP_MAX_EPS_IQ2_S 1e-8f +#define GROUP_MAX_EPS_IQ1_M 1e-7f +#define GROUP_MAX_EPS_IQ1_S 1e-12f + +#define UNUSED GGML_UNUSED + +#if defined(__wasm_simd128__) +#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s +#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s) +#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s) +#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s) +#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s) +#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s) +#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s) +#define B8(c,s ) B7(c,s, c), B7(c,s, s) + +// precomputed tables for expanding 8bits to 8 bytes: +static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4 +static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 +#endif + +void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0 * GGML_RESTRICT y = vy; + +#if defined __wasm_simd128__ + for (int i = 0; i < nb; i++) { + v128_t srcv [8]; + v128_t asrcv[8]; + v128_t amaxv[8]; + + for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; 
j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); + + const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), + wasm_f32x4_extract_lane(amaxv[0], 1)), + MAX(wasm_f32x4_extract_lane(amaxv[0], 2), + wasm_f32x4_extract_lane(amaxv[0], 3))); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = GGML_FP32_TO_FP16(d); + + for (int j = 0; j < 8; j++) { + const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); + const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); + + y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); + y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); + y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); + y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); + } + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_0_ref(x, y, k); +#endif +} + +void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK8_1 == 0); + const int nb = k / QK8_1; + + block_q8_1 * GGML_RESTRICT y = vy; +#if defined __wasm_simd128__ + for (int i = 0; i < nb; i++) { + v128_t srcv [8]; + v128_t asrcv[8]; + v128_t amaxv[8]; + + for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); + + const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), + wasm_f32x4_extract_lane(amaxv[0], 1)), + MAX(wasm_f32x4_extract_lane(amaxv[0], 2), + wasm_f32x4_extract_lane(amaxv[0], 3))); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 
1.0f/d : 0.0f; + + y[i].d = GGML_FP32_TO_FP16(d); + + v128_t accv = wasm_i32x4_splat(0); + + for (int j = 0; j < 8; j++) { + const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); + const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); + + y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); + y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); + y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); + y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); + + accv = wasm_i32x4_add(accv, vi); + } + + y[i].s = GGML_FP32_TO_FP16( + d * (wasm_i32x4_extract_lane(accv, 0) + + wasm_i32x4_extract_lane(accv, 1) + + wasm_i32x4_extract_lane(accv, 2) + + wasm_i32x4_extract_lane(accv, 3))); + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_1_ref(x, y, k); +#endif +} + +//===================================== Q8_K ============================================== + +void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { +#ifdef __wasm_simd128__ + assert(k % QK_K == 0); + const int64_t nb = k / QK_K; + block_q8_K * GGML_RESTRICT yc = y; // Cast to proper type + + for (int i = 0; i < nb; i++) { + const float * x_block = x + i * QK_K; + + v128_t min_vec = wasm_v128_load(x_block); + v128_t max_vec = min_vec; + + for (int j = 4; j < QK_K; j += 4) { + v128_t x_vec = wasm_v128_load(x_block + j); + max_vec = wasm_f32x4_pmax(max_vec, x_vec); + min_vec = wasm_f32x4_pmin(min_vec, x_vec); + } + max_vec = wasm_f32x4_pmax(max_vec, wasm_i32x4_shuffle(max_vec, max_vec, 2, 3, 0, 1)); + max_vec = wasm_f32x4_pmax(max_vec, wasm_i32x4_shuffle(max_vec, max_vec, 1, 0, 3, 2)); + min_vec = wasm_f32x4_pmin(min_vec, wasm_i32x4_shuffle(min_vec, min_vec, 2, 3, 0, 1)); + min_vec = wasm_f32x4_pmin(min_vec, wasm_i32x4_shuffle(min_vec, min_vec, 1, 0, 3, 2)); + float max = wasm_f32x4_extract_lane(max_vec, 0); + float min = wasm_f32x4_extract_lane(min_vec, 0); + float amax = -min > max ? 
min : max; + + if (amax == 0.0f) { + yc[i].d = 0.0f; + const v128_t zero = wasm_i8x16_splat(0); + for (int j = 0; j < QK_K; j += 16) { + wasm_v128_store(yc[i].qs + j, zero); + } + continue; + } + + const float iscale = -127.0f / amax; + const v128_t scale_vec = wasm_f32x4_splat(iscale); + + // Process 16 elements per iteration + for (int j = 0, jb = 0; j < QK_K; j += 16, jb++) { + // Load and quantize 16 floats + v128_t x0 = wasm_v128_load(x_block + j); + v128_t x1 = wasm_v128_load(x_block + j + 4); + v128_t x2 = wasm_v128_load(x_block + j + 8); + v128_t x3 = wasm_v128_load(x_block + j + 12); + + v128_t q0 = wasm_f32x4_nearest(wasm_f32x4_mul(x0, scale_vec)); + v128_t q1 = wasm_f32x4_nearest(wasm_f32x4_mul(x1, scale_vec)); + v128_t q2 = wasm_f32x4_nearest(wasm_f32x4_mul(x2, scale_vec)); + v128_t q3 = wasm_f32x4_nearest(wasm_f32x4_mul(x3, scale_vec)); + + // Convert to i32 with saturation + v128_t i0 = wasm_i32x4_trunc_sat_f32x4(q0); + v128_t i1 = wasm_i32x4_trunc_sat_f32x4(q1); + v128_t i2 = wasm_i32x4_trunc_sat_f32x4(q2); + v128_t i3 = wasm_i32x4_trunc_sat_f32x4(q3); + + // Pack into 16 i8 values + v128_t i8 = wasm_i8x16_narrow_i16x8( + wasm_i16x8_narrow_i32x4(i0, i1), + wasm_i16x8_narrow_i32x4(i2, i3) + ); + wasm_v128_store(yc[i].qs + j, i8); + + // Calculate bsums using SIMD + v128_t sum16 = wasm_i16x8_add( + wasm_i16x8_extend_low_i8x16(i8), + wasm_i16x8_extend_high_i8x16(i8) + ); + v128_t sum32 = wasm_i32x4_add( + wasm_i32x4_extend_low_i16x8(sum16), + wasm_i32x4_extend_high_i16x8(sum16) + ); + sum32 = wasm_i32x4_add(sum32, wasm_i32x4_shuffle(sum32, sum32, 2, 3, 0, 1)); + sum32 = wasm_i32x4_add(sum32, wasm_i32x4_shuffle(sum32, sum32, 1, 0, 3, 2)); + yc[i].bsums[jb] = wasm_i32x4_extract_lane(sum32, 0); + } + + yc[i].d = 1.0f / iscale; + } +#else + quantize_row_q8_K_ref(x, y, k); +#endif +} + + +//===================================== Dot products ================================= + +void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined __wasm_simd128__ + v128_t sumv = wasm_f32x4_splat(0.0f); + + const v128_t m4b = wasm_i8x16_splat(0x0F); + const v128_t s8b = wasm_i8x16_splat(0x8); + + for (; ib + 1 < nb; ib += 2) { + const block_q4_0 * GGML_RESTRICT x0 = &x[ib]; + const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + // Load and process x0 + v128_t v0_0 = wasm_v128_load(x0->qs); + v128_t v0_0l = wasm_v128_and(v0_0, m4b); + v128_t v0_0h = wasm_u8x16_shr(v0_0, 4); + v128_t v0_0ls = wasm_i8x16_sub(v0_0l, s8b); + v128_t v0_0hs = wasm_i8x16_sub(v0_0h, s8b); + + // Load y0 vectors + v128_t y0_l = wasm_v128_load(y0->qs); + v128_t y0_h = wasm_v128_load(y0->qs + 16); + + // Extend to i16x8 and compute dot products + v128_t dx0l = wasm_i16x8_extend_low_i8x16(v0_0ls); + v128_t dx0h = wasm_i16x8_extend_high_i8x16(v0_0ls); + v128_t dx0hl = wasm_i16x8_extend_low_i8x16(v0_0hs); + v128_t dx0hh = wasm_i16x8_extend_high_i8x16(v0_0hs); + + v128_t dy0ll = wasm_i16x8_extend_low_i8x16(y0_l); + v128_t dy0lh = wasm_i16x8_extend_high_i8x16(y0_l); + v128_t dy0hl = wasm_i16x8_extend_low_i8x16(y0_h); + v128_t dy0hh = 
wasm_i16x8_extend_high_i8x16(y0_h); + + v128_t dp0 = wasm_i32x4_add( + wasm_i32x4_add( + wasm_i32x4_dot_i16x8(dx0l, dy0ll), + wasm_i32x4_dot_i16x8(dx0h, dy0lh) + ), + wasm_i32x4_add( + wasm_i32x4_dot_i16x8(dx0hl, dy0hl), + wasm_i32x4_dot_i16x8(dx0hh, dy0hh) + ) + ); + + // Load and process x1 + v128_t v0_1 = wasm_v128_load(x1->qs); + v128_t v0_1l = wasm_v128_and(v0_1, m4b); + v128_t v0_1h = wasm_u8x16_shr(v0_1, 4); + v128_t v0_1ls = wasm_i8x16_sub(v0_1l, s8b); + v128_t v0_1hs = wasm_i8x16_sub(v0_1h, s8b); + + // Load y1 vectors + v128_t y1_l = wasm_v128_load(y1->qs); + v128_t y1_h = wasm_v128_load(y1->qs + 16); + + // Extend to i16x8 and compute dot products + v128_t dx1l = wasm_i16x8_extend_low_i8x16(v0_1ls); + v128_t dx1h = wasm_i16x8_extend_high_i8x16(v0_1ls); + v128_t dx1hl = wasm_i16x8_extend_low_i8x16(v0_1hs); + v128_t dx1hh = wasm_i16x8_extend_high_i8x16(v0_1hs); + + v128_t dy1ll = wasm_i16x8_extend_low_i8x16(y1_l); + v128_t dy1lh = wasm_i16x8_extend_high_i8x16(y1_l); + v128_t dy1hl = wasm_i16x8_extend_low_i8x16(y1_h); + v128_t dy1hh = wasm_i16x8_extend_high_i8x16(y1_h); + + v128_t dp1 = wasm_i32x4_add( + wasm_i32x4_add( + wasm_i32x4_dot_i16x8(dx1l, dy1ll), + wasm_i32x4_dot_i16x8(dx1h, dy1lh) + ), + wasm_i32x4_add( + wasm_i32x4_dot_i16x8(dx1hl, dy1hl), + wasm_i32x4_dot_i16x8(dx1hh, dy1hh) + ) + ); + + // Accumulate results with scaling + float scale0 = GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d); + float scale1 = GGML_FP16_TO_FP32(x1->d) * GGML_FP16_TO_FP32(y1->d); + + sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(dp0), wasm_f32x4_splat(scale0))); + sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(dp1), wasm_f32x4_splat(scale1))); + } + + sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + + wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F) - 8; + const int v1 = (x[ib].qs[j] >> 4) - 8; + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + } + + *s = sumf; +} + +void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + +#if defined __wasm_simd128__ + v128_t sumv = wasm_f32x4_splat(0.0f); + + uint32_t qh_; + uint64_t tmp[4]; + + // TODO: check if unrolling this is better + for (; ib < nb; ++ib) { + const block_q5_0 * GGML_RESTRICT x0 = &x[ib]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; + + const v128_t m4b = wasm_i8x16_splat(0x0F); + + // extract the 5th bit + memcpy(&qh_, x0->qh, sizeof(qh_)); + + tmp[0] = table_b2b_1[(qh_ >> 0) & 0xFF]; + tmp[1] = table_b2b_1[(qh_ >> 8) & 0xFF]; + tmp[2] = table_b2b_1[(qh_ >> 16) & 0xFF]; + tmp[3] = table_b2b_1[(qh_ >> 24) ]; + + const v128_t qhl = wasm_v128_load(tmp + 0); + const v128_t qhh = wasm_v128_load(tmp + 2); + + const v128_t v0 = wasm_v128_load(x0->qs); + + // 4-bit -> 8-bit + const v128_t v0l = wasm_v128_and (v0, m4b); + const v128_t v0h = wasm_u8x16_shr(v0, 4); + + // add high bit 
and sub 16 (equivalent to sub 0x10 when bit is zero) + const v128_t v0lf = wasm_i8x16_sub(v0l, qhl); + const v128_t v0hf = wasm_i8x16_sub(v0h, qhh); + + // load y + const v128_t v1l = wasm_v128_load(y0->qs); + const v128_t v1h = wasm_v128_load(y0->qs + 16); + + // int8x16 -> int16x8 + const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf); + const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); + const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); + const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); + + const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); + const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); + const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); + const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); + + // dot product + sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4( + wasm_i32x4_add( + wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), + wasm_i32x4_dot_i16x8(v0lfh, v1lh)), + wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), + wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), + wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d)))); + } + + sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + + wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); + +#endif + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; + const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); + + const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); + const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + } + + *s = sumf; +} + +void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_1); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + +#if defined __wasm_simd128__ + v128_t sumv = wasm_f32x4_splat(0.0f); + + float summs = 0.0f; + + uint32_t qh_; + uint64_t tmp[4]; + + // TODO: check if unrolling this is better + for (; ib < nb; ++ib) { + const block_q5_1 * GGML_RESTRICT x0 = &x[ib]; + const block_q8_1 * GGML_RESTRICT y0 = &y[ib]; + + summs += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s); + + const v128_t m4b = wasm_i8x16_splat(0x0F); + + // extract the 5th bit + memcpy(&qh_, x0->qh, sizeof(qh_)); + + tmp[0] = table_b2b_0[(qh_ >> 0) & 0xFF]; + tmp[1] = table_b2b_0[(qh_ >> 8) & 0xFF]; + tmp[2] = table_b2b_0[(qh_ >> 16) & 0xFF]; + tmp[3] = table_b2b_0[(qh_ >> 24) ]; + + const v128_t qhl = wasm_v128_load(tmp + 0); + const v128_t qhh = wasm_v128_load(tmp + 2); + + const v128_t v0 = wasm_v128_load(x0->qs); + + // 4-bit -> 8-bit + const v128_t v0l = wasm_v128_and (v0, m4b); + const v128_t v0h = wasm_u8x16_shr(v0, 4); + + // add high bit + const v128_t v0lf = wasm_v128_or(v0l, qhl); + const v128_t v0hf = wasm_v128_or(v0h, qhh); + + // load y + const v128_t v1l = wasm_v128_load(y0->qs); + const v128_t v1h = wasm_v128_load(y0->qs + 16); + + // int8x16 -> int16x8 + const v128_t v0lfl = 
wasm_i16x8_extend_low_i8x16 (v0lf); + const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); + const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); + const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); + + const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); + const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); + const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); + const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); + + // dot product + sumv = wasm_f32x4_add(sumv, + wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add( + wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), + wasm_i32x4_dot_i16x8(v0lfh, v1lh)), + wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), + wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), + wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d)))); + } + + sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + + wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs; + +#endif + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; + const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; + + const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; + const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q8_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined __wasm_simd128__ + v128_t sumv = wasm_f32x4_splat(0.0f); + + for (; ib < nb; ++ib) { + const block_q8_0 * GGML_RESTRICT x0 = &x[ib]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; + + const v128_t x0_0 = wasm_v128_load(x0->qs); + const v128_t x0_1 = wasm_v128_load(x0->qs + 16); + const v128_t y0_0 = wasm_v128_load(y0->qs); + const v128_t y0_1 = wasm_v128_load(y0->qs + 16); + + // Extend 8-bit to 16-bit + const v128_t x0_0l = wasm_i16x8_extend_low_i8x16(x0_0); + const v128_t x0_0h = wasm_i16x8_extend_high_i8x16(x0_0); + const v128_t x0_1l = wasm_i16x8_extend_low_i8x16(x0_1); + const v128_t x0_1h = wasm_i16x8_extend_high_i8x16(x0_1); + + const v128_t y0_0l = wasm_i16x8_extend_low_i8x16(y0_0); + const v128_t y0_0h = wasm_i16x8_extend_high_i8x16(y0_0); + const v128_t y0_1l = wasm_i16x8_extend_low_i8x16(y0_1); + const v128_t y0_1h = wasm_i16x8_extend_high_i8x16(y0_1); + + // Compute dot products + const v128_t dx0_0 = wasm_i32x4_dot_i16x8(x0_0l, y0_0l); + const v128_t dx0_1 = wasm_i32x4_dot_i16x8(x0_0h, y0_0h); + const v128_t dx1_0 = wasm_i32x4_dot_i16x8(x0_1l, y0_1l); + const v128_t dx1_1 = wasm_i32x4_dot_i16x8(x0_1h, y0_1h); + + // Sum all dot products + const v128_t sum_dots = wasm_i32x4_add(wasm_i32x4_add(dx0_0, dx0_1), wasm_i32x4_add(dx1_0, dx1_1)); + + // Convert to float and accumulate + const float scale = GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d); + sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(sum_dots), wasm_f32x4_splat(scale))); + } + + sumf 
= wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + + wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); + +#endif + for (; ib < nb; ++ib) { + int sumi = 0; + + for (int j = 0; j < qk; j++) { + sumi += x[ib].qs[j]*y[ib].qs[j]; + } + + sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + } + + *s = sumf; +} + +void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q2_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __wasm_simd128__ + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + const uint8_t * q2 = x[i].qs; + const int8_t * q8 = y[i].qs; + const uint8_t * sc = x[i].scales; + + // Vectorized summs calculation + v128_t summs_vec = wasm_i32x4_splat(0); + { + v128_t sc_vec = wasm_v128_load(sc); + v128_t sc_upper = wasm_u8x16_shr(sc_vec, 4); + + v128_t sc_low = wasm_u16x8_extend_low_u8x16(sc_upper); + v128_t sc_high = wasm_u16x8_extend_high_u8x16(sc_upper); + + v128_t bsums1 = wasm_v128_load(&y[i].bsums[0]); + v128_t bsums2 = wasm_v128_load(&y[i].bsums[8]); + + summs_vec = wasm_i32x4_add( + wasm_i32x4_add(wasm_i32x4_dot_i16x8(sc_low, bsums1), + wasm_i32x4_dot_i16x8(sc_high, bsums2)), + summs_vec + ); + + summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 2, 3, 0, 1)); + summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 1, 0, 3, 2)); + } + int32_t summs = wasm_i32x4_extract_lane(summs_vec, 0); + + // Vectorized isum calculation + int32_t isum = 0; + const uint8_t * sc_ptr = sc; + const int k_iters = QK_K/128; + + for (int k = 0; k < k_iters; ++k) { + v128_t isum_vec = wasm_i32x4_splat(0); + int shift = 0; + + for (int j = 0; j < 4; ++j) { + const int d0 = (sc_ptr[0] & 0xF); + const int d1 = (sc_ptr[1] & 0xF); + sc_ptr += 2; + + // Process first 16 elements + v128_t q2_0 = wasm_v128_load(q2); + v128_t q8_0 = wasm_v128_load(q8); + v128_t q2_shift_0 = wasm_u8x16_shr(q2_0, shift); + v128_t q2_bits_0 = wasm_v128_and(q2_shift_0, wasm_i8x16_splat(0x03)); + + // Process next 16 elements + v128_t q2_1 = wasm_v128_load(q2 + 16); + v128_t q8_1 = wasm_v128_load(q8 + 16); + v128_t q2_shift_1 = wasm_u8x16_shr(q2_1, shift); + v128_t q2_bits_1 = wasm_v128_and(q2_shift_1, wasm_i8x16_splat(0x03)); + + // Calculate dot products + v128_t p0 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_low_i8x16(q8_0), + wasm_i16x8_extend_low_i8x16(q2_bits_0) + ); + v128_t p1 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_high_i8x16(q8_0), + wasm_i16x8_extend_high_i8x16(q2_bits_0) + ); + v128_t p2 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_low_i8x16(q8_1), + wasm_i16x8_extend_low_i8x16(q2_bits_1) + ); + v128_t p3 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_high_i8x16(q8_1), + wasm_i16x8_extend_high_i8x16(q2_bits_1) + ); + + // Accumulate scaled results + v128_t scaled = wasm_i32x4_add( + wasm_i32x4_mul(wasm_i32x4_add(p0, p1), wasm_i32x4_splat(d0)), + wasm_i32x4_mul(wasm_i32x4_add(p2, p3), wasm_i32x4_splat(d1)) + ); + + isum_vec = wasm_i32x4_add(isum_vec, scaled); + q8 += 32; + shift += 2; + } + q2 += 32; + + // Horizontal sum of isum_vec + isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 2, 3, 0, 1)); + isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 1, 0, 3, 2)); + isum += 
wasm_i32x4_extract_lane(isum_vec, 0); + } + + const float dall = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf += dall * isum - dmin * summs; + } + + *s = sumf; + +#else + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const uint8_t * q2 = x[i].qs; + const int8_t * q8 = y[i].qs; + const uint8_t * sc = x[i].scales; + + int summs = 0; + for (int j = 0; j < 16; ++j) { + summs += y[i].bsums[j] * (sc[j] >> 4); + } + + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + int isum = 0; + int is = 0; + int d; + for (int k = 0; k < QK_K/128; ++k) { + int shift = 0; + for (int j = 0; j < 4; ++j) { + d = sc[is++] & 0xF; + int isuml = 0; + for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + d = sc[is++] & 0xF; + isuml = 0; + for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + shift += 2; + q8 += 32; + } + q2 += 32; + } + sumf += dall * isum - dmin * summs; + } + *s = sumf; +#endif +} + +void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const uint32_t kmask1 = 0x03030303; + const uint32_t kmask2 = 0x0f0f0f0f; + + const block_q3_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __wasm_simd128__ + int8_t aux8[QK_K]; + float sums[8] = {0}; + uint32_t auxs[4]; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].hmask; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + // Process blocks with SIMD + int8_t * a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K; j += 128) { + for (int shift = 0; shift <= 6; shift += 2) { + v128_t v_m = wasm_i8x16_splat(m); + for (int l = 0; l < 32; l += 16) { + v128_t v_q3 = wasm_v128_load(q3 + l); + v128_t v_shift = wasm_i8x16_shr(v_q3, shift); + v128_t v_low2 = wasm_v128_and(v_shift, wasm_i8x16_splat(0x03)); + + v128_t v_hm = wasm_v128_load(hm + l); + v128_t v_mask = wasm_v128_and(v_hm, v_m); + v_mask = wasm_i8x16_ne(v_mask, wasm_i8x16_splat(0)); + + v_low2 = wasm_i8x16_sub(v_low2, wasm_v128_and(wasm_i8x16_splat(4), wasm_v128_not(v_mask))); + wasm_v128_store(a + l, v_low2); + } + a += 32; + m <<= 1; + } + q3 += 32; + } + + // Extract scales + memcpy(auxs, x[i].scales, 12); + uint32_t tmp = auxs[2]; + auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + const int8_t * scales = (const int8_t *)auxs; + + // SIMD dot product with register accumulators + v128_t v_acc0 = wasm_i32x4_splat(0); + v128_t v_acc1 = wasm_i32x4_splat(0); + a = aux8; + for (int j = 0; j < QK_K/16; ++j) { + const v128_t v_scale = wasm_i16x8_splat(scales[j] - 32); + + // Process 16 elements per iteration + for (int k = 0; k < 2; ++k) { + const v128_t v_q8 = wasm_i16x8_load8x8(q8); + const v128_t v_a = wasm_i16x8_load8x8(a); + + v128_t v_prod = wasm_i16x8_mul(v_q8, v_a); + v_prod = wasm_i16x8_mul(v_prod, v_scale); + + v_acc0 = wasm_i32x4_add(v_acc0, wasm_i32x4_extend_low_i16x8(v_prod)); + v_acc1 = wasm_i32x4_add(v_acc1, 
wasm_i32x4_extend_high_i16x8(v_prod)); + + q8 += 8; + a += 8; + } + } + + // Accumulate results + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const v128_t v_d = wasm_f32x4_splat(d); + v128_t v_sum = wasm_f32x4_add( + wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc0), v_d), + wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc1), v_d) + ); + + // Accumulate into sums vector + wasm_v128_store(sums, wasm_f32x4_add(wasm_v128_load(sums), v_sum)); + } + + // Horizontal sum + v128_t v_sum = wasm_f32x4_add(wasm_v128_load(sums), wasm_v128_load(sums + 4)); + sumf = wasm_f32x4_extract_lane(v_sum, 0) + + wasm_f32x4_extract_lane(v_sum, 1) + + wasm_f32x4_extract_lane(v_sum, 2) + + wasm_f32x4_extract_lane(v_sum, 3); + + *s = sumf; + +#else + // scalar version + // This function is written like this so the compiler can manage to vectorize most of it + // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the + // manually vectorized version above. Every other version I tried would run at least 4 times slower. + // The ideal situation would be if we could just write the code once, and the compiler would + // automatically produce the best possible set of machine instructions, instead of us having to manually + // write vectorized versions for AVX, ARM_NEON, etc. + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + uint32_t auxs[4]; + const int8_t * scales = (const int8_t*)auxs; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].hmask; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 
0 : 4); + a += 32; m <<= 1; + q3 += 32; + } + a = aux8; + + memcpy(auxs, x[i].scales, 12); + uint32_t tmp = auxs[2]; + auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + for (int j = 0; j < QK_K/16; ++j) { + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; + +#endif + +} + +void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined __wasm_simd128__ + const uint8_t * scales = (const uint8_t*)&utmp[0]; + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); // Corrected sign + + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + // Process scales and mins + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + // Sum mins * q8sums + int32_t sumi = 0; + const int16_t * GGML_RESTRICT q8sums = y[i].bsums; + const uint8_t * m = (const uint8_t *)&utmp[2]; + for (int j = 0; j < 16; j += 2) { + sumi += (q8sums[j] + q8sums[j+1]) * m[j/2]; + } + sumf -= dmin * sumi; + + int32_t sumi1 = 0; + int32_t sumi2 = 0; + + for (int j = 0; j < QK_K/64; ++j) { + // Load 64 4-bit weights (32 bytes) + const v128_t q4x0 = wasm_v128_load(q4); + const v128_t q4x1 = wasm_v128_load(q4 + 16); + q4 += 32; + + // Split into low/high nibbles + const v128_t q4l0 = wasm_v128_and(q4x0, wasm_i8x16_splat(0x0F)); + const v128_t q4h0 = wasm_u8x16_shr(q4x0, 4); + const v128_t q4l1 = wasm_v128_and(q4x1, wasm_i8x16_splat(0x0F)); + const v128_t q4h1 = wasm_u8x16_shr(q4x1, 4); + + // Load 64 8-bit values (64 bytes) + const v128_t q8x0 = wasm_v128_load(q8); + const v128_t q8x1 = wasm_v128_load(q8 + 16); + const v128_t q8x2 = wasm_v128_load(q8 + 32); + const v128_t q8x3 = wasm_v128_load(q8 + 48); + q8 += 64; + + // Low nibble products + v128_t vacc1 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_low_i8x16(q4l0), + wasm_i16x8_extend_low_i8x16(q8x0) + ); + vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_high_i8x16(q4l0), + wasm_i16x8_extend_high_i8x16(q8x0) + )); + vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_low_i8x16(q4l1), + wasm_i16x8_extend_low_i8x16(q8x1) + )); + vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8( + 
wasm_i16x8_extend_high_i8x16(q4l1), + wasm_i16x8_extend_high_i8x16(q8x1) + )); + + // High nibble products + v128_t vacc2 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_low_i8x16(q4h0), + wasm_i16x8_extend_low_i8x16(q8x2) + ); + vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_high_i8x16(q4h0), + wasm_i16x8_extend_high_i8x16(q8x2) + )); + vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_low_i8x16(q4h1), + wasm_i16x8_extend_low_i8x16(q8x3) + )); + vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_high_i8x16(q4h1), + wasm_i16x8_extend_high_i8x16(q8x3) + )); + + // Accumulate scaled results + int32_t vacc1_sum = wasm_i32x4_extract_lane(vacc1, 0) + wasm_i32x4_extract_lane(vacc1, 1) + + wasm_i32x4_extract_lane(vacc1, 2) + wasm_i32x4_extract_lane(vacc1, 3); + sumi1 += vacc1_sum * scales[2*j]; + + int32_t vacc2_sum = wasm_i32x4_extract_lane(vacc2, 0) + wasm_i32x4_extract_lane(vacc2, 1) + + wasm_i32x4_extract_lane(vacc2, 2) + wasm_i32x4_extract_lane(vacc2, 3); + sumi2 += vacc2_sum * scales[2*j+1]; + } + + sumf += d * (sumi1 + sumi2); + } + + *s = sumf; + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + a += 32; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + a += 32; q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + 
uint32_t utmp[4]; + +#if defined __wasm_simd128__ + //const uint8_t * scales = (const uint8_t*)&utmp[0]; + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); // Fixed sign + + const uint8_t * GGML_RESTRICT q5 = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + // Process scales and mins + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + // Sum mins * q8sums + int32_t sumi_mins = 0; + const int16_t * GGML_RESTRICT q8sums = y[i].bsums; + const uint8_t * m = (const uint8_t *)&utmp[2]; + for (int j = 0; j < 16; j += 2) { + sumi_mins += (q8sums[j] + q8sums[j+1]) * m[j/2]; + } + sumf -= dmin * sumi_mins; // Correct subtraction + + v128_t qh0 = wasm_v128_load(qh); + v128_t qh1 = wasm_v128_load(qh + 16); + const uint8_t * sc = (const uint8_t *)utmp; + + int32_t sumi = 0; + + for (int j = 0; j < QK_K/64; ++j) { + const int shift = j * 2; + v128_t qh_shift0 = wasm_u8x16_shr(qh0, shift); + v128_t qh_shift1 = wasm_u8x16_shr(qh1, shift); + + v128_t qh_low0 = wasm_i8x16_shl(wasm_v128_and(qh_shift0, wasm_i8x16_splat(0x01)), 4); + v128_t qh_high0 = wasm_i8x16_shl(wasm_v128_and(qh_shift0, wasm_i8x16_splat(0x02)), 3); + v128_t qh_low1 = wasm_i8x16_shl(wasm_v128_and(qh_shift1, wasm_i8x16_splat(0x01)), 4); + v128_t qh_high1 = wasm_i8x16_shl(wasm_v128_and(qh_shift1, wasm_i8x16_splat(0x02)), 3); + + v128_t q5_0 = wasm_v128_load(q5); + v128_t q5_1 = wasm_v128_load(q5 + 16); + q5 += 32; + + v128_t q5l_0 = wasm_v128_or(wasm_v128_and(q5_0, wasm_i8x16_splat(0x0F)), qh_low0); + v128_t q5h_0 = wasm_v128_or(wasm_u8x16_shr(q5_0, 4), qh_high0); + v128_t q5l_1 = wasm_v128_or(wasm_v128_and(q5_1, wasm_i8x16_splat(0x0F)), qh_low1); + v128_t q5h_1 = wasm_v128_or(wasm_u8x16_shr(q5_1, 4), qh_high1); + + v128_t q8_0 = wasm_v128_load(q8); + v128_t q8_1 = wasm_v128_load(q8 + 16); + v128_t q8_2 = wasm_v128_load(q8 + 32); + v128_t q8_3 = wasm_v128_load(q8 + 48); + q8 += 64; + + // Process low quants + v128_t pl0 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_low_i8x16(q5l_0), + wasm_i16x8_extend_low_i8x16(q8_0) + ); + pl0 = wasm_i32x4_add(pl0, wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_high_i8x16(q5l_0), + wasm_i16x8_extend_high_i8x16(q8_0) + )); + v128_t pl1 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_low_i8x16(q5l_1), + wasm_i16x8_extend_low_i8x16(q8_1) + ); + pl1 = wasm_i32x4_add(pl1, wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_high_i8x16(q5l_1), + wasm_i16x8_extend_high_i8x16(q8_1) + )); + v128_t sum_low = wasm_i32x4_add(pl0, pl1); + + // Process high quants + v128_t ph0 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_low_i8x16(q5h_0), + wasm_i16x8_extend_low_i8x16(q8_2) + ); + ph0 = wasm_i32x4_add(ph0, wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_high_i8x16(q5h_0), + wasm_i16x8_extend_high_i8x16(q8_2) + )); + v128_t ph1 = wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_low_i8x16(q5h_1), + wasm_i16x8_extend_low_i8x16(q8_3) + ); + ph1 = wasm_i32x4_add(ph1, wasm_i32x4_dot_i16x8( + wasm_i16x8_extend_high_i8x16(q5h_1), + wasm_i16x8_extend_high_i8x16(q8_3) + )); + v128_t sum_high = wasm_i32x4_add(ph0, ph1); + + // Accumulate with scale factors + int32_t sl = wasm_i32x4_extract_lane(sum_low, 0) + wasm_i32x4_extract_lane(sum_low, 1) + + wasm_i32x4_extract_lane(sum_low, 2) + 
wasm_i32x4_extract_lane(sum_low, 3); + int32_t sh = wasm_i32x4_extract_lane(sum_high, 0) + wasm_i32x4_extract_lane(sum_high, 1) + + wasm_i32x4_extract_lane(sum_high, 2) + wasm_i32x4_extract_lane(sum_high, 3); + + sumi += sl * sc[2*j] + sh * sc[2*j+1]; + } + + sumf += d * sumi; + } + + *s = sumf; + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q6_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __wasm_simd128__ + int8_t aux8[QK_K] __attribute__((aligned(16))); + int32_t aux32[8] __attribute__((aligned(16))) = {0}; + float sums[8] __attribute__((aligned(16))) = {0}; + + for (int i = 0; i < nb; ++i) { + // Unpack 6-bit quantized data into aux8 (unchanged) + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + int8_t * a = aux8; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; + } 
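+            // Each 6-bit weight combines a 4-bit low part from q4 (x[i].ql, two weights per byte)
+            // with a 2-bit high part from qh (four weights per byte); subtracting 32 maps the
+            // unpacked value from [0, 63] into the signed range [-32, 31].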
+ a += 128; + q4 += 64; + qh += 32; + } + + const int8_t * GGML_RESTRICT a_ptr = aux8; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + v128_t acc0 = wasm_i32x4_splat(0); + v128_t acc1 = wasm_i32x4_splat(0); + + for (int j = 0; j < QK_K/16; ++j) { + const int scale = x[i].scales[j]; + const v128_t vscale = wasm_i32x4_splat(scale); + + // Load 16 elements from a and q8 + const v128_t a_vec = wasm_v128_load(a_ptr); + const v128_t q8_vec = wasm_v128_load(q8); + + // Process low 8 elements + v128_t a_low = wasm_i16x8_extend_low_i8x16(a_vec); + v128_t q8_low = wasm_i16x8_extend_low_i8x16(q8_vec); + v128_t prod_low = wasm_i16x8_mul(a_low, q8_low); + v128_t prod_lo_lo = wasm_i32x4_extend_low_i16x8(prod_low); + v128_t prod_lo_hi = wasm_i32x4_extend_high_i16x8(prod_low); + + // Process high 8 elements + v128_t a_high = wasm_i16x8_extend_high_i8x16(a_vec); + v128_t q8_high = wasm_i16x8_extend_high_i8x16(q8_vec); + v128_t prod_high = wasm_i16x8_mul(a_high, q8_high); + v128_t prod_hi_lo = wasm_i32x4_extend_low_i16x8(prod_high); + v128_t prod_hi_hi = wasm_i32x4_extend_high_i16x8(prod_high); + + // Scale and accumulate + prod_lo_lo = wasm_i32x4_mul(prod_lo_lo, vscale); + prod_lo_hi = wasm_i32x4_mul(prod_lo_hi, vscale); + prod_hi_lo = wasm_i32x4_mul(prod_hi_lo, vscale); + prod_hi_hi = wasm_i32x4_mul(prod_hi_hi, vscale); + + acc0 = wasm_i32x4_add(acc0, wasm_i32x4_add(prod_lo_lo, prod_hi_lo)); + acc1 = wasm_i32x4_add(acc1, wasm_i32x4_add(prod_lo_hi, prod_hi_hi)); + + a_ptr += 16; + q8 += 16; + } + + // Store accumulated results + wasm_v128_store(&aux32[0], acc0); + wasm_v128_store(&aux32[4], acc1); + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) { + sums[l] += d * aux32[l]; + } + } + + // Sum final results + float sumf = 0; + for (int l = 0; l < 8; ++l) { + sumf += sums[l]; + } + *s = sumf; + +#else + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; + } + a += 128; + q4 += 64; + qh += 32; + } + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/16; ++j) { + int scale = x[i].scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + diff --git a/ggml/src/ggml-cpu/cpu-feats-x86.cpp b/ggml/src/ggml-cpu/arch/x86/cpu-feats.cpp similarity index 100% rename from ggml/src/ggml-cpu/cpu-feats-x86.cpp rename to ggml/src/ggml-cpu/arch/x86/cpu-feats.cpp diff --git a/ggml/src/ggml-cpu/arch/x86/quants.c b/ggml/src/ggml-cpu/arch/x86/quants.c new file mode 100644 index 
0000000000000..e3f722b52c9b2 --- /dev/null +++ b/ggml/src/ggml-cpu/arch/x86/quants.c @@ -0,0 +1,4310 @@ +#define GGML_COMMON_IMPL_C +#include "ggml-common.h" +#include "ggml-quants.h" +#include "ggml-impl.h" +#include "ggml-cpu.h" + +#include "../../quants.h" +#include "../../ggml-cpu-impl.h" + +#include +#include +#include +#include // for qsort +#include // for GGML_ASSERT + +#define GROUP_MAX_EPS 1e-15f +#define GROUP_MAX_EPS_IQ3_XXS 1e-8f +#define GROUP_MAX_EPS_IQ2_S 1e-8f +#define GROUP_MAX_EPS_IQ1_M 1e-7f +#define GROUP_MAX_EPS_IQ1_S 1e-12f + +#define UNUSED GGML_UNUSED + +// some compilers don't provide _mm256_set_m128i, e.g. gcc 7 +#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1) + +#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) +// multiply int8_t, add results pairwise twice +static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) { + // Get absolute values of x vectors + const __m128i ax = _mm_sign_epi8(x, x); + // Sign the values of the y vectors + const __m128i sy = _mm_sign_epi8(y, x); + // Perform multiplication and create 16-bit values + const __m128i dot = _mm_maddubs_epi16(ax, sy); + const __m128i ones = _mm_set1_epi16(1); + return _mm_madd_epi16(ones, dot); +} + +#if __AVX__ || __AVX2__ || __AVX512F__ +// horizontally add 8 floats +static inline float hsum_float_8(const __m256 x) { + __m128 res = _mm256_extractf128_ps(x, 1); + res = _mm_add_ps(res, _mm256_castps256_ps128(x)); + res = _mm_add_ps(res, _mm_movehl_ps(res, res)); + res = _mm_add_ss(res, _mm_movehdup_ps(res)); + return _mm_cvtss_f32(res); +} + +// horizontally add 8 int32_t +static inline int hsum_i32_8(const __m256i a) { + const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1)); + const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128); + const __m128i sum64 = _mm_add_epi32(hi64, sum128); + const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); + return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); +} + +// horizontally add 4 int32_t +static inline int hsum_i32_4(const __m128i a) { + const __m128i hi64 = _mm_unpackhi_epi64(a, a); + const __m128i sum64 = _mm_add_epi32(hi64, a); + const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); + return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); +} + +#if defined(__AVX2__) || defined(__AVX512F__) +// spread 32 bits to 32 bytes { 0x00, 0xFF } +static inline __m256i bytes_from_bits_32(const uint8_t * x) { + uint32_t x32; + memcpy(&x32, x, sizeof(uint32_t)); + const __m256i shuf_mask = _mm256_set_epi64x( + 0x0303030303030303, 0x0202020202020202, + 0x0101010101010101, 0x0000000000000000); + __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask); + const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe); + bytes = _mm256_or_si256(bytes, bit_mask); + return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1)); +} + +// Unpack 32 4-bit fields into 32 bytes +// The output vector contains 32 bytes, each one in [ 0 .. 
15 ] interval +static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) +{ + const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi); + const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp); + const __m256i lowMask = _mm256_set1_epi8( 0xF ); + return _mm256_and_si256(lowMask, bytes); +} + +// add int16_t pairwise and return as float vector +static inline __m256 sum_i16_pairs_float(const __m256i x) { + const __m256i ones = _mm256_set1_epi16(1); + const __m256i summed_pairs = _mm256_madd_epi16(ones, x); + return _mm256_cvtepi32_ps(summed_pairs); +} + +static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { +#if defined(__AVX512VNNI__) && defined(__AVX512VL__) + const __m256i zero = _mm256_setzero_si256(); + const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy); + return _mm256_cvtepi32_ps(summed_pairs); +#elif defined(__AVXVNNI__) + const __m256i zero = _mm256_setzero_si256(); + const __m256i summed_pairs = _mm256_dpbusd_avx_epi32(zero, ax, sy); + return _mm256_cvtepi32_ps(summed_pairs); +#else + // Perform multiplication and create 16-bit values + const __m256i dot = _mm256_maddubs_epi16(ax, sy); + return sum_i16_pairs_float(dot); +#endif +} + +// multiply int8_t, add results pairwise twice and return as float vector +static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { +#if __AVXVNNIINT8__ + const __m256i zero = _mm256_setzero_si256(); + const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y); + return _mm256_cvtepi32_ps(summed_pairs); +#else + // Get absolute values of x vectors + const __m256i ax = _mm256_sign_epi8(x, x); + // Sign the values of the y vectors + const __m256i sy = _mm256_sign_epi8(y, x); + return mul_sum_us8_pairs_float(ax, sy); +#endif +} + +static inline __m128i packNibbles( __m256i bytes ) +{ + // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh +#if __AVX512F__ + const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000 + bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh + return _mm256_cvtepi16_epi8(bytes); // abcd_efgh +#else + const __m256i lowByte = _mm256_set1_epi16( 0xFF ); + __m256i high = _mm256_andnot_si256( lowByte, bytes ); + __m256i low = _mm256_and_si256( lowByte, bytes ); + high = _mm256_srli_epi16( high, 4 ); + bytes = _mm256_or_si256( low, high ); + + // Compress uint16_t lanes into bytes + __m128i r0 = _mm256_castsi256_si128( bytes ); + __m128i r1 = _mm256_extracti128_si256( bytes, 1 ); + return _mm_packus_epi16( r0, r1 ); +#endif +} +#elif defined(__AVX__) +static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 ) +{ + // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh + const __m128i lowByte = _mm_set1_epi16( 0xFF ); + __m128i high = _mm_andnot_si128( lowByte, bytes1 ); + __m128i low = _mm_and_si128( lowByte, bytes1 ); + high = _mm_srli_epi16( high, 4 ); + bytes1 = _mm_or_si128( low, high ); + high = _mm_andnot_si128( lowByte, bytes2 ); + low = _mm_and_si128( lowByte, bytes2 ); + high = _mm_srli_epi16( high, 4 ); + bytes2 = _mm_or_si128( low, high ); + + return _mm_packus_epi16( bytes1, bytes2); +} + +static inline __m128i mul_add_epi8_sse(const __m128i x, const __m128i y) { + const __m128i ax = _mm_sign_epi8(x, x); + const __m128i sy = _mm_sign_epi8(y, x); + return _mm_maddubs_epi16(ax, sy); +} + +// spread 32 bits to 32 bytes { 0x00, 0xFF } +static inline __m256i bytes_from_bits_32(const uint8_t * x) { + uint32_t x32; + memcpy(&x32, x, 
sizeof(uint32_t)); + const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000); + const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202); + __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl); + __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh); + const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe); + bytesl = _mm_or_si128(bytesl, bit_mask); + bytesh = _mm_or_si128(bytesh, bit_mask); + bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1)); + bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1)); + return MM256_SET_M128I(bytesh, bytesl); +} + +// Unpack 32 4-bit fields into 32 bytes +// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval +static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) +{ + // Load 16 bytes from memory + __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi); + __m128i tmph = _mm_srli_epi16(tmpl, 4); + const __m128i lowMask = _mm_set1_epi8(0xF); + tmpl = _mm_and_si128(lowMask, tmpl); + tmph = _mm_and_si128(lowMask, tmph); + return MM256_SET_M128I(tmph, tmpl); +} + +// add int16_t pairwise and return as float vector +static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) { + const __m128i ones = _mm_set1_epi16(1); + const __m128i summed_pairsl = _mm_madd_epi16(ones, xl); + const __m128i summed_pairsh = _mm_madd_epi16(ones, xh); + const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl); + return _mm256_cvtepi32_ps(summed_pairs); +} + +static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { + const __m128i axl = _mm256_castsi256_si128(ax); + const __m128i axh = _mm256_extractf128_si256(ax, 1); + const __m128i syl = _mm256_castsi256_si128(sy); + const __m128i syh = _mm256_extractf128_si256(sy, 1); + // Perform multiplication and create 16-bit values + const __m128i dotl = _mm_maddubs_epi16(axl, syl); + const __m128i doth = _mm_maddubs_epi16(axh, syh); + return sum_i16_pairs_float(doth, dotl); +} + +// multiply int8_t, add results pairwise twice and return as float vector +static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { + const __m128i xl = _mm256_castsi256_si128(x); + const __m128i xh = _mm256_extractf128_si256(x, 1); + const __m128i yl = _mm256_castsi256_si128(y); + const __m128i yh = _mm256_extractf128_si256(y, 1); + // Get absolute values of x vectors + const __m128i axl = _mm_sign_epi8(xl, xl); + const __m128i axh = _mm_sign_epi8(xh, xh); + // Sign the values of the y vectors + const __m128i syl = _mm_sign_epi8(yl, xl); + const __m128i syh = _mm_sign_epi8(yh, xh); + // Perform multiplication and create 16-bit values + const __m128i dotl = _mm_maddubs_epi16(axl, syl); + const __m128i doth = _mm_maddubs_epi16(axh, syh); + return sum_i16_pairs_float(doth, dotl); +} + +// larger version of mul_sum_i8_pairs_float where x and y are each represented by four 128-bit vectors +static inline __m256 mul_sum_i8_quad_float(const __m128i x_1_0, const __m128i x_1_1, const __m128i x_2_0, const __m128i x_2_1, + const __m128i y_1_0, const __m128i y_1_1, const __m128i y_2_0, const __m128i y_2_1) { + const __m128i mone = _mm_set1_epi16(1); + + const __m128i p16_1_0 = mul_add_epi8_sse(x_1_0, y_1_0); + const __m128i p16_1_1 = mul_add_epi8_sse(x_1_1, y_1_1); + const __m128i p16_2_0 = mul_add_epi8_sse(x_2_0, y_2_0); + const __m128i p16_2_1 = mul_add_epi8_sse(x_2_1, y_2_1); + const __m128i p_1_0 = _mm_madd_epi16(p16_1_0, mone); + const __m128i p_1_1 = _mm_madd_epi16(p16_1_1, 
mone); + const __m128i p_2_0 = _mm_madd_epi16(p16_2_0, mone); + const __m128i p_2_1 = _mm_madd_epi16(p16_2_1, mone); + const __m128i p_1 = _mm_add_epi32(p_1_0, p_1_1); + const __m128i p_2 = _mm_add_epi32(p_2_0, p_2_1); + return _mm256_cvtepi32_ps(MM256_SET_M128I(p_2, p_1)); +} + +// quad fp16 delta calculation +static inline __m256 quad_fp16_delta_float(const float x0, const float y0, const float x1, const float y1) { + // GGML_FP16_TO_FP32 is faster than Intel F16C + return _mm256_set_m128(_mm_set1_ps(GGML_FP16_TO_FP32(x1) * GGML_FP16_TO_FP32(y1)), + _mm_set1_ps(GGML_FP16_TO_FP32(x0) * GGML_FP16_TO_FP32(y0))); +} +#endif +#elif defined(__SSSE3__) +// horizontally add 4x4 floats +static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { + __m128 res_0 =_mm_hadd_ps(a, b); + __m128 res_1 =_mm_hadd_ps(c, d); + __m128 res =_mm_hadd_ps(res_0, res_1); + res =_mm_hadd_ps(res, res); + res =_mm_hadd_ps(res, res); + + return _mm_cvtss_f32(res); +} +#endif // __AVX__ || __AVX2__ || __AVX512F__ +#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) + +void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__AVX2__) || defined(__AVX__) + for (int i = 0; i < nb; i++) { + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x ); + __m256 v1 = _mm256_loadu_ps( x + 8 ); + __m256 v2 = _mm256_loadu_ps( x + 16 ); + __m256 v3 = _mm256_loadu_ps( x + 24 ); + x += 32; + + // Compute max(abs(e)) for the block + const __m256 signBit = _mm256_set1_ps( -0.0f ); + __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); + + __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); + max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); + max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); + const float maxScalar = _mm_cvtss_f32( max4 ); + + // Quantize these floats + const float d = maxScalar / 127.f; + y[i].d = GGML_FP32_TO_FP16(d); + const float id = ( maxScalar != 0.0f ) ? 
127.f / maxScalar : 0.0f; + const __m256 mul = _mm256_set1_ps( id ); + + // Apply the multiplier + v0 = _mm256_mul_ps( v0, mul ); + v1 = _mm256_mul_ps( v1, mul ); + v2 = _mm256_mul_ps( v2, mul ); + v3 = _mm256_mul_ps( v3, mul ); + + // Round to nearest integer + v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); + v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); + v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); + v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); + + // Convert floats to integers + __m256i i0 = _mm256_cvtps_epi32( v0 ); + __m256i i1 = _mm256_cvtps_epi32( v1 ); + __m256i i2 = _mm256_cvtps_epi32( v2 ); + __m256i i3 = _mm256_cvtps_epi32( v3 ); + +#if defined(__AVX2__) + // Convert int32 to int16 + i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 + i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 + // Convert int16 to int8 + i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 + + // We got our precious signed bytes, but the order is now wrong + // These AVX2 pack instructions process 16-byte pieces independently + // The following instruction is fixing the order + const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); + i0 = _mm256_permutevar8x32_epi32( i0, perm ); + + _mm256_storeu_si256((__m256i *)y[i].qs, i0); +#else + // Since we don't have in AVX some necessary functions, + // we split the registers in half and call AVX2 analogs from SSE + __m128i ni0 = _mm256_castsi256_si128( i0 ); + __m128i ni1 = _mm256_extractf128_si256( i0, 1); + __m128i ni2 = _mm256_castsi256_si128( i1 ); + __m128i ni3 = _mm256_extractf128_si256( i1, 1); + __m128i ni4 = _mm256_castsi256_si128( i2 ); + __m128i ni5 = _mm256_extractf128_si256( i2, 1); + __m128i ni6 = _mm256_castsi256_si128( i3 ); + __m128i ni7 = _mm256_extractf128_si256( i3, 1); + + // Convert int32 to int16 + ni0 = _mm_packs_epi32( ni0, ni1 ); + ni2 = _mm_packs_epi32( ni2, ni3 ); + ni4 = _mm_packs_epi32( ni4, ni5 ); + ni6 = _mm_packs_epi32( ni6, ni7 ); + // Convert int16 to int8 + ni0 = _mm_packs_epi16( ni0, ni2 ); + ni4 = _mm_packs_epi16( ni4, ni6 ); + + _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); + _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); +#endif + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_0_ref(x, y, k); +#endif +} + +void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK8_1 == 0); + const int nb = k / QK8_1; + + block_q8_1 * GGML_RESTRICT y = vy; +#if defined(__AVX2__) || defined(__AVX__) + for (int i = 0; i < nb; i++) { + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x ); + __m256 v1 = _mm256_loadu_ps( x + 8 ); + __m256 v2 = _mm256_loadu_ps( x + 16 ); + __m256 v3 = _mm256_loadu_ps( x + 24 ); + x += 32; + + // Compute max(abs(e)) for the block + const __m256 signBit = _mm256_set1_ps( -0.0f ); + __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); + + __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); + max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); + max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); + const float max_scalar = _mm_cvtss_f32( max4 ); + + // Quantize these floats + 
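(Editorial aside, not part of the patch: the block scale maps the largest |x| in the block onto +-127 and each element is multiplied by the inverse scale before rounding. A minimal scalar sketch of the same idea; q8_quantize_one is a hypothetical name and the numbers are made up.)

#include <math.h>
#include <stdint.h>

// Illustration only: quantize one value given the block's max |x|.
static inline int8_t q8_quantize_one(float v, float max_abs) {
    const float id = (max_abs != 0.0f) ? 127.f / max_abs : 0.0f; // inverse of the stored scale d = max_abs/127
    return (int8_t) lrintf(v * id);                              // e.g. max_abs = 12.7f, v = 3.14f -> 31
}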
const float d = max_scalar / 127.f; + y[i].d = GGML_FP32_TO_FP16(d); + const float id = ( max_scalar != 0.0f ) ? 127.f / max_scalar : 0.0f; + const __m256 mul = _mm256_set1_ps( id ); + + // Apply the multiplier + v0 = _mm256_mul_ps( v0, mul ); + v1 = _mm256_mul_ps( v1, mul ); + v2 = _mm256_mul_ps( v2, mul ); + v3 = _mm256_mul_ps( v3, mul ); + + // Round to nearest integer + v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); + v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); + v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); + v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); + + // Convert floats to integers + __m256i i0 = _mm256_cvtps_epi32( v0 ); + __m256i i1 = _mm256_cvtps_epi32( v1 ); + __m256i i2 = _mm256_cvtps_epi32( v2 ); + __m256i i3 = _mm256_cvtps_epi32( v3 ); + +#if defined(__AVX2__) + // Compute the sum of the quants and set y[i].s + y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)))); + + // Convert int32 to int16 + i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 + i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 + // Convert int16 to int8 + i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 + + // We got our precious signed bytes, but the order is now wrong + // These AVX2 pack instructions process 16-byte pieces independently + // The following instruction is fixing the order + const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); + i0 = _mm256_permutevar8x32_epi32( i0, perm ); + + _mm256_storeu_si256((__m256i *)y[i].qs, i0); +#else + // Since we don't have in AVX some necessary functions, + // we split the registers in half and call AVX2 analogs from SSE + __m128i ni0 = _mm256_castsi256_si128( i0 ); + __m128i ni1 = _mm256_extractf128_si256( i0, 1); + __m128i ni2 = _mm256_castsi256_si128( i1 ); + __m128i ni3 = _mm256_extractf128_si256( i1, 1); + __m128i ni4 = _mm256_castsi256_si128( i2 ); + __m128i ni5 = _mm256_extractf128_si256( i2, 1); + __m128i ni6 = _mm256_castsi256_si128( i3 ); + __m128i ni7 = _mm256_extractf128_si256( i3, 1); + + // Compute the sum of the quants and set y[i].s + const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3)); + const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7)); + y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_4(_mm_add_epi32(s0, s1))); + + // Convert int32 to int16 + ni0 = _mm_packs_epi32( ni0, ni1 ); + ni2 = _mm_packs_epi32( ni2, ni3 ); + ni4 = _mm_packs_epi32( ni4, ni5 ); + ni6 = _mm_packs_epi32( ni6, ni7 ); + // Convert int16 to int8 + ni0 = _mm_packs_epi16( ni0, ni2 ); + ni4 = _mm_packs_epi16( ni4, ni6 ); + + _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); + _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); +#endif + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_1_ref(x, y, k); +#endif +} + +// placeholder implementation for Apple targets +void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q8_K_ref(x, y, k); +} + +//===================================== Dot products ================================= + +// +// Helper functions +// + +#if __AVX__ || __AVX2__ || __AVX512F__ + +// shuffles to pick the required scales in dot products +static inline __m256i get_scale_shuffle_q3k(int i) { + static const uint8_t k_shuffle[128] = { + 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 
0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, + 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, + 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, + 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, + }; + return _mm256_loadu_si256((const __m256i*)k_shuffle + i); +} +static inline __m256i get_scale_shuffle_k4(int i) { + static const uint8_t k_shuffle[256] = { + 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, + 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, + 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, + 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, + 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, + 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, + 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, + 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15 + }; + return _mm256_loadu_si256((const __m256i*)k_shuffle + i); +} +static inline __m128i get_scale_shuffle(int i) { + static const uint8_t k_shuffle[128] = { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, + 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, + 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11, + 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13, + 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15 + }; + return _mm_loadu_si128((const __m128i*)k_shuffle + i); +} +#endif + +void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__AVX2__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + // Main loop + for (; ib < nb; ++ib) { + /* Compute combined scale for the block */ + const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); + + __m256i qx = bytes_from_nibbles_32(x[ib].qs); + + // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. 
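(Editorial illustration, not part of the patch: the same signed mapping is what the scalar tail of this function computes per nibble with (qs[j] & 0x0F) - 8; a self-contained sketch with a hypothetical helper name.)

#include <stdint.h>

// Illustration only: a Q4_0 nibble 0..15 becomes a signed value -8..+7 after the offset.
static inline int q4_0_nibble_to_int(uint8_t nib) {
    return (int)(nib & 0x0F) - 8; // 0x0 -> -8, 0x8 -> 0, 0xF -> +7
}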
+ const __m256i off = _mm256_set1_epi8( 8 ); + qx = _mm256_sub_epi8( qx, off ); + + __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); + + const __m256 q = mul_sum_i8_pairs_float(qx, qy); + + /* Multiply q with scale and accumulate */ + acc = _mm256_fmadd_ps( d, q, acc ); + } + + sumf = hsum_float_8(acc); +#elif defined(__AVX__) + __m256 accum = _mm256_setzero_ps(); + for (; ib + 1 < nb; ib += 2) { + const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)x[ib + 0].qs); + const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); + const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs); + const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs + 1); + const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); + const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); + + const __m128i q4b_1_0 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), q4bits_1), _mm_set1_epi8(8)); + const __m128i q4b_1_1 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(q4bits_1, 4)), _mm_set1_epi8(8)); + const __m128i q4b_2_0 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), q4bits_2), _mm_set1_epi8(8)); + const __m128i q4b_2_1 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(q4bits_2, 4)), _mm_set1_epi8(8)); + + const __m128i p16_1_0 = mul_add_epi8_sse(q4b_1_0, q8b_1_0); + const __m128i p16_1_1 = mul_add_epi8_sse(q4b_1_1, q8b_1_1); + const __m128i p16_2_0 = mul_add_epi8_sse(q4b_2_0, q8b_2_0); + const __m128i p16_2_1 = mul_add_epi8_sse(q4b_2_1, q8b_2_1); + const __m128i p_1 = _mm_add_epi16(p16_1_0, p16_1_1); + const __m128i p_2 = _mm_add_epi16(p16_2_0, p16_2_1); + const __m256 p = sum_i16_pairs_float(p_2, p_1); + + const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); + accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); + } + + sumf = hsum_float_8(accum); +#elif defined(__SSSE3__) + // set constants + const __m128i lowMask = _mm_set1_epi8(0xF); + const __m128i off = _mm_set1_epi8(8); + + // Initialize accumulator with zeros + __m128 acc_0 = _mm_setzero_ps(); + __m128 acc_1 = _mm_setzero_ps(); + __m128 acc_2 = _mm_setzero_ps(); + __m128 acc_3 = _mm_setzero_ps(); + + for (; ib + 1 < nb; ib += 2) { + _mm_prefetch(&x[ib] + sizeof(block_q4_0), _MM_HINT_T0); + _mm_prefetch(&y[ib] + sizeof(block_q8_0), _MM_HINT_T0); + + // Compute combined scale for the block 0 and 1 + const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); + + const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[ib].qs); + + __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1); + __m128i by_0 = _mm_loadu_si128((const __m128i *)y[ib].qs); + bx_0 = _mm_sub_epi8(bx_0, off); + const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); + + __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4)); + __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[ib].qs + 16)); + bx_1 = _mm_sub_epi8(bx_1, off); + const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); + + _mm_prefetch(&x[ib] + 2 * sizeof(block_q4_0), _MM_HINT_T0); + _mm_prefetch(&y[ib] + 2 * sizeof(block_q8_0), _MM_HINT_T0); + + // Compute combined scale for the block 2 and 3 + const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[ib + 1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) ); + + const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); + + __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3); + __m128i by_2 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); + bx_2 = _mm_sub_epi8(bx_2, off); + const __m128i i32_2 = 
mul_sum_i8_pairs(bx_2, by_2); + + __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4)); + __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[ib + 1].qs + 16)); + bx_3 = _mm_sub_epi8(bx_3, off); + const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); + + // Convert int32_t to float + __m128 p0 = _mm_cvtepi32_ps(i32_0); + __m128 p1 = _mm_cvtepi32_ps(i32_1); + __m128 p2 = _mm_cvtepi32_ps(i32_2); + __m128 p3 = _mm_cvtepi32_ps(i32_3); + + // Apply the scale + __m128 p0_d = _mm_mul_ps( d_0_1, p0 ); + __m128 p1_d = _mm_mul_ps( d_0_1, p1 ); + __m128 p2_d = _mm_mul_ps( d_2_3, p2 ); + __m128 p3_d = _mm_mul_ps( d_2_3, p3 ); + + // Acummulate + acc_0 = _mm_add_ps(p0_d, acc_0); + acc_1 = _mm_add_ps(p1_d, acc_1); + acc_2 = _mm_add_ps(p2_d, acc_2); + acc_3 = _mm_add_ps(p3_d, acc_3); + } + + sumf = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3); + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F) - 8; + const int v1 = (x[ib].qs[j] >> 4) - 8; + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + } + + *s = sumf; +} + +void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__AVX2__) || defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + float summs = 0; + + // Main loop + for (; ib < nb; ++ib) { + const float d0 = GGML_FP16_TO_FP32(x[ib].d); + const float d1 = GGML_FP16_TO_FP32(y[ib].d); + + summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); + + const __m256 d0v = _mm256_set1_ps( d0 ); + const __m256 d1v = _mm256_set1_ps( d1 ); + + // Compute combined scales + const __m256 d0d1 = _mm256_mul_ps( d0v, d1v ); + + // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes + const __m256i qx = bytes_from_nibbles_32(x[ib].qs); + const __m256i qy = _mm256_loadu_si256( (const __m256i *)y[ib].qs ); + + const __m256 xy = mul_sum_us8_pairs_float(qx, qy); + + // Accumulate d0*d1*x*y +#if defined(__AVX2__) + acc = _mm256_fmadd_ps( d0d1, xy, acc ); +#else + acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc ); +#endif + } + + sumf = hsum_float_8(acc) + summs; + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F); + const int v1 = (x[ib].qs[j] >> 4); + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = 
vy; + +#if defined(__AVX2__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + // Main loop + for (; ib < nb; ++ib) { + /* Compute combined scale for the block */ + const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + + __m256i qx = bytes_from_nibbles_32(x[ib].qs); + __m256i bxhi = bytes_from_bits_32(x[ib].qh); + bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0)); + qx = _mm256_or_si256(qx, bxhi); + + __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); + + const __m256 q = mul_sum_i8_pairs_float(qx, qy); + + /* Multiply q with scale and accumulate */ + acc = _mm256_fmadd_ps(d, q, acc); + } + + sumf = hsum_float_8(acc); +#elif defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + __m128i mask = _mm_set1_epi8((char)0xF0); + + // Main loop + for (; ib < nb; ++ib) { + /* Compute combined scale for the block */ + const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + + __m256i bx_0 = bytes_from_nibbles_32(x[ib].qs); + const __m256i bxhi = bytes_from_bits_32(x[ib].qh); + __m128i bxhil = _mm256_castsi256_si128(bxhi); + __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); + bxhil = _mm_andnot_si128(bxhil, mask); + bxhih = _mm_andnot_si128(bxhih, mask); + __m128i bxl = _mm256_castsi256_si128(bx_0); + __m128i bxh = _mm256_extractf128_si256(bx_0, 1); + bxl = _mm_or_si128(bxl, bxhil); + bxh = _mm_or_si128(bxh, bxhih); + bx_0 = MM256_SET_M128I(bxh, bxl); + + const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[ib].qs); + + const __m256 q = mul_sum_i8_pairs_float(bx_0, by_0); + + /* Multiply q with scale and accumulate */ + acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc); + } + + sumf = hsum_float_8(acc); + +#endif + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; + const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); + + const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); + const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + } + + *s = sumf; +} + +void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_1); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + +#if defined(__AVX2__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + float summs = 0.0f; + + // Main loop + for (; ib < nb; ++ib) { + const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d)); + + summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); + + __m256i qx = bytes_from_nibbles_32(x[ib].qs); + __m256i bxhi = bytes_from_bits_32(x[ib].qh); + bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10)); + qx = _mm256_or_si256(qx, bxhi); + + const __m256 dy = _mm256_set1_ps(GGML_FP16_TO_FP32(y[ib].d)); + const __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); + + const __m256 q = mul_sum_us8_pairs_float(qx, qy); + 
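(Editorial note, not part of the patch: per block, the accumulation below amounts to acc += (d_x * d_y) * dot(q_x, q_y), while the offset term m_x * s_y is collected separately in summs. A scalar sketch of one block's contribution, mirroring the scalar tail of this function; q5_1_block_contrib is a hypothetical name.)

// Illustration only: what one block adds to the final sum, as in the scalar tail:
//     sumf += (d_x * d_y) * sumi + m_x * s_y;
static inline float q5_1_block_contrib(float dx, float dy, float mx, float sy, int sumi) {
    return dx * dy * (float) sumi + mx * sy;
}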
+ acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc); + } + + sumf = hsum_float_8(acc) + summs; +#elif defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + __m128i mask = _mm_set1_epi8(0x10); + + float summs = 0.0f; + + // Main loop + for (; ib < nb; ++ib) { + const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d)); + + summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); + + __m256i bx_0 = bytes_from_nibbles_32(x[ib].qs); + const __m256i bxhi = bytes_from_bits_32(x[ib].qh); + __m128i bxhil = _mm256_castsi256_si128(bxhi); + __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); + bxhil = _mm_and_si128(bxhil, mask); + bxhih = _mm_and_si128(bxhih, mask); + __m128i bxl = _mm256_castsi256_si128(bx_0); + __m128i bxh = _mm256_extractf128_si256(bx_0, 1); + bxl = _mm_or_si128(bxl, bxhil); + bxh = _mm_or_si128(bxh, bxhih); + bx_0 = MM256_SET_M128I(bxh, bxl); + + const __m256 dy = _mm256_set1_ps(GGML_FP16_TO_FP32(y[ib].d)); + const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[ib].qs); + + const __m256 q = mul_sum_us8_pairs_float(bx_0, by_0); + + acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc); + } + + sumf = hsum_float_8(acc) + summs; + +#endif + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; + const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; + + const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; + const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q8_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__AVX2__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + // Main loop + for (; ib < nb; ++ib) { + // Compute combined scale for the block + const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + __m256i qx = _mm256_loadu_si256((const __m256i *)x[ib].qs); + __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); + + const __m256 q = mul_sum_i8_pairs_float(qx, qy); + + // Multiply q with scale and accumulate + acc = _mm256_fmadd_ps( d, q, acc ); + } + + sumf = hsum_float_8(acc); +#elif defined(__AVX__) + __m256 accum = _mm256_setzero_ps(); + + for (; ib + 1 < nb; ib += 2) { + const __m128i qx_1_0 = _mm_loadu_si128((const __m128i *)x[ib].qs); + const __m128i qx_1_1 = _mm_loadu_si128((const __m128i *)x[ib].qs + 1); + const __m128i qx_2_0 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); + const __m128i qx_2_1 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs + 1); + const __m128i qy_1_0 = _mm_loadu_si128((const __m128i *)y[ib].qs); + const __m128i qy_1_1 = _mm_loadu_si128((const __m128i *)y[ib].qs + 1); + const __m128i qy_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); + const __m128i qy_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); + + const 
__m256 p = mul_sum_i8_quad_float(qx_1_0, qx_1_1, qx_2_0, qx_2_1, qy_1_0, qy_1_1, qy_2_0, qy_2_1); + const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); + accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); + } + + sumf = hsum_float_8(accum); + +#endif + for (; ib < nb; ++ib) { + int sumi = 0; + + for (int j = 0; j < qk; j++) { + sumi += x[ib].qs[j]*y[ib].qs[j]; + } + + sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + } + + *s = sumf; +} + +void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_tq1_0 * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + __m256 sumf = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + // 16-bit sums + __m256i sumi0 = _mm256_setzero_si256(); + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + + // first 32 bytes of 5 elements + { + __m256i qx0 = _mm256_loadu_si256((const __m256i *) (x[i].qs)); + // 8-bit multiplies with shifts, masks and adds + __m256i qx1 = _mm256_add_epi8(qx0, _mm256_add_epi8(qx0, qx0)); // 1 * 3 + __m256i qx2 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx0, 3), _mm256_set1_epi8(-8)), qx0); // 1 * 9 + __m256i qx3 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx1, 3), _mm256_set1_epi8(-8)), qx1); // 3 * 9 + __m256i qx4 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx2, 3), _mm256_set1_epi8(-8)), qx2); // 9 * 9 + + // TODO: can _mm256_mulhi_epu16 be faster even if 16-bits? + + // Cancel the +1 from avg so that it behaves like a halving add + qx0 = _mm256_subs_epu8(qx0, _mm256_set1_epi8(1)); + qx1 = _mm256_subs_epu8(qx1, _mm256_set1_epi8(1)); + qx2 = _mm256_subs_epu8(qx2, _mm256_set1_epi8(1)); + qx3 = _mm256_subs_epu8(qx3, _mm256_set1_epi8(1)); + qx4 = _mm256_subs_epu8(qx4, _mm256_set1_epi8(1)); + // Multiply by 3 and get the top 2 bits + qx0 = _mm256_avg_epu8(qx0, _mm256_avg_epu8(qx0, _mm256_setzero_si256())); + qx1 = _mm256_avg_epu8(qx1, _mm256_avg_epu8(qx1, _mm256_setzero_si256())); + qx2 = _mm256_avg_epu8(qx2, _mm256_avg_epu8(qx2, _mm256_setzero_si256())); + qx3 = _mm256_avg_epu8(qx3, _mm256_avg_epu8(qx3, _mm256_setzero_si256())); + qx4 = _mm256_avg_epu8(qx4, _mm256_avg_epu8(qx4, _mm256_setzero_si256())); + qx0 = _mm256_and_si256(_mm256_srli_epi16(qx0, 6), _mm256_set1_epi8(3)); + qx1 = _mm256_and_si256(_mm256_srli_epi16(qx1, 6), _mm256_set1_epi8(3)); + qx2 = _mm256_and_si256(_mm256_srli_epi16(qx2, 6), _mm256_set1_epi8(3)); + qx3 = _mm256_and_si256(_mm256_srli_epi16(qx3, 6), _mm256_set1_epi8(3)); + qx4 = _mm256_and_si256(_mm256_srli_epi16(qx4, 6), _mm256_set1_epi8(3)); + + const __m256i qy0 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 0)); + const __m256i qy1 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 32)); + const __m256i qy2 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 64)); + const __m256i qy3 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 96)); + const __m256i qy4 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 128)); + + qx0 = _mm256_maddubs_epi16(qx0, qy0); + qx1 = _mm256_maddubs_epi16(qx1, qy1); + qx2 = _mm256_maddubs_epi16(qx2, qy2); + qx3 = _mm256_maddubs_epi16(qx3, qy3); + qx4 = _mm256_maddubs_epi16(qx4, qy4); + + sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(qx0, qx1)); + sumi1 = _mm256_add_epi16(sumi1, 
_mm256_add_epi16(qx2, qx3)); + sumi2 = _mm256_add_epi16(sumi2, qx4); + } + + // last 16 bytes of 5-element, along with the 4 bytes of 4 elements + { + __m128i qx0 = _mm_loadu_si128((const __m128i *) (x[i].qs + 32)); + uint32_t qh; + memcpy(&qh, x[i].qh, sizeof(qh)); // potentially unaligned + __m256i qx5_l = _mm256_cvtepu8_epi16(_mm_set1_epi32(qh)); + __m128i qx1 = _mm_add_epi8(qx0, _mm_add_epi8(qx0, qx0)); // 1 * 3 + __m128i qx2 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx0, 3), _mm_set1_epi8(-8)), qx0); // 1 * 9 + __m128i qx3 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx1, 3), _mm_set1_epi8(-8)), qx1); // 3 * 9 + __m128i qx4 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx2, 3), _mm_set1_epi8(-8)), qx2); // 9 * 9 + __m256i qx01 = MM256_SET_M128I(qx1, qx0); + __m256i qx23 = MM256_SET_M128I(qx3, qx2); + + // avx2 does not have 8-bit multiplies, so 16-bit it is. + qx5_l = _mm256_mullo_epi16(qx5_l, _mm256_set_epi16(27, 27, 27, 27, 9, 9, 9, 9, 3, 3, 3, 3, 1, 1, 1, 1)); + qx5_l = _mm256_and_si256(qx5_l, _mm256_set1_epi16(0xFF)); + __m128i qx5 = _mm_packus_epi16(_mm256_castsi256_si128(qx5_l), _mm256_extracti128_si256(qx5_l, 1)); + + __m256i qx45 = MM256_SET_M128I(qx5, qx4); + + // Cancel the +1 from avg so that it behaves like a halving add + qx01 = _mm256_subs_epu8(qx01, _mm256_set1_epi8(1)); + qx23 = _mm256_subs_epu8(qx23, _mm256_set1_epi8(1)); + qx45 = _mm256_subs_epu8(qx45, _mm256_set1_epi8(1)); + // Multiply by 3 and get the top 2 bits + qx01 = _mm256_avg_epu8(qx01, _mm256_avg_epu8(qx01, _mm256_setzero_si256())); + qx23 = _mm256_avg_epu8(qx23, _mm256_avg_epu8(qx23, _mm256_setzero_si256())); + qx45 = _mm256_avg_epu8(qx45, _mm256_avg_epu8(qx45, _mm256_setzero_si256())); + qx01 = _mm256_and_si256(_mm256_srli_epi16(qx01, 6), _mm256_set1_epi8(3)); + qx23 = _mm256_and_si256(_mm256_srli_epi16(qx23, 6), _mm256_set1_epi8(3)); + qx45 = _mm256_and_si256(_mm256_srli_epi16(qx45, 6), _mm256_set1_epi8(3)); + + const __m256i qy01 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 160)); + const __m256i qy23 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 192)); + const __m256i qy45 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 224)); + + qx01 = _mm256_maddubs_epi16(qx01, qy01); + qx23 = _mm256_maddubs_epi16(qx23, qy23); + qx45 = _mm256_maddubs_epi16(qx45, qy45); + + sumi0 = _mm256_add_epi16(sumi0, qx01); + sumi1 = _mm256_add_epi16(sumi1, qx23); + sumi2 = _mm256_add_epi16(sumi2, qx45); + } + + const __m256i ysum = _mm256_loadu_si256((const __m256i *) y[i].bsums); + const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(x[i].d)); + + sumi0 = _mm256_sub_epi16(sumi0, ysum); + sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(sumi1, sumi2)); + sumi0 = _mm256_madd_epi16(sumi0, _mm256_set1_epi16(1)); + + sumf = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(sumi0), d), sumf); + } + + *s = hsum_float_8(sumf); + +#else + const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243}; + + float sumf = 0.0f; + + for (int i = 0; i < nb; ++i) { + int sum = 0; + + for (size_t j = 0; j < sizeof(x->qs) - sizeof(x->qs) % 32; j += 32) { + for (size_t l = 0; l < 5; ++l) { + for (size_t m = 0; m < 32; ++m) { + uint8_t q = x[i].qs[j + m] * pow3[l]; + uint16_t xi = ((uint16_t) q * 3) >> 8; + sum += (xi - 1) * y[i].qs[j*5 + l*32 + m]; + } + } + } + for (size_t j = sizeof(x->qs) - sizeof(x->qs) % 32; j < sizeof(x->qs); j += 16) { + for (size_t l = 0; l < 5; ++l) { + for (size_t m = 0; m < 16; ++m) { + uint8_t q = x[i].qs[j + m] * pow3[l]; + uint16_t xi = ((uint16_t) q * 3) >> 8; + sum += (xi - 1) * y[i].qs[j*5 + l*16 + m]; + } + } + } 
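(Editorial illustration, not part of the patch: the multiply-by-3^l trick used in the loops above and below isolates one base-3 digit per step; a self-contained restatement of the loop body with a hypothetical helper name.)

#include <stdint.h>

// Illustration only: extract the l-th ternary digit of a packed TQ1_0 byte as -1, 0 or +1.
// Multiplying by pow3[l] (wrapping mod 256) moves that digit to the top of the byte,
// and (q * 3) >> 8 then reads it out as 0..2.
static inline int tq1_0_trit(uint8_t packed, int l) {
    static const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243};
    const uint8_t  q  = (uint8_t)(packed * pow3[l]);
    const uint16_t xi = ((uint16_t) q * 3) >> 8;
    return (int) xi - 1;
}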
+ + for (size_t l = 0; l < 4; ++l) { + for (size_t j = 0; j < sizeof(x->qh); ++j) { + uint8_t q = x[i].qh[j] * pow3[l]; + uint16_t xi = ((uint16_t) q * 3) >> 8; + sum += (xi - 1) * y[i].qs[sizeof(x->qs)*5 + l*sizeof(x->qh) + j]; + } + } + + sumf += (float) sum * (GGML_FP16_TO_FP32(x[i].d) * y[i].d); + } + + *s = sumf; +#endif +} + +void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_tq2_0 * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + __m256 sumf = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + // 16-bit sums, because 256*127 still fits + __m256i sumi0 = _mm256_setzero_si256(); + __m256i sumi1 = _mm256_setzero_si256(); + + for (size_t j = 0; j < sizeof(x->qs); j += 32) { + __m256i qx0 = _mm256_loadu_si256((const __m256i *) (x[i].qs + j)); + __m256i qx1 = _mm256_srli_epi16(qx0, 2); + __m256i qx2 = _mm256_srli_epi16(qx0, 4); + __m256i qx3 = _mm256_srli_epi16(qx0, 6); + + // 0, 1, 2 (should not be 3) + qx0 = _mm256_and_si256(qx0, _mm256_set1_epi8(3)); + qx1 = _mm256_and_si256(qx1, _mm256_set1_epi8(3)); + qx2 = _mm256_and_si256(qx2, _mm256_set1_epi8(3)); + qx3 = _mm256_and_si256(qx3, _mm256_set1_epi8(3)); + + const __m256i qy0 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 0)); + const __m256i qy1 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 32)); + const __m256i qy2 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 64)); + const __m256i qy3 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 96)); + + qx0 = _mm256_maddubs_epi16(qx0, qy0); + qx1 = _mm256_maddubs_epi16(qx1, qy1); + qx2 = _mm256_maddubs_epi16(qx2, qy2); + qx3 = _mm256_maddubs_epi16(qx3, qy3); + + sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(qx0, qx1)); + sumi1 = _mm256_add_epi16(sumi1, _mm256_add_epi16(qx2, qx3)); + } + + const __m256i ysum = _mm256_loadu_si256((const __m256i *) y[i].bsums); + const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(x[i].d)); + + sumi0 = _mm256_add_epi16(sumi0, sumi1); + sumi0 = _mm256_sub_epi16(sumi0, ysum); + sumi0 = _mm256_madd_epi16(sumi0, _mm256_set1_epi16(1)); + + sumf = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(sumi0), d), sumf); + } + + *s = hsum_float_8(sumf); + +#else + float sumf = 0.0f; + + for (int i = 0; i < nb; ++i) { + int32_t sumi = 0; + + for (size_t j = 0; j < sizeof(x->qs); j += 32) { + for (size_t l = 0; l < 4; ++l) { + for (size_t k = 0; k < 32; ++k) { + sumi += y[i].qs[j*4 + l*32 + k] * (((x[i].qs[j + k] >> (l*2)) & 3) - 1); + } + } + } + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + + sumf += (float) sumi * d; + } + + *s = sumf; +#endif +} + +void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q2_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __AVX2__ + + const __m256i m3 = _mm256_set1_epi8(3); + const __m128i m4 = _mm_set1_epi8(0xF); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + const uint8_t * GGML_RESTRICT q2 = 
x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); + const __m128i scales8 = _mm_and_si128(mins_and_scales, m4); + const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); + const __m256i mins = _mm256_cvtepi8_epi16(mins8); + const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums)); + + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc); + + const __m256i all_scales = _mm256_cvtepi8_epi16(scales8); + const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); + const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); + const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; + + __m256i sumi = _mm256_setzero_si256(); + + for (int j = 0; j < QK_K/128; ++j) { + + const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32; + + const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + + const __m256i q2_0 = _mm256_and_si256(q2bits, m3); + const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3); + const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3); + const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3); + + __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0); + __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1); + __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2); + __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3); + + p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0); + p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1); + p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2); + p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3); + + p0 = _mm256_add_epi32(p0, p1); + p2 = _mm256_add_epi32(p2, p3); + + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2)); + } + + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); + + } + + *s = hsum_float_8(acc); + +#elif defined __AVX__ + + const __m128i m3 = _mm_set1_epi8(0x3); + const __m128i m4 = _mm_set1_epi8(0xF); + const __m128i m2 = _mm_set1_epi8(0x2); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + const uint8_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + // load mins and scales from block_q2_K.scales[QK_K/16] + const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); + const __m128i scales16 = _mm_and_si128(mins_and_scales, m4); + const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); + const __m128i mins_0 = _mm_cvtepi8_epi16(mins16); + const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16)); + + // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2 + const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0])); + const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8])); + + // sumf += -dmin * summs in 32bits*8 + acc = 
_mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc); + + const __m128i scales_0 = _mm_cvtepi8_epi16(scales16); + const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16)); + const __m128i scales[2] = { scales_0, scales_1 }; + + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + for (int j = 0; j < QK_K/128; ++j) { + + // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K] + const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + + // load 2bits*16*8 from block_q2_K.qs[QK_K/4] + __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16; + const __m128i q2_0 = _mm_and_si128(q2bits, m3); + const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); + const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); + const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); + q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16; + const __m128i q2_1 = _mm_and_si128(q2bits, m3); + const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); + const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); + const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); + + // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8 + __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0); + __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1); + __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2); + __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3); + __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4); + __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5); + __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6); + __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7); + + // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8 + __m128i shuffle = _mm_set1_epi16(0x0100); + p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0); + shuffle = _mm_add_epi16(shuffle, m2); + p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1); + shuffle = _mm_add_epi16(shuffle, m2); + p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2); + shuffle = _mm_add_epi16(shuffle, m2); + p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3); + shuffle = _mm_add_epi16(shuffle, m2); + p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4); + shuffle = _mm_add_epi16(shuffle, m2); + p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5); + shuffle = _mm_add_epi16(shuffle, m2); + p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6); + shuffle = _mm_add_epi16(shuffle, m2); + p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7); + + p0 = _mm_add_epi32(p0, p1); + p2 = _mm_add_epi32(p2, p3); + p4 = _mm_add_epi32(p4, p5); + p6 = _mm_add_epi32(p6, p7); + + // isum in 32bits*4*2 + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6)); + } + + // sumf += dall * isum - dmin * summs in 32bits + __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), 
_mm256_cvtepi32_ps(sumi)), acc); + } + + *s = hsum_float_8(acc); + +#else + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const uint8_t * q2 = x[i].qs; + const int8_t * q8 = y[i].qs; + const uint8_t * sc = x[i].scales; + + int summs = 0; + for (int j = 0; j < 16; ++j) { + summs += y[i].bsums[j] * (sc[j] >> 4); + } + + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + int isum = 0; + int is = 0; + int d; + for (int k = 0; k < QK_K/128; ++k) { + int shift = 0; + for (int j = 0; j < 4; ++j) { + d = sc[is++] & 0xF; + int isuml = 0; + for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + d = sc[is++] & 0xF; + isuml = 0; + for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + shift += 2; + q8 += 32; + } + q2 += 32; + } + sumf += dall * isum - dmin * summs; + } + *s = sumf; +#endif +} + +void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const uint32_t kmask1 = 0x03030303; + const uint32_t kmask2 = 0x0f0f0f0f; + + const block_q3_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __AVX2__ + + const __m256i m3 = _mm256_set1_epi8(3); + const __m256i mone = _mm256_set1_epi8(1); + const __m128i m32 = _mm_set1_epi8(32); + + __m256 acc = _mm256_setzero_ps(); + + uint32_t aux[3]; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + // Set up scales + memcpy(aux, x[i].scales, 12); + __m128i scales128 = _mm_set_epi32( + ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), + ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), + (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), + (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); + scales128 = _mm_sub_epi8(scales128, m32); + const __m256i all_scales = _mm256_cvtepi8_epi16(scales128); + const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); + const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); + const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; + + // high bit + const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask); + + // integer accumulator + __m256i sumi = _mm256_setzero_si256(); + + int bit = 0; + int is = 0; + + for (int j = 0; j < QK_K/128; ++j) { + // load low 2 bits + const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32; + + // prepare low and high bits + const __m256i q3l_0 = _mm256_and_si256(q3bits, m3); + const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); + ++bit; + + const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3); + const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); + ++bit; + + const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3); + const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); + ++bit; + + const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3); + const __m256i 
q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); + ++bit; + + // load Q8 quants + const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + + // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, + // and then subtract. The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, + // and 2 if the high bit was set) + __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0); + __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1); + __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2); + __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3); + + __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0); + __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1); + __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2); + __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3); + + p16_0 = _mm256_sub_epi16(p16_0, q8s_0); + p16_1 = _mm256_sub_epi16(p16_1, q8s_1); + p16_2 = _mm256_sub_epi16(p16_2, q8s_2); + p16_3 = _mm256_sub_epi16(p16_3, q8s_3); + + // multiply with scales + p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0); + p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1); + p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2); + p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3); + + // accumulate + p16_0 = _mm256_add_epi32(p16_0, p16_1); + p16_2 = _mm256_add_epi32(p16_2, p16_3); + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2)); + + } + + // multiply with block scale and accumulate + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); + + } + + *s = hsum_float_8(acc); + +#elif defined __AVX__ + + const __m128i m3 = _mm_set1_epi8(3); + const __m128i mone = _mm_set1_epi8(1); + const __m128i m32 = _mm_set1_epi8(32); + const __m128i m2 = _mm_set1_epi8(2); + + __m256 acc = _mm256_setzero_ps(); + + const uint32_t *aux; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + // Set up scales + aux = (const uint32_t *)x[i].scales; + __m128i scales128 = _mm_set_epi32( + ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), + ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), + (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), + (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); + scales128 = _mm_sub_epi8(scales128, m32); + const __m128i scales_0 = _mm_cvtepi8_epi16(scales128); + const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128)); + const __m128i scales[2] = { scales_0, scales_1 }; + + // high bit *128*2 from block_q3_K.hmask[QK_K/8] + const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]); + const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]); + + // integer accumulator + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + for (int j = 0; j < QK_K/128; ++j) { + // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4] + const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16; + 
const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16; + + // prepare low and high bits + const int bit = j << 2; + + const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3); + const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3); + const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2); + const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2); + + const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3); + const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3); + const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2); + const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2); + + const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3); + const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3); + const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2); + const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2); + + const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3); + const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3); + const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2); + const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2); + + // load Q8 quants from block_q8_K.qs[QK_K] + const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + + // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, + // and then subtract. 
The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, + // and 2 if the high bit was set) + __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0); + __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1); + __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2); + __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3); + __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4); + __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5); + __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6); + __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7); + + __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0); + __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1); + __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2); + __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3); + __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4); + __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5); + __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6); + __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7); + + p16_0 = _mm_sub_epi16(p16_0, q8s_0); + p16_1 = _mm_sub_epi16(p16_1, q8s_1); + p16_2 = _mm_sub_epi16(p16_2, q8s_2); + p16_3 = _mm_sub_epi16(p16_3, q8s_3); + p16_4 = _mm_sub_epi16(p16_4, q8s_4); + p16_5 = _mm_sub_epi16(p16_5, q8s_5); + p16_6 = _mm_sub_epi16(p16_6, q8s_6); + p16_7 = _mm_sub_epi16(p16_7, q8s_7); + + // multiply with scales + __m128i shuffle = _mm_set1_epi16(0x0100); + p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0); + shuffle = _mm_add_epi16(shuffle, m2); + p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1); + shuffle = _mm_add_epi16(shuffle, m2); + p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2); + shuffle = _mm_add_epi16(shuffle, m2); + p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3); + shuffle = _mm_add_epi16(shuffle, m2); + p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4); + shuffle = _mm_add_epi16(shuffle, m2); + p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5); + shuffle = _mm_add_epi16(shuffle, m2); + p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6); + shuffle = _mm_add_epi16(shuffle, m2); + p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7); + + // accumulate + p16_0 = _mm_add_epi32(p16_0, p16_1); + p16_2 = _mm_add_epi32(p16_2, p16_3); + p16_4 = _mm_add_epi32(p16_4, p16_5); + p16_6 = _mm_add_epi32(p16_6, p16_7); + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6)); + + } + + // multiply with block scale and accumulate + __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc); + + } + + *s = hsum_float_8(acc); + +#else + // scalar version + // This function is written like this so the compiler can manage to vectorize most of it + // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the + // manually vectorized version above. Every other version I tried would run at least 4 times slower. + // The ideal situation would be if we could just write the code once, and the compiler would + // automatically produce the best possible set of machine instructions, instead of us having to manually + // write vectorized versions for AVX, ARM_NEON, etc. 
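+    // Outline of the scalar path below: unpack each weight from 2 low bits (qs) plus 1 high bit
+    // (hmask), mapping it into the range [-4, 3]; rebuild the 6-bit block scales from the packed
+    // 12-byte scales field; then accumulate (scales[j] - 32) * dot(q8, q3) over 16-element groups
+    // and scale the block total by d = GGML_FP16_TO_FP32(x[i].d) * y[i].d.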
+ + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + uint32_t auxs[4]; + const int8_t * scales = (const int8_t*)auxs; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].hmask; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + q3 += 32; + } + a = aux8; + + memcpy(auxs, x[i].scales, 12); + uint32_t tmp = auxs[2]; + auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + for (int j = 0; j < QK_K/16; ++j) { + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; + +#endif + +} + +void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined __AVX2__ + + const __m256i m4 = _mm256_set1_epi8(0xF); + + __m256 acc = _mm256_setzero_ps(); + __m128 acc_m = _mm_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); + + const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums); + const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); + const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); + acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), 
_mm_cvtepi32_ps(prod), acc_m); + + const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); + const __m256i scales = MM256_SET_M128I(sc128, sc128); + + __m256i sumi = _mm256_setzero_si256(); + + for (int j = 0; j < QK_K/64; ++j) { + + const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0)); + const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1)); + + const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; + const __m256i q4l = _mm256_and_si256(q4bits, m4); + const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); + + const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + __m256i p16l = _mm256_maddubs_epi16(q4l, q8l); + p16l = _mm256_madd_epi16(scale_l, p16l); + + const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + __m256i p16h = _mm256_maddubs_epi16(q4h, q8h); + p16h = _mm256_madd_epi16(scale_h, p16h); + const __m256i sumj = _mm256_add_epi32(p16l, p16h); + + sumi = _mm256_add_epi32(sumi, sumj); + } + + __m256 vd = _mm256_set1_ps(d); + acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); + + } + + acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); + acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); + + *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); + +#elif defined __AVX__ + + const __m128i m4 = _mm_set1_epi8(0xF); + const __m128i m2 = _mm_set1_epi8(0x2); + + __m256 acc = _mm256_setzero_ps(); + __m128 acc_m = _mm_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]); + const __m128i scales = _mm_cvtepu8_epi16(utmps); + const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps)); + + const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]); + const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]); + const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1); + const __m128i prod = _mm_madd_epi16(mins, q8s); + acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m); + + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + __m128i shuffle = _mm_set1_epi16(0x0100); + for (int j = 0; j < QK_K/64; ++j) { + + const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle); + shuffle = _mm_add_epi16(shuffle, m2); + const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle); + shuffle = _mm_add_epi16(shuffle, m2); + + __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16; + const __m128i q4l_0 = _mm_and_si128(q4bits, m4); + const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4); + q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16; + const __m128i q4l_1 = _mm_and_si128(q4bits, m4); + const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4); + + const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0); + p16l = _mm_madd_epi16(scale_l, p16l); + sumi_0 = _mm_add_epi32(sumi_0, p16l); + const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; 
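+            // same low-nibble dot product for the second 16-byte half, accumulated into sumi_1;
+            // the high-nibble halves below follow the same pattern with scale_h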
+ p16l = _mm_maddubs_epi16(q4l_1, q8l_1); + p16l = _mm_madd_epi16(scale_l, p16l); + sumi_1 = _mm_add_epi32(sumi_1, p16l); + + const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0); + p16h = _mm_madd_epi16(scale_h, p16h); + sumi_0 = _mm_add_epi32(sumi_0, p16h); + const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + p16h = _mm_maddubs_epi16(q4h_1, q8h_1); + p16h = _mm_madd_epi16(scale_h, p16h); + sumi_1 = _mm_add_epi32(sumi_1, p16h); + + } + + __m256 vd = _mm256_set1_ps(d); + __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc); + + } + + acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); + acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); + + *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + a += 32; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + a += 32; q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined __AVX2__ + + const __m256i m4 = _mm256_set1_epi8(0xF); + const __m128i mzero = _mm_setzero_si128(); + const __m256i mone = _mm256_set1_epi8(1); + + __m256 acc = _mm256_setzero_ps(); + + float summs = 0.f; + + for (int i = 0; i < nb; ++i) { + const 
uint8_t * GGML_RESTRICT q5 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); + + const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums); + const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); + const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); + const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero); + summs += dmin * _mm_extract_epi32(hsum, 0); + + const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); + const __m256i scales = MM256_SET_M128I(sc128, sc128); + + const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh); + __m256i hmask = mone; + + __m256i sumi = _mm256_setzero_si256(); + + int bit = 0; + + for (int j = 0; j < QK_K/64; ++j) { + + const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0)); + const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1)); + + const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32; + + const __m256i q5l_0 = _mm256_and_si256(q5bits, m4); + const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4); + const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0); + hmask = _mm256_slli_epi16(hmask, 1); + + const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4); + const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4); + const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1); + hmask = _mm256_slli_epi16(hmask, 1); + + const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + + __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0); + __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1); + + p16_0 = _mm256_madd_epi16(scale_0, p16_0); + p16_1 = _mm256_madd_epi16(scale_1, p16_1); + + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); + + } + + __m256 vd = _mm256_set1_ps(d); + acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); + + } + + *s = hsum_float_8(acc) + summs; + +#elif defined __AVX__ + + const __m128i m4 = _mm_set1_epi8(0xF); + const __m128i mzero = _mm_setzero_si128(); + const __m128i mone = _mm_set1_epi8(1); + const __m128i m2 = _mm_set1_epi8(2); + + __m256 acc = _mm256_setzero_ps(); + + float summs = 0.f; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + const uint8_t * GGML_RESTRICT q5 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]); + const __m128i scales = _mm_cvtepu8_epi16(utmps); + const __m128i mins = 
_mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps)); + + const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]); + const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]); + const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1); + const __m128i prod = _mm_madd_epi16(mins, q8s); + const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero); + summs += dmin * _mm_extract_epi32(hsum, 0); + + const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]); + const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]); + __m128i hmask = mone; + + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + int bit = 0; + + __m128i shuffle = _mm_set1_epi16(0x0100); + for (int j = 0; j < QK_K/64; ++j) { + + const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle); + shuffle = _mm_add_epi16(shuffle, m2); + const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle); + shuffle = _mm_add_epi16(shuffle, m2); + + const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16; + const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16; + + __m128i q5l_0 = _mm_and_si128(q5bits_0, m4); + __m128i q5l_1 = _mm_and_si128(q5bits_1, m4); + __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4); + __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4); + __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0); + __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1); + hmask = _mm_slli_epi16(hmask, 1); + + __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0); + __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1); + p16_0 = _mm_madd_epi16(scale_0, p16_0); + p16_1 = _mm_madd_epi16(scale_0, p16_1); + + q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4); + q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4); + q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4); + q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4); + q5_0 = _mm_add_epi8(q5l_0, q5h_0); + q5_1 = _mm_add_epi8(q5l_1, q5h_1); + hmask = _mm_slli_epi16(hmask, 1); + + q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0); + __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1); + p16_2 = _mm_madd_epi16(scale_1, p16_2); + p16_3 = _mm_madd_epi16(scale_1, p16_3); + + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); + + } + + __m256 vd = _mm256_set1_ps(d); + __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc); + + } + + *s = hsum_float_8(acc) + summs; + +#else + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 
16 : 0); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q6_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __AVX2__ + + const __m256i m4 = _mm256_set1_epi8(0xF); + const __m256i m2 = _mm256_set1_epi8(3); + const __m256i m32s = _mm256_set1_epi8(32); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales); + + __m256i sumi = _mm256_setzero_si256(); + + int is = 0; + + for (int j = 0; j < QK_K/128; ++j) { + + const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0)); + const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); + const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2)); + const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); + is += 4; + + const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; + const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; + const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32; + + const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4); + const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4); + const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4); + const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4); + + const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0); + const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1); + const __m256i q4_2 = 
_mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2); + const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3); + + const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + + __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0); + __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1); + __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2); + __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3); + + __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0); + __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1); + __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2); + __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3); + + p16_0 = _mm256_sub_epi16(p16_0, q8s_0); + p16_1 = _mm256_sub_epi16(p16_1, q8s_1); + p16_2 = _mm256_sub_epi16(p16_2, q8s_2); + p16_3 = _mm256_sub_epi16(p16_3, q8s_3); + + p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0); + p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1); + p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2); + p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3); + + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3)); + + } + + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); + } + + *s = hsum_float_8(acc); + +#elif defined __AVX__ + + const __m128i m3 = _mm_set1_epi8(3); + const __m128i m15 = _mm_set1_epi8(15); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + // handle the q6_k -32 offset separately using bsums + const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)y[i].bsums); + const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)y[i].bsums + 1); + const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales); + const __m128i scales_16_0 = _mm_cvtepi8_epi16(scales); + const __m128i scales_16_1 = _mm_cvtepi8_epi16(_mm_bsrli_si128(scales, 8)); + const __m128i q8sclsub_0 = _mm_slli_epi32(_mm_madd_epi16(q8sums_0, scales_16_0), 5); + const __m128i q8sclsub_1 = _mm_slli_epi32(_mm_madd_epi16(q8sums_1, scales_16_1), 5); + + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + int is = 0; + + for (int j = 0; j < QK_K/128; ++j) { + + const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16; + const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16; + + const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4); + const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4); + const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(12)), 2); + const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(12)), 2); + const __m128i q4h_4 = _mm_and_si128(q4bitsH_0, _mm_set1_epi8(48)); + const __m128i q4h_5 = _mm_and_si128(q4bitsH_1, _mm_set1_epi8(48)); + const __m128i q4h_6 = _mm_srli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(-64)), 2); + const __m128i q4h_7 = _mm_srli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(-64)), 2); + + const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; + 
const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; + const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; + const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; + + const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m15), q4h_0); + const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m15), q4h_1); + const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m15), q4h_2); + const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m15), q4h_3); + const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m15), q4h_4); + const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m15), q4h_5); + const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m15), q4h_6); + const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m15), q4h_7); + + const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + + __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0); + __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1); + __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2); + __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3); + __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4); + __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5); + __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6); + __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7); + + const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0)); + const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); + const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2)); + const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); + is += 4; + + p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0); + p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_0, 8)), p16_1); + p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2); + p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_1, 8)), p16_3); + p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4); + p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_2, 8)), p16_5); + p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6); + p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_3, 8)), p16_7); + + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7)); + + } + + sumi_0 = _mm_sub_epi32(sumi_0, q8sclsub_0); + sumi_1 = _mm_sub_epi32(sumi_1, q8sclsub_1); + const __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi)), acc); + } + + *s = hsum_float_8(acc); + +#else + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = 
x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; + } + a += 128; + q4 += 64; + qh += 32; + } + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/16; ++j) { + int scale = x[i].scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +#endif +} + +#if defined (__AVX__) || defined (__AVX2__) +static const int8_t keven_signs_q2xs[1024] = { + 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, + 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, + 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, + 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, + 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, + 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, + 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, + 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, + 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, + 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, + 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, + 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, + 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, + 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, + 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, + 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, + 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, + 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, + 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, + 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, + 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, + 1, 
1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, + 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, + 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, + 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, + 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, + 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, + 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, + 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, + 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, + 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, + 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, +}; +#endif + +void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[4]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; + const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); + const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); + const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], + signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127], + signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); + const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); + const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); + const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); + const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); + const uint16_t ls1 = aux32[1] >> 28; + const uint16_t ls2 = aux32[3] >> 28; + const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); + const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); + sumi1 = _mm256_add_epi32(sumi1, p1); + sumi2 = _mm256_add_epi32(sumi2, p2); + 
} + + accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#elif defined(__AVX__) + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[4]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; + const __m128i q2_1_0 = _mm_set_epi64x(iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); + const __m128i q2_1_1 = _mm_set_epi64x(iq2xxs_grid[aux8[3]], iq2xxs_grid[aux8[2]]); + const __m128i q2_2_0 = _mm_set_epi64x(iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); + const __m128i q2_2_1 = _mm_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]]); + const __m128i s2_1_0 = _mm_set_epi64x(signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m128i s2_1_1 = _mm_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127]); + const __m128i s2_2_0 = _mm_set_epi64x(signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); + const __m128i s2_2_1 = _mm_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127]); + const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, s2_1_0); + const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, s2_1_1); + const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, s2_2_0); + const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, s2_2_1); + const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); + const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); + const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); + const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); + const uint16_t ls1 = aux32[1] >> 28; + const uint16_t ls2 = aux32[3] >> 28; + const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); + const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); + sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); + sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); + sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); + sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); + } + + accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#else + + uint32_t aux32[2]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + memcpy(aux32, q2, 2*sizeof(uint32_t)); + q2 += 4; + const uint32_t ls = 2*(aux32[1] >> 28) + 1; + int32_t sumi = 
0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); + const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +#endif +} + +void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + + const __m256i mone = _mm256_set1_epi8(1); + static const char block_sign_shuffle_mask_1[32] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + }; + static const char block_sign_shuffle_mask_2[32] = { + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, + }; + static const uint8_t bit_selector_mask_bytes[32] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m256i bit_selector_mask = _mm256_loadu_si256((const __m256i*)bit_selector_mask_bytes); + const __m256i block_sign_shuffle_1 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_1); + const __m256i block_sign_shuffle_2 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_2); + + static const uint8_t k_bit_helper[32] = { + 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, + 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, + }; + const __m256i bit_helper = _mm256_loadu_si256((const __m256i*)k_bit_helper); + const __m256i m511 = _mm256_set1_epi16(511); + const __m128i m4 = _mm_set1_epi8(0xf); + const __m128i m1 = _mm_set1_epi8(1); + + uint64_t aux64; + + // somewhat hacky, but gives a significant boost in performance + __m256i aux_gindex; + const uint16_t * gindex = (const uint16_t *)&aux_gindex; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(&aux64, x[i].scales, 8); + __m128i stmp = _mm_set1_epi64x(aux64); + stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4)); + const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1); + + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { + + const __m256i q2_data = _mm256_loadu_si256((const __m256i*)q2); q2 += 16; + aux_gindex = _mm256_and_si256(q2_data, m511); + + const __m256i partial_sign_bits = _mm256_srli_epi16(q2_data, 9); + const __m256i partial_sign_bits_upper = _mm256_srli_epi16(q2_data, 13); + const __m256i partial_sign_bits_for_counting = _mm256_xor_si256(partial_sign_bits, partial_sign_bits_upper); + + const __m256i odd_bits = _mm256_shuffle_epi8(bit_helper, 
partial_sign_bits_for_counting); + const __m256i full_sign_bits = _mm256_or_si256(partial_sign_bits, odd_bits); + + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_4 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + + const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]], + iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]); + const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]], + iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]); + const __m256i q2_3 = _mm256_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]], + iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]); + const __m256i q2_4 = _mm256_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]], + iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); + + const __m128i full_signs_l = _mm256_castsi256_si128(full_sign_bits); + const __m128i full_signs_h = _mm256_extractf128_si256(full_sign_bits, 1); + const __m256i full_signs_1 = MM256_SET_M128I(full_signs_l, full_signs_l); + const __m256i full_signs_2 = MM256_SET_M128I(full_signs_h, full_signs_h); + + __m256i signs; + signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_1); + signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); + const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone)); + + signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_2); + signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); + const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone)); + + signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_1); + signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); + const __m256i q8s_3 = _mm256_sign_epi8(q8_3, _mm256_or_si256(signs, mone)); + + signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_2); + signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); + const __m256i q8s_4 = _mm256_sign_epi8(q8_4, _mm256_or_si256(signs, mone)); + + const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); + const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); + const __m256i dot3 = _mm256_maddubs_epi16(q2_3, q8s_3); + const __m256i dot4 = _mm256_maddubs_epi16(q2_4, q8s_4); + + const __m256i sc1 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0))); + const __m256i sc2 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1))); + const __m256i sc3 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2))); + const __m256i sc4 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3))); + + sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot1, sc1)); + sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot2, sc2)); + sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot3, sc3)); + sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot4, sc4)); + } + + accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#elif defined(__AVX__) + const __m128i mone = _mm_set1_epi8(1); + static const char block_sign_shuffle_mask_1[32] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 
0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + }; + static const char block_sign_shuffle_mask_2[32] = { + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, + }; + static const uint8_t bit_selector_mask_bytes[32] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m128i bit_selector_mask_0 = _mm_loadu_si128((const __m128i*)bit_selector_mask_bytes); + const __m128i bit_selector_mask_1 = _mm_loadu_si128((const __m128i*)bit_selector_mask_bytes + 1); + const __m128i block_sign_shuffle_1_0 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_1); + const __m128i block_sign_shuffle_1_1 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_1 + 1); + const __m128i block_sign_shuffle_2_0 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_2); + const __m128i block_sign_shuffle_2_1 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_2 + 1); + + static const uint8_t k_bit_helper[32] = { + 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, + 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, + }; + const __m128i bit_helper_0 = _mm_loadu_si128((const __m128i*)k_bit_helper); + const __m128i bit_helper_1 = _mm_loadu_si128((const __m128i*)k_bit_helper + 1); + const __m128i m511 = _mm_set1_epi16(511); + const __m128i m4 = _mm_set1_epi8(0xf); + const __m128i m1 = _mm_set1_epi8(1); + + uint64_t aux64; + + // somewhat hacky, but gives a significant boost in performance + __m256i aux_gindex; + const uint16_t * gindex = (const uint16_t *)&aux_gindex; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(&aux64, x[i].scales, 8); + __m128i stmp = _mm_set1_epi64x(aux64); + stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4)); + const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1); + + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { + + const __m128i q2_data_0 = _mm_loadu_si128((const __m128i*)q2); + const __m128i q2_data_1 = _mm_loadu_si128((const __m128i*)q2 + 1); q2 += 16; + aux_gindex = MM256_SET_M128I(_mm_and_si128(q2_data_1, m511), _mm_and_si128(q2_data_0, m511)); + + const __m128i partial_sign_bits_0 = _mm_srli_epi16(q2_data_0, 9); + const __m128i partial_sign_bits_1 = _mm_srli_epi16(q2_data_1, 9); + const __m128i partial_sign_bits_upper_0 = _mm_srli_epi16(q2_data_0, 13); + const __m128i partial_sign_bits_upper_1 = _mm_srli_epi16(q2_data_1, 13); + const __m128i partial_sign_bits_for_counting_0 = _mm_xor_si128(partial_sign_bits_0, partial_sign_bits_upper_0); + const __m128i partial_sign_bits_for_counting_1 = _mm_xor_si128(partial_sign_bits_1, partial_sign_bits_upper_1); + + const __m128i odd_bits_0 = _mm_shuffle_epi8(bit_helper_0, partial_sign_bits_for_counting_0); + const __m128i odd_bits_1 = _mm_shuffle_epi8(bit_helper_1, partial_sign_bits_for_counting_1); + const __m128i full_sign_bits_0 = 
_mm_or_si128(partial_sign_bits_0, odd_bits_0); + const __m128i full_sign_bits_1 = _mm_or_si128(partial_sign_bits_1, odd_bits_1); + + const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_3_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_3_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_4_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_4_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + + const __m128i q2_1_0 = _mm_set_epi64x(iq2xs_grid[gindex[1]], iq2xs_grid[gindex[0]]); + const __m128i q2_1_1 = _mm_set_epi64x(iq2xs_grid[gindex[3]], iq2xs_grid[gindex[2]]); + const __m128i q2_2_0 = _mm_set_epi64x(iq2xs_grid[gindex[5]], iq2xs_grid[gindex[4]]); + const __m128i q2_2_1 = _mm_set_epi64x(iq2xs_grid[gindex[7]], iq2xs_grid[gindex[6]]); + const __m128i q2_3_0 = _mm_set_epi64x(iq2xs_grid[gindex[9]], iq2xs_grid[gindex[8]]); + const __m128i q2_3_1 = _mm_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]]); + const __m128i q2_4_0 = _mm_set_epi64x(iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); + const __m128i q2_4_1 = _mm_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]]); + + // AVX2 full_signs_1 is full_sign_bits_0 here + // AVX2 full_signs_2 is full_sign_bits_1 here + __m128i signs_0, signs_1; + signs_0 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_1_0); + signs_1 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_1_1); + signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); + signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); + const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, _mm_or_si128(signs_0, mone)); + const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, _mm_or_si128(signs_1, mone)); + + signs_0 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_2_0); + signs_1 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_2_1); + signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); + signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); + const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, _mm_or_si128(signs_0, mone)); + const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, _mm_or_si128(signs_1, mone)); + + signs_0 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_1_0); + signs_1 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_1_1); + signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); + signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); + const __m128i q8s_3_0 = _mm_sign_epi8(q8_3_0, _mm_or_si128(signs_0, mone)); + const __m128i q8s_3_1 = _mm_sign_epi8(q8_3_1, _mm_or_si128(signs_1, mone)); + + signs_0 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_2_0); + signs_1 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_2_1); + signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); + signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); + const __m128i q8s_4_0 = _mm_sign_epi8(q8_4_0, _mm_or_si128(signs_0, mone)); + const __m128i q8s_4_1 = _mm_sign_epi8(q8_4_1, _mm_or_si128(signs_1, mone)); + + const __m128i dot1_0 = 
_mm_maddubs_epi16(q2_1_0, q8s_1_0); + const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); + const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); + const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); + const __m128i dot3_0 = _mm_maddubs_epi16(q2_3_0, q8s_3_0); + const __m128i dot3_1 = _mm_maddubs_epi16(q2_3_1, q8s_3_1); + const __m128i dot4_0 = _mm_maddubs_epi16(q2_4_0, q8s_4_0); + const __m128i dot4_1 = _mm_maddubs_epi16(q2_4_1, q8s_4_1); + + __m128i sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0)); + const __m128i sc1_0 = _mm_cvtepi8_epi16(sc_tmp); + const __m128i sc1_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); + sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1)); + const __m128i sc2_0 = _mm_cvtepi8_epi16(sc_tmp); + const __m128i sc2_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); + sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2)); + const __m128i sc3_0 = _mm_cvtepi8_epi16(sc_tmp); + const __m128i sc3_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); + sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3)); + const __m128i sc4_0 = _mm_cvtepi8_epi16(sc_tmp); + const __m128i sc4_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); + + sumi1_0 = _mm_add_epi32(sumi1_0, _mm_madd_epi16(dot1_0, sc1_0)); + sumi1_1 = _mm_add_epi32(sumi1_1, _mm_madd_epi16(dot1_1, sc1_1)); + sumi2_0 = _mm_add_epi32(sumi2_0, _mm_madd_epi16(dot2_0, sc2_0)); + sumi2_1 = _mm_add_epi32(sumi2_1, _mm_madd_epi16(dot2_1, sc2_1)); + sumi1_0 = _mm_add_epi32(sumi1_0, _mm_madd_epi16(dot3_0, sc3_0)); + sumi1_1 = _mm_add_epi32(sumi1_1, _mm_madd_epi16(dot3_1, sc3_1)); + sumi2_0 = _mm_add_epi32(sumi2_0, _mm_madd_epi16(dot4_0, sc4_0)); + sumi2_1 = _mm_add_epi32(sumi2_1, _mm_madd_epi16(dot4_1, sc4_1)); + } + + accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#else + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const uint8_t * GGML_RESTRICT sc = x[i].scales; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1; + const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1; + int32_t sumi = 0; + for (int l = 0; l < 2; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); + const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += sumi * ls1; + sumi = 0; + for (int l = 2; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); + const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? 
-1 : 1); + } + q8 += 8; + } + bsum += sumi * ls2; + q2 += 4; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +#endif +} + +void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m128i m4 = _mm_set1_epi8(0xf); + const __m128i m1 = _mm_set1_epi8(1); + + const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1); + const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2); + + uint64_t aux64; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(&aux64, x[i].scales, 8); + const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1); + const __m256i scales16 = _mm256_cvtepi8_epi16(scales8); // 0 2 4 6 8 10 12 14 1 3 5 7 9 11 13 15 + + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q2_1 = _mm256_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], + iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)], + iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], + iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); + const __m256i q2_2 = _mm256_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], + iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)], + iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], + iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); + qs += 8; + + __m256i aux256 = _mm256_set1_epi32(signs[0] | ((uint32_t) signs[1] << 16)); + aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); + const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2); + const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1); + + aux256 = _mm256_set1_epi32(signs[2] | ((uint32_t) signs[3] << 16)); + aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); + const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2); + const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2); + + signs += 4; + + const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); // blocks 2*ib32+0, 2*ib32+1 + const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); // blocks 2*ib32+2, 2*ib32+3 + + const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+0))); + const __m256i p2 = _mm256_madd_epi16(dot2, 
_mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+1))); + sumi1 = _mm256_add_epi32(sumi1, p1); + sumi2 = _mm256_add_epi32(sumi2, p2); + } + + accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#elif defined(__AVX__) + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m128i m4 = _mm_set1_epi8(0xf); + const __m128i m1 = _mm_set1_epi8(1); + + const __m128i mask1_0 = _mm_loadu_si128((const __m128i*)k_mask1); + const __m128i mask1_1 = _mm_loadu_si128((const __m128i*)k_mask1 + 1); + const __m128i mask2_0 = _mm_loadu_si128((const __m128i*)k_mask2); + const __m128i mask2_1 = _mm_loadu_si128((const __m128i*)k_mask2 + 1); + + uint64_t aux64; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(&aux64, x[i].scales, 8); + const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1); + const __m128i scales16_0 = _mm_cvtepi8_epi16(scales8); + const __m128i scales16_1 = _mm_cvtepi8_epi16(_mm_srli_si128(scales8, 8)); + + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q2_1_0 = _mm_set_epi64x(iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], + iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); + const __m128i q2_1_1 = _mm_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], + iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)]); + const __m128i q2_2_0 = _mm_set_epi64x(iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], + iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); + const __m128i q2_2_1 = _mm_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], + iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)]); + qs += 8; + + __m128i aux128_0 = _mm_set1_epi32(signs[0] | ((uint32_t) signs[1] << 16)); + __m128i aux128_1 = aux128_0; + aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); + aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); + const __m128i s2_1_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); + const __m128i s2_1_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); + const __m128i q8s_1_0 = _mm_sub_epi8(_mm_xor_si128(s2_1_0, q8_1_0), s2_1_0); + const __m128i q8s_1_1 = _mm_sub_epi8(_mm_xor_si128(s2_1_1, q8_1_1), s2_1_1); + + aux128_0 = _mm_set1_epi32(signs[2] | ((uint32_t) signs[3] << 16)); + aux128_1 = aux128_0; + aux128_0 = 
_mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); + aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); + const __m128i s2_2_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); + const __m128i s2_2_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); + const __m128i q8s_2_0 = _mm_sub_epi8(_mm_xor_si128(s2_2_0, q8_2_0), s2_2_0); + const __m128i q8s_2_1 = _mm_sub_epi8(_mm_xor_si128(s2_2_1, q8_2_1), s2_2_1); + + signs += 4; + + const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); + const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); + const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); + const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); + + const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_shuffle_epi8(scales16_0, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+0), 0))); + const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_shuffle_epi8(scales16_1, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+0), 1))); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_shuffle_epi8(scales16_0, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+1), 0))); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_shuffle_epi8(scales16_1, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+1), 1))); + sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); + sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); + sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); + sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); + } + + accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#else + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint8_t * signs = qs + QK_K/8; + + int bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf); + int ls2 = 1 + 2*(x[i].scales[ib32] >> 4); + int sumi1 = 0, sumi2 = 0; + for (int l = 0; l < 2; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); + for (int j = 0; j < 8; ++j) { + sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + for (int l = 2; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); + for (int j = 0; j < 8; ++j) { + sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? 
-1 : 1); + } + q8 += 8; + } + bsum += ls1 * sumi1 + ls2 * sumi2; + qs += 4; + signs += 4; + } + + sumf += d * bsum; + } + + *s = 0.125f * sumf; + +#endif + +} + +void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[2]; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q2_1 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], + iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); + q3 += 8; + const __m256i q2_2 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], + iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); + q3 += 8; + memcpy(aux32, gas, 8); gas += 8; + const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127], + signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); + const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], + signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); + const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); + const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); + const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); + const uint16_t ls1 = aux32[0] >> 28; + const uint16_t ls2 = aux32[1] >> 28; + const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); + const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); + sumi1 = _mm256_add_epi32(sumi1, p1); + sumi2 = _mm256_add_epi32(sumi2, p2); + } + + accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); + + } + + *s = 0.25f * hsum_float_8(accumf); + +#elif defined(__AVX__) + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[2]; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const 
__m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q2_1_0 = _mm_set_epi32(iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); + const __m128i q2_1_1 = _mm_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]]); + q3 += 8; + const __m128i q2_2_0 = _mm_set_epi32(iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); + const __m128i q2_2_1 = _mm_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]]); + q3 += 8; + memcpy(aux32, gas, 8); gas += 8; + const __m128i s2_1_0 = _mm_set_epi64x(signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); + const __m128i s2_1_1 = _mm_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127]); + const __m128i s2_2_0 = _mm_set_epi64x(signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m128i s2_2_1 = _mm_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127]); + const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, s2_1_0); + const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, s2_1_1); + const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, s2_2_0); + const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, s2_2_1); + const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); + const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); + const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); + const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); + const uint16_t ls1 = aux32[0] >> 28; + const uint16_t ls2 = aux32[1] >> 28; + const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); + const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); + sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); + sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); + sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); + sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); + } + + accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); + + } + + *s = 0.25f * hsum_float_8(accumf); + +#else + + uint32_t aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t); + const uint32_t ls = 2*(aux32 >> 28) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]); + const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]); + const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? 
-1 : 1); + } + q8 += 8; + } + q3 += 8; + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.25f * sumf; +#endif +} + +void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1); + const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2); + + const __m256i idx_shift = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + const __m256i idx_mask = _mm256_set1_epi32(256); + + typedef union { + __m256i vec[2]; + uint32_t index[16]; + } index_t; + + index_t idx; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i idx_l = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)qs)); qs += 16; + idx.vec[0] = _mm256_set1_epi32(qh[ib32+0]); + idx.vec[1] = _mm256_set1_epi32(qh[ib32+1]); + idx.vec[0] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[0], idx_shift), idx_mask); + idx.vec[1] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[1], idx_shift), idx_mask); + idx.vec[0] = _mm256_or_si256(idx.vec[0], _mm256_cvtepi16_epi32(_mm256_castsi256_si128(idx_l))); + idx.vec[1] = _mm256_or_si256(idx.vec[1], _mm256_cvtepi16_epi32(_mm256_extractf128_si256(idx_l, 1))); + + // At least on my CPU (Ryzen 7950X), using _mm256_i32gather_epi32 is slower than _mm256_set_epi32. Strange.
+ //const __m256i q2_1 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[0], 4); + //const __m256i q2_2 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[1], 4); + const __m256i q2_1 = _mm256_set_epi32( + iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]], + iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]] + ); + const __m256i q2_2 = _mm256_set_epi32( + iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]], + iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[ 9]], iq3s_grid[idx.index[ 8]] + ); + + __m256i aux256 = _mm256_set1_epi32(signs[0] | (signs[1] << 16)); + aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); + const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2); + const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1); + + aux256 = _mm256_set1_epi32(signs[2] | (signs[3] << 16)); + aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); + const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2); + const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2); + + signs += 4; + + const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); + const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); + const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; + const uint16_t ls2 = x[i].scales[ib32/2] >> 4; + const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); + const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); + sumi1 = _mm256_add_epi32(sumi1, p1); + sumi2 = _mm256_add_epi32(sumi2, p2); + } + + accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); + + } + + *s = hsum_float_8(accumf); + +#elif defined(__AVX__) + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m128i mask1_0 = _mm_loadu_si128((const __m128i*)k_mask1); + const __m128i mask1_1 = _mm_loadu_si128((const __m128i*)k_mask1 + 1); + const __m128i mask2_0 = _mm_loadu_si128((const __m128i*)k_mask2); + const __m128i mask2_1 = _mm_loadu_si128((const __m128i*)k_mask2 + 1); + + const __m128i idx_mul_0 = _mm_set_epi32(32, 64, 128, 256); + const __m128i idx_mul_1 = _mm_set_epi32(2, 4, 8, 16); + const __m128i idx_mask = _mm_set1_epi32(256); + + typedef union { + __m128i vec[4]; + uint32_t index[16]; + } index_t; + + index_t idx; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_1_1 = _mm_loadu_si128((const __m128i 
*)q8); q8 += 16; + const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i qs_tmp = _mm_loadu_si128((const __m128i *)qs); + const __m128i idx_l_0 = _mm_cvtepu8_epi16(qs_tmp); + const __m128i idx_l_1 = _mm_cvtepu8_epi16(_mm_srli_si128(qs_tmp, 8)); qs += 16; + idx.vec[0] = _mm_set1_epi32(qh[ib32+0]); + idx.vec[1] = idx.vec[0]; + idx.vec[2] = _mm_set1_epi32(qh[ib32+1]); + idx.vec[3] = idx.vec[2]; + + idx.vec[0] = _mm_and_si128(_mm_mullo_epi32(idx.vec[0], idx_mul_0), idx_mask); + idx.vec[1] = _mm_and_si128(_mm_mullo_epi32(idx.vec[1], idx_mul_1), idx_mask); + idx.vec[2] = _mm_and_si128(_mm_mullo_epi32(idx.vec[2], idx_mul_0), idx_mask); + idx.vec[3] = _mm_and_si128(_mm_mullo_epi32(idx.vec[3], idx_mul_1), idx_mask); + + idx.vec[0] = _mm_or_si128(idx.vec[0], _mm_cvtepi16_epi32(idx_l_0)); + idx.vec[1] = _mm_or_si128(idx.vec[1], _mm_cvtepi16_epi32(_mm_srli_si128(idx_l_0, 8))); + idx.vec[2] = _mm_or_si128(idx.vec[2], _mm_cvtepi16_epi32(idx_l_1)); + idx.vec[3] = _mm_or_si128(idx.vec[3], _mm_cvtepi16_epi32(_mm_srli_si128(idx_l_1, 8))); + + const __m128i q2_1_0 = _mm_set_epi32(iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]]); + const __m128i q2_1_1 = _mm_set_epi32(iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]]); + const __m128i q2_2_0 = _mm_set_epi32(iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[9]], iq3s_grid[idx.index[8]]); + const __m128i q2_2_1 = _mm_set_epi32(iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]]); + + __m128i aux128_0 = _mm_set1_epi32(signs[0] | (signs[1] << 16)); + __m128i aux128_1 = aux128_0; + aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); + aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); + const __m128i s2_1_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); + const __m128i s2_1_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); + const __m128i q8s_1_0 = _mm_sub_epi8(_mm_xor_si128(s2_1_0, q8_1_0), s2_1_0); + const __m128i q8s_1_1 = _mm_sub_epi8(_mm_xor_si128(s2_1_1, q8_1_1), s2_1_1); + + aux128_0 = _mm_set1_epi32(signs[2] | (signs[3] << 16)); + aux128_1 = aux128_0; + aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); + aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); + const __m128i s2_2_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); + const __m128i s2_2_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); + const __m128i q8s_2_0 = _mm_sub_epi8(_mm_xor_si128(s2_2_0, q8_2_0), s2_2_0); + const __m128i q8s_2_1 = _mm_sub_epi8(_mm_xor_si128(s2_2_1, q8_2_1), s2_2_1); + + signs += 4; + + const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); + const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); + const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); + const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); + const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; + const uint16_t ls2 = x[i].scales[ib32/2] >> 4; + const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); + const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); + sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); + sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); + sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); + sumi2_1 = _mm_add_epi32(sumi2_1, 
p2_1); + } + + accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); + + } + + *s = hsum_float_8(accumf); + +#else + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint8_t * GGML_RESTRICT signs = x[i].signs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1; + const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256))); + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + qs += 8; + signs += 4; + bsum += sumi * ls1; + sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256))); + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + qs += 8; + signs += 4; + bsum += sumi * ls2; + } + sumf += d * bsum; + } + *s = sumf; +#endif +} + +#if defined(__AVX2__) +static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) { + const __m256i ax = _mm256_sign_epi8(x, x); + const __m256i sy = _mm256_sign_epi8(y, x); + return _mm256_maddubs_epi16(ax, sy); +} +#endif + +void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __AVX2__ + + __m256 accum = _mm256_setzero_ps(); + float accum1 = 0; + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + __m256i sumi = _mm256_setzero_si256(); + int sumi1 = 0; + for (int ib = 0; ib < QK_K/32; ib += 2) { +#ifdef __BMI2__ + const uint64_t packed_idx1 = _pdep_u64(*(const uint32_t *)qs, 0x00ff00ff00ff00ffULL) | _pdep_u64(qh[ib], 0x700070007000700ULL); + const uint64_t packed_idx2 = _pdep_u64(*(const uint32_t *)(qs + 4), 0x00ff00ff00ff00ffULL) | _pdep_u64(qh[ib + 1], 0x700070007000700ULL); + const uint16_t *idx1 = (const uint16_t *)(&packed_idx1); + const uint16_t *idx2 = (const uint16_t *)(&packed_idx2); + const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[idx1[3]], iq1s_grid[idx1[2]], iq1s_grid[idx1[1]], iq1s_grid[idx1[0]]); + const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[idx2[3]], iq1s_grid[idx2[2]], iq1s_grid[idx2[1]], iq1s_grid[idx2[0]]); +#else + const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)], + iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], 
iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]); + const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)], + iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]); +#endif + qs += 8; + const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + + const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); + const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); + const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; + const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; + const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(ls1)); + const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(ls2)); + + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p1, p2)); + sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 + + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? -1 : 1) * ls2; + } + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + accum = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi), accum); + accum1 += d * sumi1; + + } + + *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; + +#elif defined __AVX__ + __m256 accum = _mm256_setzero_ps(); + float accum1 = 0; + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + int sumi1 = 0; + for (int ib = 0; ib < QK_K/32; ib += 2) { + const __m128i q1b_1_0 = _mm_set_epi64x(iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]); + const __m128i q1b_1_1 = _mm_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)]); + const __m128i q1b_2_0 = _mm_set_epi64x(iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]); + const __m128i q1b_2_1 = _mm_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)]); + qs += 8; + const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + + const __m128i dot1_0 = mul_add_epi8_sse(q1b_1_0, q8b_1_0); + const __m128i dot1_1 = mul_add_epi8_sse(q1b_1_1, q8b_1_1); + const __m128i dot2_0 = mul_add_epi8_sse(q1b_2_0, q8b_2_0); + const __m128i dot2_1 = mul_add_epi8_sse(q1b_2_1, q8b_2_1); + const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; + const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; + const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(ls1)); + const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(ls1)); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(ls2)); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(ls2)); + + sumi1_0 = _mm_add_epi32(sumi1_0, _mm_add_epi32(p1_0, p2_0)); + sumi1_1 = _mm_add_epi32(sumi1_1, _mm_add_epi32(p1_1, p2_1)); + sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 + + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? 
-1 : 1) * ls2; + } + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi1_1, sumi1_0))), accum); + accum1 += d * sumi1; + + } + + *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; + +#else + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + int sumi = 0, sumi1 = 0; + for (int ib = 0; ib < QK_K/32; ++ib) { + const int ls = 2*((qh[ib] >> 12) & 7) + 1; + const int delta = qh[ib] & 0x8000 ? -1 : 1; + int lsum = 0; + for (int l = 0; l < 4; ++l) { + const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8))); + for (int j = 0; j < 8; ++j) { + lsum += q8[j] * grid[j]; + } + q8 += 8; + } + sumi += ls * lsum; + sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]); + qs += 4; + } + + sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); + } + + *s = sumf; + +#endif +} + +void ggml_vec_dot_iq1_m_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_m * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + iq1m_scale_t scale; + +#if defined __AVX2__ + + const __m256i mask = _mm256_set1_epi16(0x7); + const __m256i mone = _mm256_set1_epi16(1); + const __m256i mone8 = _mm256_set1_epi8(1); + const __m256i mtwo8 = _mm256_set1_epi8(2); + // VPSHUFB cannot cross 128-bit lanes so odd shifts go to upper half. + const __m256i scales_shift = _mm256_set_epi64x(9, 3, 6, 0); + + __m256 accum1 = _mm256_setzero_ps(); + __m256 accum2 = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint16_t * sc = (const uint16_t *)x[i].scales; + + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + // Extract 3-bit scales (16 values) + __m256i scales = _mm256_set1_epi64x(*(const uint64_t*)sc); + scales = _mm256_srlv_epi64(scales, scales_shift); + scales = _mm256_add_epi16(_mm256_slli_epi16(_mm256_and_si256(scales, mask), 1), mone); + + // Indices to repeat each scale 8 times. 
+ __m256i scales_idx1 = _mm256_set1_epi16(0x0100); + __m256i scales_idx2 = _mm256_add_epi8(scales_idx1, _mm256_set1_epi8(8)); + + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib = 0; ib < QK_K/32; ib += 2) { +#ifdef __BMI2__ + const uint64_t packed_idx1 = _pdep_u64(*(const uint32_t *)qs, 0x00ff00ff00ff00ffULL) + | _pdep_u64(*(const uint16_t*)(qh) & 0x7777, 0xf000f000f000f00ULL); + const uint64_t packed_idx2 = _pdep_u64(*(const uint32_t *)(qs + 4), 0x00ff00ff00ff00ffULL) + | _pdep_u64(*(const uint16_t*)(qh + 2) & 0x7777, 0xf000f000f000f00ULL); + const uint16_t *idx1 = (const uint16_t *)(&packed_idx1); + const uint16_t *idx2 = (const uint16_t *)(&packed_idx2); + const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[idx1[3]], iq1s_grid[idx1[2]], iq1s_grid[idx1[1]], iq1s_grid[idx1[0]]); + const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[idx2[3]], iq1s_grid[idx2[2]], iq1s_grid[idx2[1]], iq1s_grid[idx2[0]]); + + // Convert signs to bytes 0x81 (negative) or 0x01 (positive) + const uint64_t delta_sign = _pdep_u64(*(const uint32_t*)(qh) & 0x88888888, 0xf0f0f0f0f0f0f0f0ULL); + const __m256i delta1 = _mm256_or_si256(mone8, _mm256_cvtepi8_epi64(_mm_set1_epi32(delta_sign))); + const __m256i delta2 = _mm256_or_si256(mone8, _mm256_cvtepi8_epi64(_mm_set1_epi32(delta_sign >> 32))); +#else + const __m256i q1b_1 = _mm256_set_epi64x( + iq1s_grid[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)], + iq1s_grid[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)] + ); + const __m256i q1b_2 = _mm256_set_epi64x( + iq1s_grid[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)], + iq1s_grid[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)] + ); + + const __m256i delta1 = _mm256_set_epi64x(qh[1] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[1] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101, + qh[0] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[0] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); + const __m256i delta2 = _mm256_set_epi64x(qh[3] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[3] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101, + qh[2] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[2] & 0x08 ? 
0xffffffffffffffff : 0x0101010101010101); +#endif + const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + + const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); + const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); + const __m256i dot3 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_1, delta1)); + const __m256i dot4 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_2, delta2)); + + __m256i scale1 = _mm256_shuffle_epi8(scales, scales_idx1); + __m256i scale2 = _mm256_shuffle_epi8(scales, scales_idx2); + + scales_idx1 = _mm256_add_epi8(scales_idx1, mtwo8); + scales_idx2 = _mm256_add_epi8(scales_idx2, mtwo8); + + const __m256i p1 = _mm256_madd_epi16(dot1, scale1); + const __m256i p2 = _mm256_madd_epi16(dot2, scale2); + const __m256i p3 = _mm256_madd_epi16(dot3, scale1); + const __m256i p4 = _mm256_madd_epi16(dot4, scale2); + + sumi1 = _mm256_add_epi32(sumi1, _mm256_add_epi32(p1, p2)); + sumi2 = _mm256_add_epi32(sumi2, _mm256_add_epi32(p3, p4)); + + qs += 8; qh += 4; + } + + const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(scale.f16)); + + accum1 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi1), accum1); + accum2 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi2), accum2); + } + + *s = hsum_float_8(accum1) + IQ1M_DELTA * hsum_float_8(accum2); + +#elif defined __AVX__ + const __m128i mask = _mm_set1_epi16(0x7); + const __m128i mone = _mm_set1_epi16(1); + + __m256 accum1 = _mm256_setzero_ps(); + __m256 accum2 = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint16_t * sc = (const uint16_t *)x[i].scales; + + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib = 0; ib < QK_K/32; ib += 2) { + const __m128i q1b_1_0 = _mm_set_epi64x( + iq1s_grid[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)]); + const __m128i q1b_1_1 = _mm_set_epi64x( + iq1s_grid[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)]); + const __m128i q1b_2_0 = _mm_set_epi64x( + iq1s_grid[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)]); + const __m128i q1b_2_1 = _mm_set_epi64x( + iq1s_grid[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)]); + const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + + const __m128i dot1_0 = mul_add_epi8_sse(q1b_1_0, q8b_1_0); + const __m128i dot1_1 = mul_add_epi8_sse(q1b_1_1, q8b_1_1); + const __m128i dot2_0 = mul_add_epi8_sse(q1b_2_0, q8b_2_0); + const __m128i dot2_1 = mul_add_epi8_sse(q1b_2_1, q8b_2_1); + + const __m128i delta1_0 = _mm_set_epi64x(qh[0] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[0] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); + const __m128i delta1_1 = _mm_set_epi64x(qh[1] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[1] & 0x08 ? 
0xffffffffffffffff : 0x0101010101010101); + const __m128i delta2_0 = _mm_set_epi64x(qh[2] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[2] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); + const __m128i delta2_1 = _mm_set_epi64x(qh[3] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[3] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); + + const __m128i dot3_0 = mul_add_epi8_sse(delta1_0, q8b_1_0); + const __m128i dot3_1 = mul_add_epi8_sse(delta1_1, q8b_1_1); + const __m128i dot4_0 = mul_add_epi8_sse(delta2_0, q8b_2_0); + const __m128i dot4_1 = mul_add_epi8_sse(delta2_1, q8b_2_1); + + __m128i scale1_0 = _mm_set1_epi16(sc[ib/2] >> 0); + __m128i scale1_1 = _mm_set1_epi16(sc[ib/2] >> 3); + __m128i scale2_0 = _mm_set1_epi16(sc[ib/2] >> 6); + __m128i scale2_1 = _mm_set1_epi16(sc[ib/2] >> 9); + + scale1_0 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale1_0, mask), 1), mone); + scale1_1 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale1_1, mask), 1), mone); + scale2_0 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale2_0, mask), 1), mone); + scale2_1 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale2_1, mask), 1), mone); + const __m128i p1_0 = _mm_madd_epi16(dot1_0, scale1_0); + const __m128i p1_1 = _mm_madd_epi16(dot1_1, scale1_1); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, scale2_0); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, scale2_1); + const __m128i p3_0 = _mm_madd_epi16(dot3_0, scale1_0); + const __m128i p3_1 = _mm_madd_epi16(dot3_1, scale1_1); + const __m128i p4_0 = _mm_madd_epi16(dot4_0, scale2_0); + const __m128i p4_1 = _mm_madd_epi16(dot4_1, scale2_1); + + sumi1_0 = _mm_add_epi32(sumi1_0, _mm_add_epi32(p1_0, p2_0)); + sumi1_1 = _mm_add_epi32(sumi1_1, _mm_add_epi32(p1_1, p2_1)); + sumi2_0 = _mm_add_epi32(sumi2_0, _mm_add_epi32(p3_0, p4_0)); + sumi2_1 = _mm_add_epi32(sumi2_1, _mm_add_epi32(p3_1, p4_1)); + + qs += 8; qh += 4; + } + + const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(scale.f16)); + + accum1 = _mm256_add_ps(_mm256_mul_ps(d, _mm256_cvtepi32_ps(MM256_SET_M128I(sumi1_1, sumi1_0))), accum1); + accum2 = _mm256_add_ps(_mm256_mul_ps(d, _mm256_cvtepi32_ps(MM256_SET_M128I(sumi2_1, sumi2_0))), accum2); + } + + *s = hsum_float_8(accum1) + IQ1M_DELTA * hsum_float_8(accum2); + +#else + + int sum1[2], sum2[2], delta[4]; + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint16_t * sc = (const uint16_t *)x[i].scales; + + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + + int sumi1 = 0, sumi2 = 0; + for (int ib = 0; ib < QK_K/32; ++ib) { + delta[0] = qh[0] & 0x08 ? -1 : 1; + delta[1] = qh[0] & 0x80 ? -1 : 1; + delta[2] = qh[1] & 0x08 ? -1 : 1; + delta[3] = qh[1] & 0x80 ? 
-1 : 1; + sum1[0] = sum1[1] = sum2[0] = sum2[1] = 0; + for (int l = 0; l < 4; ++l) { + const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((uint16_t)qh[l/2] << (8 - 4*(l%2))) & 0x700))); + int lsum1 = 0, lsum2 = 0; + for (int j = 0; j < 8; ++j) { + lsum1 += q8[j] * grid[j]; + lsum2 += q8[j]; + } + q8 += 8; + sum1[l/2] += lsum1; + sum2[l/2] += lsum2*delta[l]; + } + + const int ls1 = 2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1; + const int ls2 = 2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1; + + sumi1 += sum1[0] * ls1 + sum1[1] * ls2; + sumi2 += sum2[0] * ls1 + sum2[1] * ls2; + qs += 4; + qh += 2; + } + + sumf += GGML_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); + } + + *s = sumf; + +#endif +} + +void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK4_NL == 0); + static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); + + const block_iq4_nl * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + const int nb = n / QK4_NL; + + int ib = 0; + float sumf = 0; + +#if defined __AVX2__ + + const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); + const __m128i m4b = _mm_set1_epi8(0x0f); + const __m256i mone = _mm256_set1_epi16(1); + + __m256 accum1 = _mm256_setzero_ps(); + __m256 accum2 = _mm256_setzero_ps(); + for (; ib + 1 < nb; ib += 2) { + const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)x[ib + 0].qs); + const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)x[ib + 1].qs); + const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)y[ib + 0].qs); + const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)y[ib + 1].qs); + const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), + _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); + const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)), + _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); + const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); + const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); + const __m256i p_1 = _mm256_madd_epi16(p16_1, mone); + const __m256i p_2 = _mm256_madd_epi16(p16_2, mone); + accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[ib + 0].d)*GGML_FP16_TO_FP32(x[ib + 0].d)), + _mm256_cvtepi32_ps(p_1), accum1); + accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[ib + 1].d)*GGML_FP16_TO_FP32(x[ib + 1].d)), + _mm256_cvtepi32_ps(p_2), accum2); + } + + sumf = hsum_float_8(_mm256_add_ps(accum1, accum2)); + +#elif defined __AVX__ + const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); + const __m128i m4b = _mm_set1_epi8(0x0f); + + __m256 accum = _mm256_setzero_ps(); + for (; ib + 1 < nb; ib += 2) { + const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)x[ib + 0].qs); + const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); + const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs); + const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs + 1); + const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); + const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); + + const __m128i q4b_1_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)); + const __m128i q4b_1_1 = _mm_shuffle_epi8(values128, 
_mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)); + const __m128i q4b_2_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)); + const __m128i q4b_2_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)); + + const __m256 p = mul_sum_i8_quad_float(q4b_1_0, q4b_1_1, q4b_2_0, q4b_2_1, q8b_1_0, q8b_1_1, q8b_2_0, q8b_2_1); + const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); + accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); + } + + sumf = hsum_float_8(accum); + +#endif + for (; ib < nb; ++ib) { + const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; + sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; + } + sumf += d * (sumi1 + sumi2); + } + *s = sumf; +} + +void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_K == 0); + + const block_iq4_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __AVX2__ + + const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); + const __m128i m4b = _mm_set1_epi8(0x0f); + + __m256 accum = _mm256_setzero_ps(); + for (int ibl = 0; ibl < nb; ++ibl) { + const uint8_t * qs = x[ibl].qs; + const int8_t * q8 = y[ibl].qs; + uint16_t sh = x[ibl].scales_h; + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib = 0; ib < QK_K/32; ib += 2) { + const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)qs); qs += 16; + const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)qs); qs += 16; + const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), + _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); + const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)), + _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); + const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); + const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); + const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; + const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; + sh >>= 4; + const __m256i p_1 = _mm256_madd_epi16(p16_1, _mm256_set1_epi16(ls1)); + const __m256i p_2 = _mm256_madd_epi16(p16_2, _mm256_set1_epi16(ls2)); + sumi1 = _mm256_add_epi32(p_1, sumi1); + sumi2 = _mm256_add_epi32(p_2, sumi2); + } + accum = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d), + _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accum); + } + + *s = hsum_float_8(accum); + +#elif defined __AVX__ + const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); + const __m128i m4b = _mm_set1_epi8(0x0f); + + __m256 accum = _mm256_setzero_ps(); + for (int ibl = 0; ibl < nb; ++ibl) { + const uint8_t * qs = x[ibl].qs; + const int8_t * q8 = y[ibl].qs; + uint16_t sh = x[ibl].scales_h; + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = 
_mm_setzero_si128(); + for (int ib = 0; ib < QK_K/32; ib += 2) { + const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)qs); qs += 16; + const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)qs); qs += 16; + const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q4b_1_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)); + const __m128i q4b_1_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)); + const __m128i q4b_2_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)); + const __m128i q4b_2_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)); + const __m128i p16_1_0 = mul_add_epi8_sse(q4b_1_0, q8b_1_0); + const __m128i p16_1_1 = mul_add_epi8_sse(q4b_1_1, q8b_1_1); + const __m128i p16_2_0 = mul_add_epi8_sse(q4b_2_0, q8b_2_0); + const __m128i p16_2_1 = mul_add_epi8_sse(q4b_2_1, q8b_2_1); + const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; + const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; + sh >>= 4; + const __m128i p_1_0 = _mm_madd_epi16(p16_1_0, _mm_set1_epi16(ls1)); + const __m128i p_1_1 = _mm_madd_epi16(p16_1_1, _mm_set1_epi16(ls1)); + const __m128i p_2_0 = _mm_madd_epi16(p16_2_0, _mm_set1_epi16(ls2)); + const __m128i p_2_1 = _mm_madd_epi16(p16_2_1, _mm_set1_epi16(ls2)); + sumi1_0 = _mm_add_epi32(p_1_0, sumi1_0); + sumi1_1 = _mm_add_epi32(p_1_1, sumi1_1); + sumi2_0 = _mm_add_epi32(p_2_0, sumi2_0); + sumi2_1 = _mm_add_epi32(p_2_1, sumi2_1); + } + __m128i sumi12_0 = _mm_add_epi32(sumi1_0, sumi2_0); + __m128i sumi12_1 = _mm_add_epi32(sumi1_1, sumi2_1); + accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d), + _mm256_cvtepi32_ps(MM256_SET_M128I(sumi12_1, sumi12_0))), accum); + } + + *s = hsum_float_8(accum); + +#else + float sumf = 0; + for (int ibl = 0; ibl < nb; ++ibl) { + const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + uint16_t h = x[ibl].scales_h; + const uint8_t * qs = x[ibl].qs; + const int8_t * q8 = y[ibl].qs; + for (int ib = 0; ib < QK_K/32; ib += 2) { + const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30); + const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30); + h >>= 4; + const float d1 = d4d8*(ls1 - 32); + const float d2 = d4d8*(ls2 - 32); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d1 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + sumi1 = sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d2 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + } + } + *s = sumf; +#endif +} + diff --git a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp b/ggml/src/ggml-cpu/arch/x86/repack.cpp similarity index 68% rename from ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp rename to ggml/src/ggml-cpu/arch/x86/repack.cpp index 0a3ff867cfeca..e7635a294a796 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +++ b/ggml/src/ggml-cpu/arch/x86/repack.cpp @@ -3,72 +3,19 @@ #include "ggml-common.h" #include "ggml-backend-impl.h" -#include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" #include "ggml-cpu-impl.h" 
-#include "ggml-cpu-traits.h" +#include "traits.h" #include #include #include -#include #include // for qsort #include // for GGML_ASSERT -#include "ggml-cpu-aarch64.h" - -// TODO: move to include file? -template constexpr int QK_0() { - if constexpr (K == 4) { - return QK4_0; - } - if constexpr (K == 8) { - return QK8_0; - } - return -1; -} - -template struct block { - ggml_half d[N]; // deltas for N qK_0 blocks - int8_t qs[(QK_0() * N * K) / 8]; // quants for N qK_0 blocks -}; - -// control size -static_assert(sizeof(block<4, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 2, "wrong block<4,4> size/padding"); -static_assert(sizeof(block<4, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<4,8> size/padding"); -static_assert(sizeof(block<8, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<8,4> size/padding"); -static_assert(sizeof(block<8, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 8, "wrong block<8,8> size/padding"); - -using block_q4_0x4 = block<4, 4>; -using block_q4_0x8 = block<4, 8>; -using block_q8_0x4 = block<8, 4>; -using block_q8_0x8 = block<8, 8>; - - -struct block_q4_Kx8 { - ggml_half d[8]; // super-block scale for quantized scales - ggml_half dmin[8]; // super-block scale for quantized mins - uint8_t scales[96]; // scales and mins, quantized with 6 bits - uint8_t qs[1024]; // 4--bit quants -}; - -static_assert(sizeof(block_q4_Kx8) == sizeof(ggml_half) * 16 + K_SCALE_SIZE * 8 + QK_K * 4, "wrong q4_K block size/padding"); - -struct block_q8_Kx4 { - float d[4]; // delta - int8_t qs[QK_K * 4]; // quants - int16_t bsums[QK_K / 4]; // sum of quants in groups of 16 -}; - -static_assert(sizeof(block_q8_Kx4) == sizeof(float) * 4 + QK_K * 4 + (QK_K / 4) * sizeof(int16_t), "wrong q8_K block size/padding"); - -struct block_iq4_nlx4 { - ggml_half d[4]; // deltas for 4 iq4_nl blocks - uint8_t qs[QK4_NL * 2]; // nibbles / quants for 4 iq4_nl blocks -}; - -static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wrong iq4_nlx4 block size/padding"); +#define GGML_CPU_CLANG_WORKAROUND +#include "../../repack.h" #if defined(__GNUC__) #pragma GCC diagnostic ignored "-Woverlength-strings" @@ -76,27 +23,6 @@ static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wro #define UNUSED GGML_UNUSED -static inline int nearest_int(float fval) { - assert(fabsf(fval) <= 4194303.f); - float val = fval + 12582912.f; - int i; memcpy(&i, &val, sizeof(int)); - return (i & 0x007fffff) - 0x00400000; -} - -// Functions to create the interleaved data layout formats - -// interleave 4 block_q4_0s in blocks of blck_size_interleave -// returns an interleaved block_q4_0x4 -// in the interleaved block_q4_0x4, place deltas for 4 block_q4_0 blocks -// first, then interleave quants from 4 block_q4_0s in blocks of blck_size_interleave -// -// - in : an array of block_q4_0 pointers -// - blck_size_interleave : the block_q4_0 quants bytes are interleaved in blocks of -// blck_size_interleave bytes -// - xor_mask : the mask to convert the nibbles in block_q4_0 quants bytes -// from bias offset form to pure sign form (this saves subtract -// operations durin unpacking) -// #if defined(__AVX__) #if defined(__F16C__) #if defined(__AVX512F__) @@ -178,6 +104,12 @@ static inline __m256 __avx_rearranged_f32cx8_load(ggml_fp16_t *x, __m128i arrang #endif #endif +static inline int nearest_int(float fval) { + assert(fabsf(fval) <= 4194303.f); + float val = fval + 12582912.f; + int i; memcpy(&i, &val, sizeof(int)); + return (i & 0x007fffff) - 0x00400000; +} #if defined(__AVX2__) || 
defined(__AVX512F__) #if defined(__AVX512F__) @@ -242,188 +174,14 @@ static inline __m256i mul_sum_i8_pairs_acc_int32x8(const __m256i acc, const __m2 } #endif -static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113}; - -static void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(QK8_0 == 32); - assert(k % QK8_0 == 0); - const int nb = k / QK8_0; - - block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; - -#if defined(__ARM_NEON) - float32x4_t srcv[4][8]; - float id[4]; - - for (int i = 0; i < nb; i++) { - float32x4_t asrcv[8]; - float32x4_t amaxv[8]; - - for (int row_iter = 0; row_iter < 4; row_iter++) { - for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j); - for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]); - - for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]); - for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]); - for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]); - - const float amax = vmaxvq_f32(amaxv[0]); - - const float d = amax / ((1 << 7) - 1); - id[row_iter] = d ? 1.0f / d : 0.0f; - - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); - } - - for (int j = 0; j < 8; j++) { - float32x4_t v = vmulq_n_f32(srcv[0][j], id[0]); - int32x4_t vi = vcvtnq_s32_f32(v); - y[i].qs[16 * j + 0] = vgetq_lane_s32(vi, 0); - y[i].qs[16 * j + 1] = vgetq_lane_s32(vi, 1); - y[i].qs[16 * j + 2] = vgetq_lane_s32(vi, 2); - y[i].qs[16 * j + 3] = vgetq_lane_s32(vi, 3); - - v = vmulq_n_f32(srcv[1][j], id[1]); - vi = vcvtnq_s32_f32(v); - y[i].qs[16 * j + 4] = vgetq_lane_s32(vi, 0); - y[i].qs[16 * j + 5] = vgetq_lane_s32(vi, 1); - y[i].qs[16 * j + 6] = vgetq_lane_s32(vi, 2); - y[i].qs[16 * j + 7] = vgetq_lane_s32(vi, 3); - - v = vmulq_n_f32(srcv[2][j], id[2]); - vi = vcvtnq_s32_f32(v); - y[i].qs[16 * j + 8] = vgetq_lane_s32(vi, 0); - y[i].qs[16 * j + 9] = vgetq_lane_s32(vi, 1); - y[i].qs[16 * j + 10] = vgetq_lane_s32(vi, 2); - y[i].qs[16 * j + 11] = vgetq_lane_s32(vi, 3); - - v = vmulq_n_f32(srcv[3][j], id[3]); - vi = vcvtnq_s32_f32(v); - y[i].qs[16 * j + 12] = vgetq_lane_s32(vi, 0); - y[i].qs[16 * j + 13] = vgetq_lane_s32(vi, 1); - y[i].qs[16 * j + 14] = vgetq_lane_s32(vi, 2); - y[i].qs[16 * j + 15] = vgetq_lane_s32(vi, 3); - } - } -#else - // scalar - const int blck_size_interleave = 4; - float srcv[4][QK8_0]; - float id[4]; - - for (int i = 0; i < nb; i++) { - for (int row_iter = 0; row_iter < 4; row_iter++) { - float amax = 0.0f; // absolute max - - for (int j = 0; j < QK8_0; j++) { - srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j]; - amax = MAX(amax, fabsf(srcv[row_iter][j])); - } - - const float d = amax / ((1 << 7) - 1); - id[row_iter] = d ? 
1.0f / d : 0.0f; - - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); - } - - for (int j = 0; j < QK8_0 * 4; j++) { - int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; - int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; - src_offset += (j % blck_size_interleave); - - float x0 = srcv[src_id][src_offset] * id[src_id]; - y[i].qs[j] = roundf(x0); - } - } -#endif -} - -static void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { +void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; -#if defined(__ARM_NEON) - float32x4_t srcv[4][8]; - float id[4]; - - for (int i = 0; i < nb; i++) { - float32x4_t asrcv[8]; - float32x4_t amaxv[8]; - - for (int row_iter = 0; row_iter < 4; row_iter++) { - for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j); - for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]); - - for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]); - for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]); - for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]); - - const float amax = vmaxvq_f32(amaxv[0]); - - const float d = amax / ((1 << 7) - 1); - id[row_iter] = d ? 1.0f / d : 0.0f; - - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); - } - - for (int j = 0; j < 4; j++) { - float32x4_t v = vmulq_n_f32(srcv[0][2 * j], id[0]); - int32x4_t vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 0] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 1] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 2] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 3] = vgetq_lane_s32(vi, 3); - v = vmulq_n_f32(srcv[0][2 * j + 1], id[0]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 4] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 5] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 6] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 7] = vgetq_lane_s32(vi, 3); - - v = vmulq_n_f32(srcv[1][2 * j], id[1]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 8] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 9] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 10] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 11] = vgetq_lane_s32(vi, 3); - v = vmulq_n_f32(srcv[1][2 * j + 1], id[1]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 12] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 13] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 14] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 15] = vgetq_lane_s32(vi, 3); - - v = vmulq_n_f32(srcv[2][2 * j], id[2]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 16] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 17] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 18] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 19] = vgetq_lane_s32(vi, 3); - v = vmulq_n_f32(srcv[2][2 * j + 1], id[2]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 20] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 21] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 22] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 23] = vgetq_lane_s32(vi, 3); - - v = vmulq_n_f32(srcv[3][2 * j], id[3]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 24] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 25] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 26] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 27] = vgetq_lane_s32(vi, 3); - v = vmulq_n_f32(srcv[3][2 * j + 1], id[3]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 28] = vgetq_lane_s32(vi, 0); 
- y[i].qs[32 * j + 29] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 30] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 31] = vgetq_lane_s32(vi, 3); - } - } -#elif defined(__AVX2__) || defined(__AVX__) +#if defined(__AVX2__) || defined(__AVX__) float id[4]; __m256 srcv[4][4]; __m256 idvec[4]; @@ -520,6 +278,7 @@ static void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGM #endif } } + #else // scalar const int blck_size_interleave = 8; @@ -553,7 +312,7 @@ static void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGM #endif } -static void ggml_quantize_mat_q8_K_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { +void ggml_quantize_mat_q8_K_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK_K == 256); assert(k % QK_K == 0); const int nb = k / QK_K; @@ -817,203 +576,7 @@ static void ggml_quantize_mat_q8_K_4x8(const float * GGML_RESTRICT x, void * GGM #endif } -template -void ggml_quantize_mat_t(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row); - -template <> void ggml_quantize_mat_t<4, GGML_TYPE_Q8_0>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { - assert(nrow == 4); - UNUSED(nrow); - ggml_quantize_mat_q8_0_4x4(x, vy, n_per_row); -} - -template <> void ggml_quantize_mat_t<8, GGML_TYPE_Q8_0>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { - assert(nrow == 4); - UNUSED(nrow); - ggml_quantize_mat_q8_0_4x8(x, vy, n_per_row); -} - -template <> void ggml_quantize_mat_t<8, GGML_TYPE_Q8_K>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { - assert(nrow == 4); - UNUSED(nrow); - ggml_quantize_mat_q8_K_4x8(x, vy, n_per_row); -} - -static void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 4; - const int blocklen = 4; - - assert (n % qk == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; - - for (int c = 0; c < nc; c += ncols_interleaved) { - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - float32x4_t acc = vdupq_n_f32(0); - for (int b = 0; b < nb; b++) { - int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); - int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); - int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); - int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); - float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); - - int8x16_t a0 = vld1q_s8(a_ptr->qs); - int8x16_t a1 = vld1q_s8(a_ptr->qs + qk/2); - float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); - - int32x4_t ret = vdupq_n_s32(0); - - ret = vdotq_laneq_s32(ret, b0 << 4, a0, 0); - ret = vdotq_laneq_s32(ret, b1 << 4, a0, 1); - ret = vdotq_laneq_s32(ret, b2 << 4, a0, 2); - ret = vdotq_laneq_s32(ret, b3 << 4, a0, 3); - - ret = vdotq_laneq_s32(ret, b0 & 0xf0U, a1, 0); - ret = vdotq_laneq_s32(ret, b1 & 0xf0U, a1, 1); - ret = vdotq_laneq_s32(ret, b2 & 0xf0U, a1, 2); - ret = vdotq_laneq_s32(ret, b3 & 0xf0U, a1, 3); - - acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), - vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); - a_ptr++; - b_ptr++; - } - vst1q_f32(s, acc); - s += ncols_interleaved; - } - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - float sumf[4]; - int sumi; - - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); - - for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); - const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); - sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; - } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); - } - } - } - for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; - } -} - -static void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 4; - const int blocklen = 8; - - assert (n % qk == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; - - for (int c = 0; c < nc; c += ncols_interleaved) { - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - float32x4_t acc = vdupq_n_f32(0); - for (int b = 0; b < nb; b++) { - int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); - int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); - int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); - int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); - float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); - - int8x16_t a0 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs); - int8x16_t a1 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 1); - int8x16_t a2 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 2); - int8x16_t a3 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 3); - float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); - - int32x4_t ret0 = vdupq_n_s32(0); - int32x4_t ret1 = vdupq_n_s32(0); - - ret0 = vdotq_s32(ret0, b0 << 4, a0); - ret1 = vdotq_s32(ret1, b1 << 4, a0); - ret0 = vdotq_s32(ret0, b2 << 4, a1); - ret1 = vdotq_s32(ret1, b3 << 4, a1); - - ret0 = vdotq_s32(ret0, b0 & 0xf0U, a2); - ret1 = vdotq_s32(ret1, b1 & 0xf0U, a2); - ret0 = vdotq_s32(ret0, b2 & 0xf0U, a3); - ret1 = vdotq_s32(ret1, b3 & 0xf0U, a3); - - int32x4_t ret = vpaddq_s32(ret0, ret1); - - acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), - vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); - a_ptr++; - b_ptr++; - } - vst1q_f32(s, acc); - s += ncols_interleaved; - } - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - float sumf[4]; - int sumi; - - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); - - for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); - const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); - sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; - } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); - } - } - } - for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; - } -} - -static void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { +void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; @@ -1032,75 +595,7 @@ static void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c UNUSED(ncols_interleaved); UNUSED(blocklen); -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) -#if defined(__ARM_FEATURE_SVE) - if (ggml_cpu_has_sve() && ggml_cpu_get_sve_cnt() == QK8_0) { - const void * b_ptr = vx; - const void * a_ptr = vy; - float * res_ptr = s; - - __asm__ __volatile__( - "ptrue p0.b\n" - "add %x[b_ptr], %x[b_ptr], #0x10\n" - "1:" // Column loop - "add x22, %x[a_ptr], #0x2\n" - "mov z31.b, #0x0\n" - "mov x21, %x[nb]\n" - "2:" // Block loop - "ld1b { z30.b }, p0/Z, [%x[b_ptr]]\n" - "ld1b { z29.b }, p0/Z, [%x[b_ptr], #1, MUL VL]\n" - "mov z28.s, #0x0\n" - "mov z27.s, #0x0\n" - "ld1rd { z26.d }, p0/Z, [x22]\n" - "ld1b { z25.b }, p0/Z, [%x[b_ptr], #2, MUL VL]\n" - "sub x20, x22, #0x2\n" - "sub x21, x21, #0x1\n" - "ld1b { z24.b }, p0/Z, [%x[b_ptr], #3, MUL VL]\n" - "ld1rd { z23.d }, p0/Z, [x22, #8]\n" - "lsl z22.b, z30.b, #0x4\n" - "lsl z16.b, z29.b, #0x4\n" - "and z30.b, z30.b, #0xf0\n" - "and z29.b, z29.b, #0xf0\n" - "ld1rd { z21.d }, p0/Z, [x22, #16]\n" - "ld1rd { z20.d }, p0/Z, [x22, #24]\n" - "lsl z19.b, z25.b, #0x4\n" - "and z25.b, z25.b, #0xf0\n" - "ld1rh { z17.h }, p0/Z, [x20]\n" - "ld1h { z18.s }, p0/Z, [%x[b_ptr], #-1, MUL VL]\n" - "sdot z28.s, z22.b, z26.b\n" - "sdot z27.s, z16.b, z26.b\n" - "lsl z16.b, z24.b, #0x4\n" - "add x22, x22, #0x22\n" - "and z24.b, z24.b, #0xf0\n" - "add %x[b_ptr], %x[b_ptr], #0x90\n" - "fcvt z17.s, p0/m, z17.h\n" - "fcvt z18.s, p0/m, z18.h\n" - "sdot z28.s, z19.b, z23.b\n" - "sdot z27.s, z16.b, z23.b\n" - "fmul z18.s, z18.s, z17.s\n" - "sdot z28.s, z30.b, z21.b\n" - "sdot z27.s, z29.b, z21.b\n" - "sdot z28.s, z25.b, z20.b\n" - "sdot z27.s, z24.b, z20.b\n" - "uzp1 z17.s, z28.s, z27.s\n" - "uzp2 z16.s, z28.s, z27.s\n" - "add z17.s, z17.s, z16.s\n" - "asr z17.s, z17.s, #0x4\n" - "scvtf z17.s, p0/m, z17.s\n" - "fmla z31.s, p0/M, z17.s, z18.s\n" - "cbnz x21, 2b\n" - "sub %x[nc], %x[nc], #0x8\n" - "st1w { z31.s }, p0, [%x[res_ptr]]\n" - "add %x[res_ptr], %x[res_ptr], #0x20\n" - "cbnz %x[nc], 1b\n" - : [b_ptr] "+&r" (b_ptr), [res_ptr] "+&r" (res_ptr), [nc] "+&r" (nc) - : [a_ptr] "r" (a_ptr), [nb] "r" (nb) - : "memory", "p0", "x20", "x21", "x22", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" - ); - return; - } -#endif // #if defined(__ARM_FEATURE_SVE) -#elif defined(__AVX2__) +#if defined(__AVX2__) // Lookup table to convert signed nibbles to signed bytes __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); @@ -1191,74 +686,8 @@ static void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c } } return; -#elif defined __riscv_v - if (__riscv_vlenb() >= QK4_0) { - const size_t vl = QK4_0; - - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); - vfloat32m1_t sumf = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); - for (int l = 0; l < nb; l++) { - const int64_t a0 = *(const int64_t *)&a_ptr[l].qs[0]; - const int64_t a1 = *(const int64_t *)&a_ptr[l].qs[8]; - const int64_t a2 = *(const int64_t *)&a_ptr[l].qs[16]; - const int64_t a3 = *(const int64_t *)&a_ptr[l].qs[24]; - __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment - const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a0, vl / 4)); - const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a1, vl / 4)); - const 
vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a2, vl / 4)); - const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a3, vl / 4)); - - const vint8m4_t rhs_raw_vec = __riscv_vle8_v_i8m4((const int8_t *)b_ptr[l].qs, vl * 4); - const vint8m4_t rhs_vec_lo = __riscv_vsra_vx_i8m4(__riscv_vsll_vx_i8m4(rhs_raw_vec, 4, vl * 4), 4, vl * 4); - const vint8m4_t rhs_vec_hi = __riscv_vsra_vx_i8m4(rhs_raw_vec, 4, vl * 4); - const vint8m2_t rhs_vec_lo_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 0); - const vint8m2_t rhs_vec_lo_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 1); - const vint8m2_t rhs_vec_hi_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 0); - const vint8m2_t rhs_vec_hi_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 1); - - const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); - const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); - const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); - const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); - - const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_hi_m)); - const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); - const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); - const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); - const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); - const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); - const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); - const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); - const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); - const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); - const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); - const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); - const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); - - // vector version needs Zvfhmin extension - const float a_scale = GGML_FP16_TO_FP32(a_ptr[l].d); - const float b_scales[8] = { - GGML_FP16_TO_FP32(b_ptr[l].d[0]), - GGML_FP16_TO_FP32(b_ptr[l].d[1]), - GGML_FP16_TO_FP32(b_ptr[l].d[2]), - GGML_FP16_TO_FP32(b_ptr[l].d[3]), - GGML_FP16_TO_FP32(b_ptr[l].d[4]), - GGML_FP16_TO_FP32(b_ptr[l].d[5]), - GGML_FP16_TO_FP32(b_ptr[l].d[6]), - GGML_FP16_TO_FP32(b_ptr[l].d[7]) - }; - const vfloat32m1_t b_scales_vec = __riscv_vle32_v_f32m1(b_scales, vl / 4); - const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scale, vl / 4); - sumf = __riscv_vfmacc_vv_f32m1(sumf, tmp1, b_scales_vec, vl / 4); - } - __riscv_vse32_v_f32m1(s + x * ncols_interleaved, sumf, vl / 4); - } - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) +#endif { float sumf[8]; int sumi; @@ -1286,7 +715,7 @@ static void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c } } -static void ggml_gemv_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { +void ggml_gemv_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; const int nb = n / qk; const int ncols_interleaved = 8; @@ -1560,14 +989,14 @@ static void ggml_gemv_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, c #endif } - -static void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { +void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; - const int ncols_interleaved = 4; - const int blocklen = 4; + const int ncols_interleaved = 8; + const int blocklen = 8; assert (n % qk == 0); + assert (nr % 4 == 0); assert (nc % ncols_interleaved == 0); UNUSED(s); @@ -1580,1529 +1009,49 @@ static void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, UNUSED(ncols_interleaved); UNUSED(blocklen); -#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - float * res_ptr = s; +#if defined(__AVX2__) || defined(__AVX512F__) + { + const block_q4_0x8 * b_ptr_start = (const block_q4_0x8 *)vx; + const block_q8_0x4 * a_ptr_start = (const block_q8_0x4 *)vy; + int64_t b_nb = n / QK4_0; + int64_t y = 0; + // Mask to mask out nibbles from packed bytes + const __m256i m4b = _mm256_set1_epi8(0x0F); + const __m128i loadMask = _mm_blend_epi32(_mm_setzero_si128(), _mm_set1_epi32(0xFFFFFFFF), 3); + // Lookup table to convert signed nibbles to signed bytes + __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); + signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); + // Permute mask used for easier vector processing at later stages + __m256i requiredOrder = _mm256_set_epi32(3, 2, 1, 0, 7, 6, 5, 4); + int64_t xstart = 0; + int anr = nr - nr%16; // Used to align nr with boundary of 16 + #ifdef __AVX512F__ + int anc = nc - nc%16; // Used to align nc with boundary of 16 + // Mask to mask out nibbles from packed bytes expanded to 512 bit length + const __m512i m4bexpanded = _mm512_set1_epi8(0x0F); + // Lookup table to convert signed nibbles to signed bytes expanded to 512 bit length + __m512i signextendlutexpanded = _mm512_inserti32x8(_mm512_castsi256_si512(signextendlut), signextendlut, 1); - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + // Take group of four block_q8_0x4 structures at each pass of the loop and perform dot product operation + for (; y < anr / 4; y += 4) { - float32x4_t sumf = vdupq_n_f32(0); - for (int l = 0; l < nb; l++) { - uint8x16_t b_0 = vld1q_u8(b_ptr[l].qs + 0); - uint8x16_t b_1 = vld1q_u8(b_ptr[l].qs + 16); - uint8x16_t b_2 = vld1q_u8(b_ptr[l].qs + 32); - uint8x16_t b_3 = 
vld1q_u8(b_ptr[l].qs + 48); - - int8x16_t b_0_hi = vqtbl1q_s8(kvalues, b_0 >> 4); - int8x16_t b_0_lo = vqtbl1q_s8(kvalues, b_0 & 0x0F); - int8x16_t b_1_hi = vqtbl1q_s8(kvalues, b_1 >> 4); - int8x16_t b_1_lo = vqtbl1q_s8(kvalues, b_1 & 0x0F); - int8x16_t b_2_hi = vqtbl1q_s8(kvalues, b_2 >> 4); - int8x16_t b_2_lo = vqtbl1q_s8(kvalues, b_2 & 0x0F); - int8x16_t b_3_hi = vqtbl1q_s8(kvalues, b_3 >> 4); - int8x16_t b_3_lo = vqtbl1q_s8(kvalues, b_3 & 0x0F); - - int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 0); - int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16); - - int32x4_t sumi = vdupq_n_s32(0); - sumi = vdotq_laneq_s32(sumi, b_0_lo, a_0, 0); - sumi = vdotq_laneq_s32(sumi, b_0_hi, a_1, 0); - sumi = vdotq_laneq_s32(sumi, b_1_lo, a_0, 1); - sumi = vdotq_laneq_s32(sumi, b_1_hi, a_1, 1); - sumi = vdotq_laneq_s32(sumi, b_2_lo, a_0, 2); - sumi = vdotq_laneq_s32(sumi, b_2_hi, a_1, 2); - sumi = vdotq_laneq_s32(sumi, b_3_lo, a_0, 3); - sumi = vdotq_laneq_s32(sumi, b_3_hi, a_1, 3); - - float32x4_t a_d = vcvt_f32_f16(vld1_dup_f16((const float16_t *)&a_ptr[l].d)); - float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); - float32x4_t d = a_d * b_d; - - sumf = vmlaq_f32(sumf, d, vcvtq_f32_s32(sumi)); + const block_q8_0x4 * a_ptrs[4]; + + a_ptrs[0] = a_ptr_start + (y * nb); + for (int i = 0; i < 3; ++i) { + a_ptrs[i + 1] = a_ptrs[i] + nb; } - vst1q_f32(res_ptr + x * 4, sumf); - } - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) - { - float sumf[4]; - int sumi; + // Take group of two block_q4_0x8 structures at each pass of the loop and perform dot product operation + for (int64_t x = 0; x < anc / 8; x += 2) { - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + const block_q4_0x8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); + const block_q4_0x8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); - for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; - const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; - sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])); - } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); - } - } - } - for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; - } - } -} - -static void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 4; - const int blocklen = 4; - - assert (n % qk == 0); - assert (nr % 4 == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const void * b_ptr = vx; - const void * a_ptr = vy; - float * res_ptr = s; - size_t res_stride = bs * sizeof(float); - - __asm__ __volatile__( - "mov x10, %x[nr]\n" - "mov x9, #0x88\n" - "cmp x10, #0x10\n" - "mul x9, %x[nb], x9\n" - "blt 4f\n" - "1:" // Row loop - "add x28, %x[b_ptr], #0x8\n" - "mov x27, %x[nc]\n" - "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" - "2:" // Column loop - "add x25, %x[a_ptr], #0x8\n" - "movi v15.16b, #0x0\n" - "movi v19.16b, #0x0\n" - "mov x24, %x[nb]\n" - "add x23, x25, x9\n" - "movi v18.16b, #0x0\n" - "movi v14.16b, #0x0\n" - "add x22, x23, x9\n" - "movi v11.16b, #0x0\n" - "movi v13.16b, #0x0\n" - "add x21, x22, x9\n" - "movi v23.16b, #0x0\n" - "movi v16.16b, #0x0\n" - "movi v25.16b, #0x0\n" - "movi v7.16b, #0x0\n" - "movi v0.16b, #0x0\n" - "movi v4.16b, #0x0\n" - "movi v5.16b, #0x0\n" - "movi v21.16b, #0x0\n" - "movi v8.16b, #0x0\n" - "movi v1.16b, #0x0\n" - "3:" // Block loop - "ldr q3, [x28, #0x0]\n" - "ldr q31, [x25, #0x0]\n" - "movi v28.16b, #0x4\n" - "movi v10.4s, #0x0\n" - "ldr q22, [x28, #0x10]\n" - "ldr q6, [x25, #0x10]\n" - "movi v29.4s, #0x0\n" - "movi v9.4s, #0x0\n" - "ldr q27, [x28, #0x20]\n" - "ldr q30, [x28, #0x30]\n" - "movi v20.4s, #0x0\n" - "movi v24.16b, #0xf0\n" - "ldr d2, [x25, #-0x8]\n" - "ldr d26, [x23, #-0x8]\n" - "sshl v12.16b, v3.16b, v28.16b\n" - "sub x20, x28, #0x8\n" - "ldr d17, [x20, #0x0]\n" - "and v3.16b, v3.16b, v24.16b\n" - "subs x24, x24, #0x1\n" - "add x28, x28, #0x48\n" - ".inst 0x4f9fe18a // sdot v10.4s, v12.16b, v31.4b[0]\n" - ".inst 0x4fbfe19d // sdot v29.4s, v12.16b, v31.4b[1]\n" - ".inst 0x4f9fe989 // sdot v9.4s, v12.16b, v31.4b[2]\n" - ".inst 0x4fbfe994 // sdot v20.4s, v12.16b, v31.4b[3]\n" - "sshl v31.16b, v22.16b, v28.16b\n" - "and v22.16b, v22.16b, v24.16b\n" - "fcvtl v17.4s, v17.4h\n" - "fcvtl v2.4s, v2.4h\n" - "fcvtl v26.4s, v26.4h\n" - ".inst 0x4f86e3ea // sdot v10.4s, v31.16b, v6.4b[0]\n" - ".inst 0x4fa6e3fd // sdot v29.4s, v31.16b, v6.4b[1]\n" - ".inst 0x4f86ebe9 // sdot v9.4s, v31.16b, v6.4b[2]\n" - ".inst 0x4fa6ebf4 // sdot v20.4s, v31.16b, v6.4b[3]\n" - "sshl v6.16b, v27.16b, v28.16b\n" - "sshl v28.16b, v30.16b, v28.16b\n" - "and v27.16b, v27.16b, v24.16b\n" - "and v30.16b, v30.16b, v24.16b\n" - "ldr q24, [x25, #0x20]\n" - ".inst 0x4f98e0ca // sdot v10.4s, v6.16b, v24.4b[0]\n" - ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" - ".inst 0x4f98e8c9 // sdot v9.4s, v6.16b, v24.4b[2]\n" - ".inst 0x4fb8e8d4 // sdot v20.4s, v6.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x30]\n" - ".inst 0x4f98e38a // sdot v10.4s, v28.16b, v24.4b[0]\n" - ".inst 0x4fb8e39d // sdot v29.4s, v28.16b, v24.4b[1]\n" - ".inst 0x4f98eb89 // sdot v9.4s, v28.16b, v24.4b[2]\n" - ".inst 0x4fb8eb94 // sdot v20.4s, v28.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x40]\n" - ".inst 0x4f98e06a // sdot v10.4s, v3.16b, v24.4b[0]\n" - ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" - ".inst 0x4f98e869 // sdot v9.4s, v3.16b, v24.4b[2]\n" - ".inst 0x4fb8e874 // sdot v20.4s, v3.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x50]\n" - ".inst 0x4f98e2ca // sdot v10.4s, v22.16b, v24.4b[0]\n" - ".inst 0x4fb8e2dd // sdot v29.4s, v22.16b, v24.4b[1]\n" - ".inst 0x4f98eac9 // sdot v9.4s, v22.16b, v24.4b[2]\n" - ".inst 0x4fb8ead4 // sdot v20.4s, v22.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x60]\n" - ".inst 0x4f98e36a // sdot v10.4s, v27.16b, v24.4b[0]\n" - ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" - ".inst 0x4f98eb69 // sdot v9.4s, 
v27.16b, v24.4b[2]\n" - ".inst 0x4fb8eb74 // sdot v20.4s, v27.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x70]\n" - "add x25, x25, #0x88\n" - ".inst 0x4f98e3ca // sdot v10.4s, v30.16b, v24.4b[0]\n" - ".inst 0x4fb8e3dd // sdot v29.4s, v30.16b, v24.4b[1]\n" - ".inst 0x4f98ebc9 // sdot v9.4s, v30.16b, v24.4b[2]\n" - ".inst 0x4fb8ebd4 // sdot v20.4s, v30.16b, v24.4b[3]\n" - "fmul v24.4s, v17.4s, v2.s[0]\n" - "scvtf v10.4s, v10.4s, #0x4\n" - "scvtf v29.4s, v29.4s, #0x4\n" - "scvtf v9.4s, v9.4s, #0x4\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "fmla v15.4s, v10.4s, v24.4s\n" - "ldr q24, [x23, #0x0]\n" - "fmul v10.4s, v17.4s, v2.s[1]\n" - "fmla v19.4s, v29.4s, v10.4s\n" - "ldr q10, [x23, #0x10]\n" - "fmul v29.4s, v17.4s, v2.s[2]\n" - "fmul v2.4s, v17.4s, v2.s[3]\n" - "fmla v18.4s, v9.4s, v29.4s\n" - "movi v9.4s, #0x0\n" - "movi v29.4s, #0x0\n" - ".inst 0x4f98e189 // sdot v9.4s, v12.16b, v24.4b[0]\n" - ".inst 0x4fb8e19d // sdot v29.4s, v12.16b, v24.4b[1]\n" - "fmla v14.4s, v20.4s, v2.4s\n" - "movi v20.4s, #0x0\n" - "movi v2.4s, #0x0\n" - ".inst 0x4f98e994 // sdot v20.4s, v12.16b, v24.4b[2]\n" - ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" - "ldr q24, [x23, #0x20]\n" - ".inst 0x4f8ae3e9 // sdot v9.4s, v31.16b, v10.4b[0]\n" - ".inst 0x4faae3fd // sdot v29.4s, v31.16b, v10.4b[1]\n" - ".inst 0x4f8aebf4 // sdot v20.4s, v31.16b, v10.4b[2]\n" - ".inst 0x4faaebe2 // sdot v2.4s, v31.16b, v10.4b[3]\n" - "ldr q10, [x23, #0x30]\n" - ".inst 0x4f98e0c9 // sdot v9.4s, v6.16b, v24.4b[0]\n" - ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" - ".inst 0x4f98e8d4 // sdot v20.4s, v6.16b, v24.4b[2]\n" - ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" - "ldr q24, [x23, #0x40]\n" - ".inst 0x4f8ae389 // sdot v9.4s, v28.16b, v10.4b[0]\n" - ".inst 0x4faae39d // sdot v29.4s, v28.16b, v10.4b[1]\n" - ".inst 0x4f8aeb94 // sdot v20.4s, v28.16b, v10.4b[2]\n" - ".inst 0x4faaeb82 // sdot v2.4s, v28.16b, v10.4b[3]\n" - "ldr q10, [x23, #0x50]\n" - ".inst 0x4f98e069 // sdot v9.4s, v3.16b, v24.4b[0]\n" - ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" - ".inst 0x4f98e874 // sdot v20.4s, v3.16b, v24.4b[2]\n" - ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" - "ldr q24, [x23, #0x60]\n" - ".inst 0x4f8ae2c9 // sdot v9.4s, v22.16b, v10.4b[0]\n" - ".inst 0x4faae2dd // sdot v29.4s, v22.16b, v10.4b[1]\n" - ".inst 0x4f8aead4 // sdot v20.4s, v22.16b, v10.4b[2]\n" - ".inst 0x4faaeac2 // sdot v2.4s, v22.16b, v10.4b[3]\n" - "ldr q10, [x23, #0x70]\n" - "add x23, x23, #0x88\n" - ".inst 0x4f98e369 // sdot v9.4s, v27.16b, v24.4b[0]\n" - ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" - ".inst 0x4f98eb74 // sdot v20.4s, v27.16b, v24.4b[2]\n" - ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" - "ldr q24, [x22, #0x0]\n" - ".inst 0x4f8ae3c9 // sdot v9.4s, v30.16b, v10.4b[0]\n" - ".inst 0x4faae3dd // sdot v29.4s, v30.16b, v10.4b[1]\n" - ".inst 0x4f8aebd4 // sdot v20.4s, v30.16b, v10.4b[2]\n" - ".inst 0x4faaebc2 // sdot v2.4s, v30.16b, v10.4b[3]\n" - "fmul v10.4s, v17.4s, v26.s[0]\n" - "scvtf v9.4s, v9.4s, #0x4\n" - "scvtf v29.4s, v29.4s, #0x4\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "scvtf v2.4s, v2.4s, #0x4\n" - "fmla v11.4s, v9.4s, v10.4s\n" - "ldr q9, [x22, #0x10]\n" - "fmul v10.4s, v17.4s, v26.s[1]\n" - "fmla v13.4s, v29.4s, v10.4s\n" - "ldr d29, [x22, #-0x8]\n" - "fmul v10.4s, v17.4s, v26.s[2]\n" - "fmul v26.4s, v17.4s, v26.s[3]\n" - "fcvtl v29.4s, v29.4h\n" - "fmla v23.4s, v20.4s, v10.4s\n" - "movi v20.4s, #0x0\n" - "movi v10.4s, #0x0\n" - "fmla v16.4s, v2.4s, v26.4s\n" - "movi v26.4s, #0x0\n" - "movi v2.4s, #0x0\n" - 
".inst 0x4f98e194 // sdot v20.4s, v12.16b, v24.4b[0]\n" - ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" - ".inst 0x4f98e99a // sdot v26.4s, v12.16b, v24.4b[2]\n" - ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" - "ldr q24, [x22, #0x20]\n" - ".inst 0x4f89e3f4 // sdot v20.4s, v31.16b, v9.4b[0]\n" - ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" - ".inst 0x4f89ebfa // sdot v26.4s, v31.16b, v9.4b[2]\n" - ".inst 0x4fa9ebe2 // sdot v2.4s, v31.16b, v9.4b[3]\n" - "ldr q9, [x22, #0x30]\n" - ".inst 0x4f98e0d4 // sdot v20.4s, v6.16b, v24.4b[0]\n" - ".inst 0x4fb8e0ca // sdot v10.4s, v6.16b, v24.4b[1]\n" - ".inst 0x4f98e8da // sdot v26.4s, v6.16b, v24.4b[2]\n" - ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" - "ldr q24, [x22, #0x40]\n" - ".inst 0x4f89e394 // sdot v20.4s, v28.16b, v9.4b[0]\n" - ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" - ".inst 0x4f89eb9a // sdot v26.4s, v28.16b, v9.4b[2]\n" - ".inst 0x4fa9eb82 // sdot v2.4s, v28.16b, v9.4b[3]\n" - "ldr q9, [x22, #0x50]\n" - ".inst 0x4f98e074 // sdot v20.4s, v3.16b, v24.4b[0]\n" - ".inst 0x4fb8e06a // sdot v10.4s, v3.16b, v24.4b[1]\n" - ".inst 0x4f98e87a // sdot v26.4s, v3.16b, v24.4b[2]\n" - ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" - "ldr q24, [x22, #0x60]\n" - ".inst 0x4f89e2d4 // sdot v20.4s, v22.16b, v9.4b[0]\n" - ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" - ".inst 0x4f89eada // sdot v26.4s, v22.16b, v9.4b[2]\n" - ".inst 0x4fa9eac2 // sdot v2.4s, v22.16b, v9.4b[3]\n" - "ldr q9, [x22, #0x70]\n" - "add x22, x22, #0x88\n" - ".inst 0x4f98e374 // sdot v20.4s, v27.16b, v24.4b[0]\n" - ".inst 0x4fb8e36a // sdot v10.4s, v27.16b, v24.4b[1]\n" - ".inst 0x4f98eb7a // sdot v26.4s, v27.16b, v24.4b[2]\n" - ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" - "ldr q24, [x21, #0x0]\n" - ".inst 0x4f89e3d4 // sdot v20.4s, v30.16b, v9.4b[0]\n" - ".inst 0x4fa9e3ca // sdot v10.4s, v30.16b, v9.4b[1]\n" - ".inst 0x4f89ebda // sdot v26.4s, v30.16b, v9.4b[2]\n" - ".inst 0x4fa9ebc2 // sdot v2.4s, v30.16b, v9.4b[3]\n" - "fmul v9.4s, v17.4s, v29.s[0]\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "scvtf v10.4s, v10.4s, #0x4\n" - "scvtf v26.4s, v26.4s, #0x4\n" - "scvtf v2.4s, v2.4s, #0x4\n" - "fmla v25.4s, v20.4s, v9.4s\n" - "ldr q9, [x21, #0x10]\n" - "fmul v20.4s, v17.4s, v29.s[1]\n" - "fmla v7.4s, v10.4s, v20.4s\n" - "ldr d20, [x21, #-0x8]\n" - "fmul v10.4s, v17.4s, v29.s[2]\n" - "fmul v29.4s, v17.4s, v29.s[3]\n" - "fcvtl v20.4s, v20.4h\n" - "fmla v0.4s, v26.4s, v10.4s\n" - "movi v26.4s, #0x0\n" - "movi v10.4s, #0x0\n" - "fmla v4.4s, v2.4s, v29.4s\n" - "movi v2.4s, #0x0\n" - "movi v29.4s, #0x0\n" - ".inst 0x4f98e19a // sdot v26.4s, v12.16b, v24.4b[0]\n" - ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" - ".inst 0x4f98e982 // sdot v2.4s, v12.16b, v24.4b[2]\n" - ".inst 0x4fb8e99d // sdot v29.4s, v12.16b, v24.4b[3]\n" - "ldr q12, [x21, #0x20]\n" - "fmul v24.4s, v17.4s, v20.s[0]\n" - ".inst 0x4f89e3fa // sdot v26.4s, v31.16b, v9.4b[0]\n" - ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" - ".inst 0x4f89ebe2 // sdot v2.4s, v31.16b, v9.4b[2]\n" - ".inst 0x4fa9ebfd // sdot v29.4s, v31.16b, v9.4b[3]\n" - "ldr q9, [x21, #0x30]\n" - "fmul v31.4s, v17.4s, v20.s[1]\n" - ".inst 0x4f8ce0da // sdot v26.4s, v6.16b, v12.4b[0]\n" - ".inst 0x4face0ca // sdot v10.4s, v6.16b, v12.4b[1]\n" - ".inst 0x4f8ce8c2 // sdot v2.4s, v6.16b, v12.4b[2]\n" - ".inst 0x4face8dd // sdot v29.4s, v6.16b, v12.4b[3]\n" - "ldr q12, [x21, #0x40]\n" - "fmul v6.4s, v17.4s, v20.s[2]\n" - "fmul v20.4s, v17.4s, v20.s[3]\n" - ".inst 0x4f89e39a // 
sdot v26.4s, v28.16b, v9.4b[0]\n" - ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" - ".inst 0x4f89eb82 // sdot v2.4s, v28.16b, v9.4b[2]\n" - ".inst 0x4fa9eb9d // sdot v29.4s, v28.16b, v9.4b[3]\n" - "ldr q9, [x21, #0x50]\n" - ".inst 0x4f8ce07a // sdot v26.4s, v3.16b, v12.4b[0]\n" - ".inst 0x4face06a // sdot v10.4s, v3.16b, v12.4b[1]\n" - ".inst 0x4f8ce862 // sdot v2.4s, v3.16b, v12.4b[2]\n" - ".inst 0x4face87d // sdot v29.4s, v3.16b, v12.4b[3]\n" - "ldr q12, [x21, #0x60]\n" - ".inst 0x4f89e2da // sdot v26.4s, v22.16b, v9.4b[0]\n" - ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" - ".inst 0x4f89eac2 // sdot v2.4s, v22.16b, v9.4b[2]\n" - ".inst 0x4fa9eadd // sdot v29.4s, v22.16b, v9.4b[3]\n" - "ldr q17, [x21, #0x70]\n" - "add x21, x21, #0x88\n" - ".inst 0x4f8ce37a // sdot v26.4s, v27.16b, v12.4b[0]\n" - ".inst 0x4face36a // sdot v10.4s, v27.16b, v12.4b[1]\n" - ".inst 0x4f8ceb62 // sdot v2.4s, v27.16b, v12.4b[2]\n" - ".inst 0x4faceb7d // sdot v29.4s, v27.16b, v12.4b[3]\n" - ".inst 0x4f91e3da // sdot v26.4s, v30.16b, v17.4b[0]\n" - ".inst 0x4fb1e3ca // sdot v10.4s, v30.16b, v17.4b[1]\n" - ".inst 0x4f91ebc2 // sdot v2.4s, v30.16b, v17.4b[2]\n" - ".inst 0x4fb1ebdd // sdot v29.4s, v30.16b, v17.4b[3]\n" - "scvtf v26.4s, v26.4s, #0x4\n" - "scvtf v10.4s, v10.4s, #0x4\n" - "fmla v5.4s, v26.4s, v24.4s\n" - "scvtf v2.4s, v2.4s, #0x4\n" - "scvtf v29.4s, v29.4s, #0x4\n" - "fmla v21.4s, v10.4s, v31.4s\n" - "fmla v8.4s, v2.4s, v6.4s\n" - "fmla v1.4s, v29.4s, v20.4s\n" - "bgt 3b\n" - "mov x20, %x[res_ptr]\n" - "subs x27, x27, #0x4\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "str q15, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q19, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q18, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q14, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q11, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q13, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q23, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q16, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q25, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q7, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q0, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q4, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q5, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q21, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q8, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q1, [x20, #0x0]\n" - "bne 2b\n" - "mov x20, #0x4\n" - "sub x10, x10, #0x10\n" - "cmp x10, #0x10\n" - "mov %x[res_ptr], x26\n" - "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" - "bge 1b\n" - "4:" // Row loop skip - "cbz x10, 9f\n" - "5:" // Row tail: Row loop - "add x24, %x[b_ptr], #0x8\n" - "mov x23, %x[nc]\n" - "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" - "6:" // Row tail: Column loop - "movi v15.16b, #0x0\n" - "movi v19.16b, #0x0\n" - "add x25, %x[a_ptr], #0x8\n" - "mov x21, %x[nb]\n" - "movi v18.16b, #0x0\n" - "movi v14.16b, #0x0\n" - "7:" // Row tail: Block loop - "ldr q7, [x24, #0x0]\n" - "ldr q5, [x25, #0x0]\n" - "movi v9.16b, #0x4\n" - "movi v4.4s, #0x0\n" - "ldr q3, [x24, #0x10]\n" - "ldr q2, [x25, #0x10]\n" - "movi v1.4s, #0x0\n" - "movi v0.4s, #0x0\n" - "ldr q13, [x24, #0x20]\n" - "ldr q31, [x25, #0x20]\n" - "movi v30.4s, #0x0\n" - "movi v29.16b, #0xf0\n" - "ldr q28, [x24, #0x30]\n" - "ldr q27, [x25, #0x30]\n" - "sshl v20.16b, v7.16b, v9.16b\n" - "sub x20, x24, #0x8\n" - "ldr q26, [x25, #0x40]\n" - 
"ldr q25, [x25, #0x50]\n" - "sshl v17.16b, v3.16b, v9.16b\n" - "and v7.16b, v7.16b, v29.16b\n" - "ldr q24, [x25, #0x60]\n" - "ldr q16, [x25, #0x70]\n" - "sshl v22.16b, v13.16b, v9.16b\n" - "and v3.16b, v3.16b, v29.16b\n" - "ldr d21, [x20, #0x0]\n" - "ldr d12, [x25, #-0x8]\n" - ".inst 0x4f85e284 // sdot v4.4s, v20.16b, v5.4b[0]\n" - ".inst 0x4fa5e281 // sdot v1.4s, v20.16b, v5.4b[1]\n" - ".inst 0x4f85ea80 // sdot v0.4s, v20.16b, v5.4b[2]\n" - ".inst 0x4fa5ea9e // sdot v30.4s, v20.16b, v5.4b[3]\n" - "sshl v9.16b, v28.16b, v9.16b\n" - "subs x21, x21, #0x1\n" - "and v13.16b, v13.16b, v29.16b\n" - "and v28.16b, v28.16b, v29.16b\n" - "add x25, x25, #0x88\n" - "add x24, x24, #0x48\n" - "fcvtl v21.4s, v21.4h\n" - "fcvtl v12.4s, v12.4h\n" - ".inst 0x4f82e224 // sdot v4.4s, v17.16b, v2.4b[0]\n" - ".inst 0x4fa2e221 // sdot v1.4s, v17.16b, v2.4b[1]\n" - ".inst 0x4f82ea20 // sdot v0.4s, v17.16b, v2.4b[2]\n" - ".inst 0x4fa2ea3e // sdot v30.4s, v17.16b, v2.4b[3]\n" - "fmul v11.4s, v21.4s, v12.s[0]\n" - "fmul v23.4s, v21.4s, v12.s[1]\n" - "fmul v17.4s, v21.4s, v12.s[2]\n" - ".inst 0x4f9fe2c4 // sdot v4.4s, v22.16b, v31.4b[0]\n" - "fmul v6.4s, v21.4s, v12.s[3]\n" - ".inst 0x4fbfe2c1 // sdot v1.4s, v22.16b, v31.4b[1]\n" - ".inst 0x4f9feac0 // sdot v0.4s, v22.16b, v31.4b[2]\n" - ".inst 0x4fbfeade // sdot v30.4s, v22.16b, v31.4b[3]\n" - ".inst 0x4f9be124 // sdot v4.4s, v9.16b, v27.4b[0]\n" - ".inst 0x4fbbe121 // sdot v1.4s, v9.16b, v27.4b[1]\n" - ".inst 0x4f9be920 // sdot v0.4s, v9.16b, v27.4b[2]\n" - ".inst 0x4fbbe93e // sdot v30.4s, v9.16b, v27.4b[3]\n" - ".inst 0x4f9ae0e4 // sdot v4.4s, v7.16b, v26.4b[0]\n" - ".inst 0x4fbae0e1 // sdot v1.4s, v7.16b, v26.4b[1]\n" - ".inst 0x4f9ae8e0 // sdot v0.4s, v7.16b, v26.4b[2]\n" - ".inst 0x4fbae8fe // sdot v30.4s, v7.16b, v26.4b[3]\n" - ".inst 0x4f99e064 // sdot v4.4s, v3.16b, v25.4b[0]\n" - ".inst 0x4fb9e061 // sdot v1.4s, v3.16b, v25.4b[1]\n" - ".inst 0x4f99e860 // sdot v0.4s, v3.16b, v25.4b[2]\n" - ".inst 0x4fb9e87e // sdot v30.4s, v3.16b, v25.4b[3]\n" - ".inst 0x4f98e1a4 // sdot v4.4s, v13.16b, v24.4b[0]\n" - ".inst 0x4fb8e1a1 // sdot v1.4s, v13.16b, v24.4b[1]\n" - ".inst 0x4f98e9a0 // sdot v0.4s, v13.16b, v24.4b[2]\n" - ".inst 0x4fb8e9be // sdot v30.4s, v13.16b, v24.4b[3]\n" - ".inst 0x4f90e384 // sdot v4.4s, v28.16b, v16.4b[0]\n" - ".inst 0x4fb0e381 // sdot v1.4s, v28.16b, v16.4b[1]\n" - ".inst 0x4f90eb80 // sdot v0.4s, v28.16b, v16.4b[2]\n" - ".inst 0x4fb0eb9e // sdot v30.4s, v28.16b, v16.4b[3]\n" - "scvtf v4.4s, v4.4s, #0x4\n" - "scvtf v1.4s, v1.4s, #0x4\n" - "scvtf v0.4s, v0.4s, #0x4\n" - "fmla v15.4s, v4.4s, v11.4s\n" - "scvtf v30.4s, v30.4s, #0x4\n" - "fmla v19.4s, v1.4s, v23.4s\n" - "fmla v18.4s, v0.4s, v17.4s\n" - "fmla v14.4s, v30.4s, v6.4s\n" - "bgt 7b\n" - "mov x20, %x[res_ptr]\n" - "cmp x10, #0x1\n" - "str q15, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x10, #0x2\n" - "str q19, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x10, #0x3\n" - "str q18, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "str q14, [x20, #0x0]\n" - "8:" // Row tail: Accumulator store skip - "subs x23, x23, #0x4\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "bne 6b\n" - "subs x10, x10, #0x4\n" - "add %x[a_ptr], %x[a_ptr], x9\n" - "mov %x[res_ptr], x22\n" - "bgt 5b\n" - "9:" // Row tail: Row loop skip - : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) - : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", 
"v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" - ); - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) - { - float sumf[4][4]; - int sumi; - - for (int y = 0; y < nr / 4; y++) { - const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; - } - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); - const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); - sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + - (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; - } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); - } - } - } - } - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) - s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; - } - } - } - } -} - -static void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 4; - const int blocklen = 8; - - assert (n % qk == 0); - assert (nr % 4 == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) - if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) { - const void * b_ptr = vx; - const void * a_ptr = vy; - float * res_ptr = s; - size_t res_stride = bs * sizeof(float); - - __asm__ __volatile__( - "mov x10, %x[nr]\n" - "mov x9, #0x88\n" - "cmp x10, #0x10\n" - "mul x9, %x[nb], x9\n" - "blt 4f\n" - "1:" // Row loop - "add x28, %x[b_ptr], #0x8\n" - "mov x27, %x[nc]\n" - "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" - "2:" // Column loop - "add x25, %x[a_ptr], #0x8\n" - "movi v2.16b, #0x0\n" - "movi v10.16b, #0x0\n" - "mov x24, %x[nb]\n" - "add x23, x25, x9\n" - "movi v12.16b, #0x0\n" - "movi v28.16b, #0x0\n" - "add x22, x23, x9\n" - "movi v11.16b, #0x0\n" - "movi v13.16b, #0x0\n" - "add x21, x22, x9\n" - "movi v22.16b, #0x0\n" - "movi v23.16b, #0x0\n" - "movi v25.16b, #0x0\n" - "movi v5.16b, #0x0\n" - "movi v7.16b, #0x0\n" - "movi v4.16b, #0x0\n" - "movi v6.16b, #0x0\n" - "movi v30.16b, #0x0\n" - "movi v24.16b, #0x0\n" - "movi v14.16b, #0x0\n" - "3:" // Block loop - "ldr q21, [x28, #0x0]\n" - "ldr q16, [x28, #0x10]\n" - "movi v1.16b, #0x4\n" - "movi v19.4s, #0x0\n" - "ldr q27, [x25, #0x0]\n" - "ldr q15, [x25, #0x10]\n" - "movi v26.4s, #0x0\n" - "movi v18.4s, #0x0\n" - "ldr q29, [x28, #0x20]\n" - "ldr q3, [x28, #0x30]\n" - "movi v17.4s, #0x0\n" - "movi v0.16b, #0xf0\n" - "ldr d20, [x25, #-0x8]\n" - "ldr d9, [x23, #-0x8]\n" - "sshl v8.16b, v21.16b, v1.16b\n" - "sshl v31.16b, v16.16b, v1.16b\n" - "and v21.16b, v21.16b, v0.16b\n" - "and v16.16b, v16.16b, v0.16b\n" - "sub x20, x28, #0x8\n" - "subs x24, x24, #0x1\n" - "add x28, x28, #0x48\n" - ".inst 0x4e88a773 // smmla v19.4s, v27.16b, v8.16b\n" - ".inst 0x4e9fa77a // smmla v26.4s, v27.16b, v31.16b\n" - "ldr q27, [x25, #0x20]\n" - ".inst 0x4e88a5f2 // smmla v18.4s, v15.16b, v8.16b\n" - ".inst 0x4e9fa5f1 // smmla v17.4s, v15.16b, v31.16b\n" - "sshl v15.16b, v29.16b, v1.16b\n" - "sshl v1.16b, v3.16b, v1.16b\n" - "and v29.16b, v29.16b, v0.16b\n" - "and v3.16b, v3.16b, v0.16b\n" - "ldr q0, [x25, #0x30]\n" - "fcvtl v20.4s, v20.4h\n" - ".inst 0x4e8fa773 // smmla v19.4s, v27.16b, v15.16b\n" - "fcvtl v9.4s, v9.4h\n" - ".inst 0x4e81a77a // smmla v26.4s, v27.16b, v1.16b\n" - "ldr q27, [x25, #0x40]\n" - ".inst 0x4e8fa412 // smmla v18.4s, v0.16b, v15.16b\n" - ".inst 0x4e81a411 // smmla v17.4s, v0.16b, v1.16b\n" - "ldr q0, [x25, #0x50]\n" - ".inst 0x4e95a773 // smmla v19.4s, v27.16b, v21.16b\n" - ".inst 0x4e90a77a // smmla v26.4s, v27.16b, v16.16b\n" - "ldr q27, [x25, #0x60]\n" - ".inst 0x4e95a412 // smmla v18.4s, v0.16b, v21.16b\n" - ".inst 0x4e90a411 // smmla v17.4s, v0.16b, v16.16b\n" - "ldr q0, [x25, #0x70]\n" - "add x25, x25, #0x88\n" - ".inst 0x4e9da773 // smmla v19.4s, v27.16b, v29.16b\n" - ".inst 0x4e83a77a // smmla v26.4s, v27.16b, v3.16b\n" - "ldr d27, [x20, #0x0]\n" - ".inst 0x4e9da412 // smmla v18.4s, v0.16b, v29.16b\n" - ".inst 0x4e83a411 // smmla v17.4s, v0.16b, v3.16b\n" - "fcvtl v27.4s, v27.4h\n" - "uzp1 v0.2d, v19.2d, v26.2d\n" - "uzp2 v26.2d, v19.2d, v26.2d\n" - "fmul v19.4s, v27.4s, v20.s[0]\n" - "scvtf v0.4s, v0.4s, #0x4\n" - "scvtf v26.4s, v26.4s, #0x4\n" - "fmla v2.4s, v0.4s, v19.4s\n" - "ldr q19, [x23, #0x0]\n" - "uzp1 v0.2d, v18.2d, v17.2d\n" - "uzp2 v18.2d, v18.2d, v17.2d\n" - "fmul v17.4s, v27.4s, v20.s[1]\n" - "scvtf v0.4s, v0.4s, #0x4\n" - "scvtf v18.4s, v18.4s, #0x4\n" - "fmla v10.4s, v26.4s, v17.4s\n" - "ldr q17, [x23, #0x10]\n" - "fmul v26.4s, v27.4s, v20.s[2]\n" - "fmul v20.4s, v27.4s, v20.s[3]\n" - "fmla v12.4s, 
v0.4s, v26.4s\n" - "ldr d0, [x22, #-0x8]\n" - "ldr d26, [x21, #-0x8]\n" - "fcvtl v0.4s, v0.4h\n" - "fmla v28.4s, v18.4s, v20.4s\n" - "movi v20.4s, #0x0\n" - "movi v18.4s, #0x0\n" - ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" - ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" - "ldr q19, [x23, #0x20]\n" - "fcvtl v26.4s, v26.4h\n" - ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" - ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" - "ldr q19, [x23, #0x40]\n" - ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" - ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" - "ldr q19, [x23, #0x60]\n" - ".inst 0x4e9da674 // smmla v20.4s, v19.16b, v29.16b\n" - ".inst 0x4e83a672 // smmla v18.4s, v19.16b, v3.16b\n" - "uzp1 v19.2d, v20.2d, v18.2d\n" - "scvtf v19.4s, v19.4s, #0x4\n" - "uzp2 v20.2d, v20.2d, v18.2d\n" - "fmul v18.4s, v27.4s, v9.s[0]\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "fmla v11.4s, v19.4s, v18.4s\n" - "ldr q18, [x22, #0x0]\n" - "fmul v19.4s, v27.4s, v9.s[1]\n" - "fmla v13.4s, v20.4s, v19.4s\n" - "movi v19.4s, #0x0\n" - "movi v20.4s, #0x0\n" - ".inst 0x4e88a633 // smmla v19.4s, v17.16b, v8.16b\n" - ".inst 0x4e9fa634 // smmla v20.4s, v17.16b, v31.16b\n" - "ldr q17, [x23, #0x30]\n" - ".inst 0x4e8fa633 // smmla v19.4s, v17.16b, v15.16b\n" - ".inst 0x4e81a634 // smmla v20.4s, v17.16b, v1.16b\n" - "ldr q17, [x23, #0x50]\n" - ".inst 0x4e95a633 // smmla v19.4s, v17.16b, v21.16b\n" - ".inst 0x4e90a634 // smmla v20.4s, v17.16b, v16.16b\n" - "ldr q17, [x23, #0x70]\n" - "add x23, x23, #0x88\n" - ".inst 0x4e9da633 // smmla v19.4s, v17.16b, v29.16b\n" - ".inst 0x4e83a634 // smmla v20.4s, v17.16b, v3.16b\n" - "uzp1 v17.2d, v19.2d, v20.2d\n" - "scvtf v17.4s, v17.4s, #0x4\n" - "uzp2 v20.2d, v19.2d, v20.2d\n" - "fmul v19.4s, v27.4s, v9.s[2]\n" - "fmul v9.4s, v27.4s, v9.s[3]\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "fmla v22.4s, v17.4s, v19.4s\n" - "ldr q17, [x22, #0x10]\n" - "movi v19.4s, #0x0\n" - ".inst 0x4e88a653 // smmla v19.4s, v18.16b, v8.16b\n" - "fmla v23.4s, v20.4s, v9.4s\n" - "movi v20.4s, #0x0\n" - "movi v9.4s, #0x0\n" - ".inst 0x4e9fa654 // smmla v20.4s, v18.16b, v31.16b\n" - "ldr q18, [x22, #0x20]\n" - ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" - ".inst 0x4e8fa653 // smmla v19.4s, v18.16b, v15.16b\n" - ".inst 0x4e81a654 // smmla v20.4s, v18.16b, v1.16b\n" - "ldr q18, [x22, #0x40]\n" - ".inst 0x4e95a653 // smmla v19.4s, v18.16b, v21.16b\n" - ".inst 0x4e90a654 // smmla v20.4s, v18.16b, v16.16b\n" - "ldr q18, [x22, #0x60]\n" - ".inst 0x4e9da653 // smmla v19.4s, v18.16b, v29.16b\n" - ".inst 0x4e83a654 // smmla v20.4s, v18.16b, v3.16b\n" - "movi v18.4s, #0x0\n" - ".inst 0x4e9fa632 // smmla v18.4s, v17.16b, v31.16b\n" - "ldr q17, [x22, #0x30]\n" - ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" - ".inst 0x4e81a632 // smmla v18.4s, v17.16b, v1.16b\n" - "ldr q17, [x22, #0x50]\n" - ".inst 0x4e95a629 // smmla v9.4s, v17.16b, v21.16b\n" - ".inst 0x4e90a632 // smmla v18.4s, v17.16b, v16.16b\n" - "ldr q17, [x22, #0x70]\n" - "add x22, x22, #0x88\n" - ".inst 0x4e9da629 // smmla v9.4s, v17.16b, v29.16b\n" - ".inst 0x4e83a632 // smmla v18.4s, v17.16b, v3.16b\n" - "uzp1 v17.2d, v19.2d, v20.2d\n" - "uzp2 v20.2d, v19.2d, v20.2d\n" - "fmul v19.4s, v27.4s, v0.s[0]\n" - "scvtf v17.4s, v17.4s, #0x4\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "fmla v25.4s, v17.4s, v19.4s\n" - "ldr q19, [x21, #0x0]\n" - "fmul v17.4s, v27.4s, v0.s[1]\n" - "fmla v5.4s, v20.4s, v17.4s\n" - "ldr q17, [x21, #0x10]\n" - "uzp1 v20.2d, v9.2d, v18.2d\n" - "uzp2 v9.2d, v9.2d, v18.2d\n" - "fmul v18.4s, 
v27.4s, v0.s[2]\n" - "fmul v0.4s, v27.4s, v0.s[3]\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "scvtf v9.4s, v9.4s, #0x4\n" - "fmla v7.4s, v20.4s, v18.4s\n" - "movi v20.4s, #0x0\n" - "movi v18.4s, #0x0\n" - ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" - ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" - "ldr q19, [x21, #0x20]\n" - "fmla v4.4s, v9.4s, v0.4s\n" - "movi v9.4s, #0x0\n" - "movi v0.4s, #0x0\n" - ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" - "fmul v8.4s, v27.4s, v26.s[0]\n" - ".inst 0x4e9fa620 // smmla v0.4s, v17.16b, v31.16b\n" - "ldr q17, [x21, #0x30]\n" - ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" - "fmul v31.4s, v27.4s, v26.s[1]\n" - ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" - "ldr q19, [x21, #0x40]\n" - ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" - "fmul v15.4s, v27.4s, v26.s[2]\n" - "fmul v27.4s, v27.4s, v26.s[3]\n" - ".inst 0x4e81a620 // smmla v0.4s, v17.16b, v1.16b\n" - "ldr q1, [x21, #0x50]\n" - ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" - ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" - "ldr q26, [x21, #0x60]\n" - ".inst 0x4e95a429 // smmla v9.4s, v1.16b, v21.16b\n" - ".inst 0x4e90a420 // smmla v0.4s, v1.16b, v16.16b\n" - "ldr q21, [x21, #0x70]\n" - "add x21, x21, #0x88\n" - ".inst 0x4e9da754 // smmla v20.4s, v26.16b, v29.16b\n" - ".inst 0x4e83a752 // smmla v18.4s, v26.16b, v3.16b\n" - ".inst 0x4e9da6a9 // smmla v9.4s, v21.16b, v29.16b\n" - ".inst 0x4e83a6a0 // smmla v0.4s, v21.16b, v3.16b\n" - "uzp1 v29.2d, v20.2d, v18.2d\n" - "uzp2 v21.2d, v20.2d, v18.2d\n" - "scvtf v29.4s, v29.4s, #0x4\n" - "uzp1 v18.2d, v9.2d, v0.2d\n" - "uzp2 v16.2d, v9.2d, v0.2d\n" - "scvtf v21.4s, v21.4s, #0x4\n" - "fmla v6.4s, v29.4s, v8.4s\n" - "scvtf v18.4s, v18.4s, #0x4\n" - "scvtf v16.4s, v16.4s, #0x4\n" - "fmla v30.4s, v21.4s, v31.4s\n" - "fmla v24.4s, v18.4s, v15.4s\n" - "fmla v14.4s, v16.4s, v27.4s\n" - "bgt 3b\n" - "mov x20, %x[res_ptr]\n" - "subs x27, x27, #0x4\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "str q2, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q10, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q12, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q28, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q11, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q13, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q22, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q23, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q25, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q5, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q7, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q4, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q6, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q30, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q24, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q14, [x20, #0x0]\n" - "bne 2b\n" - "mov x20, #0x4\n" - "sub x10, x10, #0x10\n" - "cmp x10, #0x10\n" - "mov %x[res_ptr], x26\n" - "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" - "bge 1b\n" - "4:" // Row loop skip - "cbz x10, 9f\n" - "5:" // Row tail: Row loop - "add x24, %x[b_ptr], #0x8\n" - "mov x23, %x[nc]\n" - "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" - "6:" // Row tail: Column loop - "movi v2.16b, #0x0\n" - "movi v10.16b, #0x0\n" - "add x25, %x[a_ptr], #0x8\n" - "mov x21, %x[nb]\n" - "movi v12.16b, #0x0\n" - "movi v28.16b, #0x0\n" - "7:" // Row tail: Block loop - "ldr q6, [x24, #0x0]\n" - "ldr q5, 
[x24, #0x10]\n" - "movi v17.16b, #0x4\n" - "movi v8.4s, #0x0\n" - "ldr q4, [x25, #0x0]\n" - "ldr q13, [x25, #0x10]\n" - "movi v27.4s, #0x0\n" - "movi v0.4s, #0x0\n" - "ldr q31, [x24, #0x20]\n" - "ldr q14, [x24, #0x30]\n" - "movi v29.4s, #0x0\n" - "movi v22.16b, #0xf0\n" - "ldr q11, [x25, #0x20]\n" - "ldr q23, [x25, #0x30]\n" - "sshl v21.16b, v6.16b, v17.16b\n" - "sshl v16.16b, v5.16b, v17.16b\n" - "ldr q20, [x25, #0x40]\n" - "ldr q26, [x25, #0x50]\n" - "and v6.16b, v6.16b, v22.16b\n" - "and v5.16b, v5.16b, v22.16b\n" - "ldr q25, [x25, #0x60]\n" - "ldr q3, [x25, #0x70]\n" - "sshl v19.16b, v31.16b, v17.16b\n" - "sshl v18.16b, v14.16b, v17.16b\n" - "ldr d17, [x25, #-0x8]\n" - ".inst 0x4e95a488 // smmla v8.4s, v4.16b, v21.16b\n" - ".inst 0x4e90a49b // smmla v27.4s, v4.16b, v16.16b\n" - "and v31.16b, v31.16b, v22.16b\n" - ".inst 0x4e95a5a0 // smmla v0.4s, v13.16b, v21.16b\n" - ".inst 0x4e90a5bd // smmla v29.4s, v13.16b, v16.16b\n" - "and v14.16b, v14.16b, v22.16b\n" - "sub x20, x24, #0x8\n" - "ldr d16, [x20, #0x0]\n" - "subs x21, x21, #0x1\n" - "add x25, x25, #0x88\n" - "fcvtl v17.4s, v17.4h\n" - "add x24, x24, #0x48\n" - ".inst 0x4e93a568 // smmla v8.4s, v11.16b, v19.16b\n" - ".inst 0x4e92a57b // smmla v27.4s, v11.16b, v18.16b\n" - ".inst 0x4e93a6e0 // smmla v0.4s, v23.16b, v19.16b\n" - ".inst 0x4e92a6fd // smmla v29.4s, v23.16b, v18.16b\n" - "fcvtl v16.4s, v16.4h\n" - ".inst 0x4e86a688 // smmla v8.4s, v20.16b, v6.16b\n" - ".inst 0x4e85a69b // smmla v27.4s, v20.16b, v5.16b\n" - "fmul v23.4s, v16.4s, v17.s[0]\n" - "fmul v21.4s, v16.4s, v17.s[1]\n" - "fmul v1.4s, v16.4s, v17.s[2]\n" - "fmul v20.4s, v16.4s, v17.s[3]\n" - ".inst 0x4e86a740 // smmla v0.4s, v26.16b, v6.16b\n" - ".inst 0x4e85a75d // smmla v29.4s, v26.16b, v5.16b\n" - ".inst 0x4e9fa728 // smmla v8.4s, v25.16b, v31.16b\n" - ".inst 0x4e8ea73b // smmla v27.4s, v25.16b, v14.16b\n" - ".inst 0x4e9fa460 // smmla v0.4s, v3.16b, v31.16b\n" - ".inst 0x4e8ea47d // smmla v29.4s, v3.16b, v14.16b\n" - "uzp1 v19.2d, v8.2d, v27.2d\n" - "uzp2 v18.2d, v8.2d, v27.2d\n" - "scvtf v19.4s, v19.4s, #0x4\n" - "uzp1 v17.2d, v0.2d, v29.2d\n" - "uzp2 v16.2d, v0.2d, v29.2d\n" - "scvtf v18.4s, v18.4s, #0x4\n" - "fmla v2.4s, v19.4s, v23.4s\n" - "scvtf v17.4s, v17.4s, #0x4\n" - "scvtf v16.4s, v16.4s, #0x4\n" - "fmla v10.4s, v18.4s, v21.4s\n" - "fmla v12.4s, v17.4s, v1.4s\n" - "fmla v28.4s, v16.4s, v20.4s\n" - "bgt 7b\n" - "mov x20, %x[res_ptr]\n" - "cmp x10, #0x1\n" - "str q2, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x10, #0x2\n" - "str q10, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x10, #0x3\n" - "str q12, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "str q28, [x20, #0x0]\n" - "8:" // Row tail: Accumulator store skip - "subs x23, x23, #0x4\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "bne 6b\n" - "subs x10, x10, #0x4\n" - "add %x[a_ptr], %x[a_ptr], x9\n" - "mov %x[res_ptr], x22\n" - "bgt 5b\n" - "9:" // Row tail: Row loop skip - : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) - : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" - ); - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) - float sumf[4][4]; - int sumi; - - for (int y = 0; y < nr / 4; y++) { - const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; - } - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); - const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); - sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + - (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; - } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); - } - } - } - } - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) - s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; - } - } - } -} - -static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 8; - const int blocklen = 8; - - assert (n % qk == 0); - assert (nr % 4 == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) -#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) - if (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0) { - const void * b_ptr = vx; - const void * a_ptr = vy; - float * res_ptr = s; - size_t res_stride = bs * sizeof(float); - - __asm__ __volatile__( - "mov x20, #0x4\n" - "mov x13, %x[nr]\n" - "mov z28.s, #-0x4\n" - "mov x12, #0x88\n" - "ptrue p1.b\n" - "whilelt p0.s, XZR, x20\n" - "cmp x13, #0x10\n" - "mul x12, %x[nb], x12\n" - "blt 4f\n" - "1:" // Row loop - "add x11, %x[b_ptr], #0x10\n" - "mov x10, %x[nc]\n" - "add x9, %x[res_ptr], %x[res_stride], LSL #4\n" - "2:" // Column loop - "add x28, %x[a_ptr], #0x8\n" - "mov z24.b, #0x0\n" - "mov z15.b, #0x0\n" - "mov x27, %x[nb]\n" - "add x26, x28, x12\n" - "mov z12.b, #0x0\n" - "mov z0.b, #0x0\n" - "add x25, x26, x12\n" - "mov z13.b, #0x0\n" - "mov z1.b, #0x0\n" - "add x24, x25, x12\n" - "mov z20.b, #0x0\n" - "mov z25.b, #0x0\n" - "mov z11.b, #0x0\n" - "mov z16.b, #0x0\n" - "mov z19.b, #0x0\n" - "mov z26.b, #0x0\n" - "mov z8.b, #0x0\n" - "mov z29.b, #0x0\n" - "mov z27.b, #0x0\n" - "mov z10.b, #0x0\n" - "3:" // Block loop - "ld1b { z30.b }, p1/Z, [x11]\n" - "ld1b { z21.b }, p1/Z, [x11, #1, MUL VL]\n" - "mov z18.s, #0x0\n" - "mov z7.s, #0x0\n" - "ld1rqb { z3.b }, p1/Z, [x28]\n" - "ld1rqb { z5.b }, p1/Z, [x28, #16]\n" - "mov z9.s, #0x0\n" - "mov z22.s, #0x0\n" - "ld1b { z4.b }, p1/Z, [x11, #2, MUL VL]\n" - "ld1b { z17.b }, p1/Z, [x11, #3, MUL VL]\n" - "sub x20, x11, #0x10\n" - "sub x23, x28, #0x8\n" - "lsl z31.b, z30.b, #0x4\n" - "lsl z6.b, z21.b, #0x4\n" - "ld1h { z23.s }, p1/Z, [x20]\n" - "sub x22, x26, #0x8\n" - "and z30.b, z30.b, #0xf0\n" - "and z21.b, z21.b, #0xf0\n" - "sub x21, x25, #0x8\n" - "sub x20, x24, #0x8\n" - "lsl z14.b, z4.b, #0x4\n" - "lsl z2.b, z17.b, #0x4\n" - "subs x27, x27, #0x1\n" - "add x11, x11, #0x90\n" - ".inst 0x451f9872 // smmla z18.s, z3.b, z31.b\n" - ".inst 0x45069867 // smmla z7.s, z3.b, z6.b\n" - "ld1rqb { z3.b }, p1/Z, [x28, #32]\n" - "and z4.b, z4.b, #0xf0\n" - ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n" - ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n" - "ld1rqb { z5.b }, p1/Z, [x28, #48]\n" - "and z17.b, z17.b, #0xf0\n" - "fcvt z23.s, p1/m, z23.h\n" - ".inst 0x450e9872 // smmla z18.s, z3.b, z14.b\n" - ".inst 0x45029867 // smmla z7.s, z3.b, z2.b\n" - "ld1rqb { z3.b }, p1/Z, [x28, #64]\n" - ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n" - ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n" - "ld1rqb { z5.b }, p1/Z, [x28, #80]\n" - "fscale z23.s, p1/m, z23.s, z28.s\n" - ".inst 0x451e9872 // smmla z18.s, z3.b, z30.b\n" - ".inst 0x45159867 // smmla z7.s, z3.b, z21.b\n" - "ld1rqb { z3.b }, p1/Z, [x28, #96]\n" - ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n" - ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n" - "ld1rqb { z5.b }, p1/Z, [x28, #112]\n" - "add x28, x28, #0x88\n" - ".inst 0x45049872 // smmla z18.s, z3.b, z4.b\n" - ".inst 0x45119867 // smmla z7.s, z3.b, z17.b\n" - "ld1h { z3.s }, p0/Z, [x23]\n" - ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n" - ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n" - "fcvt z3.s, p1/m, z3.h\n" - "uzp1 z5.d, z18.d, z7.d\n" - "uzp2 z18.d, z18.d, z7.d\n" - "mov z3.q, z3.q[0]\n" - "uzp1 z7.d, z9.d, z22.d\n" - "uzp2 z22.d, z9.d, z22.d\n" - "fmul z9.s, z23.s, z3.s[0]\n" - "scvtf z5.s, p1/m, z5.s\n" - "scvtf z18.s, p1/m, z18.s\n" - "scvtf z7.s, p1/m, z7.s\n" - "scvtf z22.s, p1/m, z22.s\n" - "fmla z24.s, p1/M, z5.s, z9.s\n" - "ld1rqb { z5.b }, p1/Z, [x26]\n" - "fmul z9.s, z23.s, z3.s[1]\n" 
- "fmla z15.s, p1/M, z18.s, z9.s\n" - "ld1rqb { z18.b }, p1/Z, [x26, #16]\n" - "fmul z9.s, z23.s, z3.s[2]\n" - "fmul z3.s, z23.s, z3.s[3]\n" - "fmla z12.s, p1/M, z7.s, z9.s\n" - "mov z9.s, #0x0\n" - "ld1h { z7.s }, p0/Z, [x22]\n" - ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n" - "fmla z0.s, p1/M, z22.s, z3.s\n" - "mov z22.s, #0x0\n" - "ld1h { z3.s }, p0/Z, [x21]\n" - ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n" - "ld1rqb { z5.b }, p1/Z, [x26, #32]\n" - "fcvt z7.s, p1/m, z7.h\n" - "fcvt z3.s, p1/m, z3.h\n" - ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n" - ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n" - "ld1rqb { z5.b }, p1/Z, [x26, #64]\n" - "mov z7.q, z7.q[0]\n" - "mov z3.q, z3.q[0]\n" - ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n" - ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n" - "ld1rqb { z5.b }, p1/Z, [x26, #96]\n" - ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n" - ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n" - "uzp1 z5.d, z9.d, z22.d\n" - "scvtf z5.s, p1/m, z5.s\n" - "uzp2 z22.d, z9.d, z22.d\n" - "fmul z9.s, z23.s, z7.s[0]\n" - "scvtf z22.s, p1/m, z22.s\n" - "fmla z13.s, p1/M, z5.s, z9.s\n" - "ld1rqb { z9.b }, p1/Z, [x25]\n" - "fmul z5.s, z23.s, z7.s[1]\n" - "fmla z1.s, p1/M, z22.s, z5.s\n" - "mov z5.s, #0x0\n" - "mov z22.s, #0x0\n" - ".inst 0x451f9a45 // smmla z5.s, z18.b, z31.b\n" - ".inst 0x45069a56 // smmla z22.s, z18.b, z6.b\n" - "ld1rqb { z18.b }, p1/Z, [x26, #48]\n" - ".inst 0x450e9a45 // smmla z5.s, z18.b, z14.b\n" - ".inst 0x45029a56 // smmla z22.s, z18.b, z2.b\n" - "ld1rqb { z18.b }, p1/Z, [x26, #80]\n" - ".inst 0x451e9a45 // smmla z5.s, z18.b, z30.b\n" - ".inst 0x45159a56 // smmla z22.s, z18.b, z21.b\n" - "ld1rqb { z18.b }, p1/Z, [x26, #112]\n" - "add x26, x26, #0x88\n" - ".inst 0x45049a45 // smmla z5.s, z18.b, z4.b\n" - ".inst 0x45119a56 // smmla z22.s, z18.b, z17.b\n" - "uzp1 z18.d, z5.d, z22.d\n" - "scvtf z18.s, p1/m, z18.s\n" - "uzp2 z22.d, z5.d, z22.d\n" - "fmul z5.s, z23.s, z7.s[2]\n" - "fmul z7.s, z23.s, z7.s[3]\n" - "scvtf z22.s, p1/m, z22.s\n" - "fmla z20.s, p1/M, z18.s, z5.s\n" - "ld1rqb { z18.b }, p1/Z, [x25, #16]\n" - "ld1h { z5.s }, p0/Z, [x20]\n" - "fcvt z5.s, p1/m, z5.h\n" - "fmla z25.s, p1/M, z22.s, z7.s\n" - "mov z22.s, #0x0\n" - "mov z7.s, #0x0\n" - ".inst 0x451f9936 // smmla z22.s, z9.b, z31.b\n" - ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n" - "ld1rqb { z9.b }, p1/Z, [x25, #32]\n" - "mov z5.q, z5.q[0]\n" - ".inst 0x450e9936 // smmla z22.s, z9.b, z14.b\n" - ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n" - "ld1rqb { z9.b }, p1/Z, [x25, #64]\n" - ".inst 0x451e9936 // smmla z22.s, z9.b, z30.b\n" - ".inst 0x45159927 // smmla z7.s, z9.b, z21.b\n" - "ld1rqb { z9.b }, p1/Z, [x25, #96]\n" - ".inst 0x45049936 // smmla z22.s, z9.b, z4.b\n" - ".inst 0x45119927 // smmla z7.s, z9.b, z17.b\n" - "uzp1 z9.d, z22.d, z7.d\n" - "scvtf z9.s, p1/m, z9.s\n" - "uzp2 z22.d, z22.d, z7.d\n" - "fmul z7.s, z23.s, z3.s[0]\n" - "scvtf z22.s, p1/m, z22.s\n" - "fmla z11.s, p1/M, z9.s, z7.s\n" - "ld1rqb { z9.b }, p1/Z, [x24]\n" - "fmul z7.s, z23.s, z3.s[1]\n" - "fmla z16.s, p1/M, z22.s, z7.s\n" - "mov z22.s, #0x0\n" - "mov z7.s, #0x0\n" - ".inst 0x451f9a56 // smmla z22.s, z18.b, z31.b\n" - ".inst 0x45069a47 // smmla z7.s, z18.b, z6.b\n" - "ld1rqb { z18.b }, p1/Z, [x25, #48]\n" - ".inst 0x450e9a56 // smmla z22.s, z18.b, z14.b\n" - ".inst 0x45029a47 // smmla z7.s, z18.b, z2.b\n" - "ld1rqb { z18.b }, p1/Z, [x25, #80]\n" - ".inst 0x451e9a56 // smmla z22.s, z18.b, z30.b\n" - ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n" - "ld1rqb { z18.b }, p1/Z, [x25, #112]\n" - "add 
x25, x25, #0x88\n" - ".inst 0x45049a56 // smmla z22.s, z18.b, z4.b\n" - ".inst 0x45119a47 // smmla z7.s, z18.b, z17.b\n" - "uzp1 z18.d, z22.d, z7.d\n" - "scvtf z18.s, p1/m, z18.s\n" - "uzp2 z7.d, z22.d, z7.d\n" - "fmul z22.s, z23.s, z3.s[2]\n" - "fmul z3.s, z23.s, z3.s[3]\n" - "scvtf z7.s, p1/m, z7.s\n" - "fmla z19.s, p1/M, z18.s, z22.s\n" - "ld1rqb { z18.b }, p1/Z, [x24, #16]\n" - "fmul z22.s, z23.s, z5.s[0]\n" - "fmla z26.s, p1/M, z7.s, z3.s\n" - "mov z3.s, #0x0\n" - "mov z7.s, #0x0\n" - ".inst 0x451f9923 // smmla z3.s, z9.b, z31.b\n" - ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n" - "ld1rqb { z9.b }, p1/Z, [x24, #32]\n" - ".inst 0x450e9923 // smmla z3.s, z9.b, z14.b\n" - ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n" - "mov z9.s, #0x0\n" - ".inst 0x451f9a49 // smmla z9.s, z18.b, z31.b\n" - "mov z31.s, #0x0\n" - ".inst 0x45069a5f // smmla z31.s, z18.b, z6.b\n" - "ld1rqb { z6.b }, p1/Z, [x24, #48]\n" - "ld1rqb { z18.b }, p1/Z, [x24, #64]\n" - ".inst 0x450e98c9 // smmla z9.s, z6.b, z14.b\n" - "fmul z14.s, z23.s, z5.s[1]\n" - ".inst 0x450298df // smmla z31.s, z6.b, z2.b\n" - "ld1rqb { z6.b }, p1/Z, [x24, #80]\n" - "fmul z2.s, z23.s, z5.s[2]\n" - "fmul z23.s, z23.s, z5.s[3]\n" - ".inst 0x451e9a43 // smmla z3.s, z18.b, z30.b\n" - ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n" - "ld1rqb { z5.b }, p1/Z, [x24, #96]\n" - ".inst 0x451e98c9 // smmla z9.s, z6.b, z30.b\n" - ".inst 0x451598df // smmla z31.s, z6.b, z21.b\n" - "ld1rqb { z18.b }, p1/Z, [x24, #112]\n" - "add x24, x24, #0x88\n" - ".inst 0x450498a3 // smmla z3.s, z5.b, z4.b\n" - ".inst 0x451198a7 // smmla z7.s, z5.b, z17.b\n" - ".inst 0x45049a49 // smmla z9.s, z18.b, z4.b\n" - ".inst 0x45119a5f // smmla z31.s, z18.b, z17.b\n" - "uzp1 z18.d, z3.d, z7.d\n" - "uzp2 z5.d, z3.d, z7.d\n" - "scvtf z18.s, p1/m, z18.s\n" - "uzp1 z6.d, z9.d, z31.d\n" - "uzp2 z9.d, z9.d, z31.d\n" - "scvtf z5.s, p1/m, z5.s\n" - "fmla z8.s, p1/M, z18.s, z22.s\n" - "scvtf z6.s, p1/m, z6.s\n" - "scvtf z9.s, p1/m, z9.s\n" - "fmla z29.s, p1/M, z5.s, z14.s\n" - "fmla z27.s, p1/M, z6.s, z2.s\n" - "fmla z10.s, p1/M, z9.s, z23.s\n" - "bgt 3b\n" - "mov x20, %x[res_ptr]\n" - "subs x10, x10, #0x8\n" - "add %x[res_ptr], %x[res_ptr], #0x20\n" - "st1w { z24.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z15.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z12.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z0.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z13.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z1.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z20.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z25.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z11.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z16.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z19.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z26.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z8.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z29.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z27.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z10.s }, p1, [x20]\n" - "bne 2b\n" - "mov x20, #0x4\n" - "sub x13, x13, #0x10\n" - "cmp x13, #0x10\n" - "mov %x[res_ptr], x9\n" - "madd %x[a_ptr], x20, x12, %x[a_ptr]\n" - "bge 1b\n" - "4:" // Row loop skip - "cbz x13, 9f\n" - "5:" // Row tail: Row loop - "add x25, %x[b_ptr], #0x10\n" - "mov x24, %x[nc]\n" - "add x23, %x[res_ptr], %x[res_stride], 
LSL #2\n" - "6:" // Row tail: Column loop - "mov z24.b, #0x0\n" - "mov z15.b, #0x0\n" - "add x28, %x[a_ptr], #0x8\n" - "mov x22, %x[nb]\n" - "mov z12.b, #0x0\n" - "mov z0.b, #0x0\n" - "7:" // Row tail: Block loop - "ld1b { z3.b }, p1/Z, [x25]\n" - "ld1b { z6.b }, p1/Z, [x25, #1, MUL VL]\n" - "mov z2.s, #0x0\n" - "mov z25.s, #0x0\n" - "ld1rqb { z26.b }, p1/Z, [x28]\n" - "ld1rqb { z21.b }, p1/Z, [x28, #16]\n" - "mov z27.s, #0x0\n" - "mov z19.s, #0x0\n" - "ld1b { z29.b }, p1/Z, [x25, #2, MUL VL]\n" - "ld1b { z16.b }, p1/Z, [x25, #3, MUL VL]\n" - "sub x21, x25, #0x10\n" - "sub x20, x28, #0x8\n" - "lsl z20.b, z3.b, #0x4\n" - "lsl z4.b, z6.b, #0x4\n" - "ld1rqb { z10.b }, p1/Z, [x28, #32]\n" - "ld1rqb { z23.b }, p1/Z, [x28, #48]\n" - "and z3.b, z3.b, #0xf0\n" - "and z6.b, z6.b, #0xf0\n" - "ld1rqb { z11.b }, p1/Z, [x28, #64]\n" - "ld1rqb { z7.b }, p1/Z, [x28, #80]\n" - "lsl z8.b, z29.b, #0x4\n" - "lsl z14.b, z16.b, #0x4\n" - "ld1rqb { z18.b }, p1/Z, [x28, #96]\n" - "ld1rqb { z30.b }, p1/Z, [x28, #112]\n" - ".inst 0x45149b42 // smmla z2.s, z26.b, z20.b\n" - ".inst 0x45049b59 // smmla z25.s, z26.b, z4.b\n" - "and z29.b, z29.b, #0xf0\n" - "ld1h { z17.s }, p1/Z, [x21]\n" - ".inst 0x45149abb // smmla z27.s, z21.b, z20.b\n" - ".inst 0x45049ab3 // smmla z19.s, z21.b, z4.b\n" - "and z16.b, z16.b, #0xf0\n" - "ld1h { z4.s }, p0/Z, [x20]\n" - "subs x22, x22, #0x1\n" - "add x28, x28, #0x88\n" - "fcvt z17.s, p1/m, z17.h\n" - "add x25, x25, #0x90\n" - ".inst 0x45089942 // smmla z2.s, z10.b, z8.b\n" - ".inst 0x450e9959 // smmla z25.s, z10.b, z14.b\n" - "fcvt z4.s, p1/m, z4.h\n" - ".inst 0x45089afb // smmla z27.s, z23.b, z8.b\n" - ".inst 0x450e9af3 // smmla z19.s, z23.b, z14.b\n" - "fscale z17.s, p1/m, z17.s, z28.s\n" - "mov z4.q, z4.q[0]\n" - ".inst 0x45039962 // smmla z2.s, z11.b, z3.b\n" - ".inst 0x45069979 // smmla z25.s, z11.b, z6.b\n" - "fmul z23.s, z17.s, z4.s[0]\n" - "fmul z9.s, z17.s, z4.s[1]\n" - "fmul z21.s, z17.s, z4.s[2]\n" - "fmul z4.s, z17.s, z4.s[3]\n" - ".inst 0x450398fb // smmla z27.s, z7.b, z3.b\n" - ".inst 0x450698f3 // smmla z19.s, z7.b, z6.b\n" - ".inst 0x451d9a42 // smmla z2.s, z18.b, z29.b\n" - ".inst 0x45109a59 // smmla z25.s, z18.b, z16.b\n" - ".inst 0x451d9bdb // smmla z27.s, z30.b, z29.b\n" - ".inst 0x45109bd3 // smmla z19.s, z30.b, z16.b\n" - "uzp1 z31.d, z2.d, z25.d\n" - "uzp2 z13.d, z2.d, z25.d\n" - "scvtf z31.s, p1/m, z31.s\n" - "uzp1 z17.d, z27.d, z19.d\n" - "uzp2 z18.d, z27.d, z19.d\n" - "scvtf z13.s, p1/m, z13.s\n" - "fmla z24.s, p1/M, z31.s, z23.s\n" - "scvtf z17.s, p1/m, z17.s\n" - "scvtf z18.s, p1/m, z18.s\n" - "fmla z15.s, p1/M, z13.s, z9.s\n" - "fmla z12.s, p1/M, z17.s, z21.s\n" - "fmla z0.s, p1/M, z18.s, z4.s\n" - "bgt 7b\n" - "mov x20, %x[res_ptr]\n" - "cmp x13, #0x1\n" - "st1w { z24.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x13, #0x2\n" - "st1w { z15.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x13, #0x3\n" - "st1w { z12.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "st1w { z0.s }, p1, [x20]\n" - "8:" // Row tail: Accumulator store skip - "subs x24, x24, #0x8\n" - "add %x[res_ptr], %x[res_ptr], #0x20\n" - "bne 6b\n" - "subs x13, x13, #0x4\n" - "add %x[a_ptr], %x[a_ptr], x12\n" - "mov %x[res_ptr], x23\n" - "bgt 5b\n" - "9:" // Row tail: Row loop skip - : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) - : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) - : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", 
"x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" - ); - return; - } -#endif // #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) -#elif defined(__AVX2__) || defined(__AVX512F__) - { - const block_q4_0x8 * b_ptr_start = (const block_q4_0x8 *)vx; - const block_q8_0x4 * a_ptr_start = (const block_q8_0x4 *)vy; - int64_t b_nb = n / QK4_0; - int64_t y = 0; - // Mask to mask out nibbles from packed bytes - const __m256i m4b = _mm256_set1_epi8(0x0F); - const __m128i loadMask = _mm_blend_epi32(_mm_setzero_si128(), _mm_set1_epi32(0xFFFFFFFF), 3); - // Lookup table to convert signed nibbles to signed bytes - __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); - signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); - // Permute mask used for easier vector processing at later stages - __m256i requiredOrder = _mm256_set_epi32(3, 2, 1, 0, 7, 6, 5, 4); - int64_t xstart = 0; - int anr = nr - nr%16; // Used to align nr with boundary of 16 - #ifdef __AVX512F__ - int anc = nc - nc%16; // Used to align nc with boundary of 16 - // Mask to mask out nibbles from packed bytes expanded to 512 bit length - const __m512i m4bexpanded = _mm512_set1_epi8(0x0F); - // Lookup table to convert signed nibbles to signed bytes expanded to 512 bit length - __m512i signextendlutexpanded = _mm512_inserti32x8(_mm512_castsi256_si512(signextendlut), signextendlut, 1); - - // Take group of four block_q8_0x4 structures at each pass of the loop and perform dot product operation - for (; y < anr / 4; y += 4) { - - const block_q8_0x4 * a_ptrs[4]; - - a_ptrs[0] = a_ptr_start + (y * nb); - for (int i = 0; i < 3; ++i) { - a_ptrs[i + 1] = a_ptrs[i] + nb; - } - - // Take group of two block_q4_0x8 structures at each pass of the loop and perform dot product operation - for (int64_t x = 0; x < anc / 8; x += 2) { - - const block_q4_0x8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); - const block_q4_0x8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); - - // Master FP accumulators - __m512 acc_rows[16]; - for (int i = 0; i < 16; i++) { - acc_rows[i] = _mm512_setzero_ps(); + // Master FP accumulators + __m512 acc_rows[16]; + for (int i = 0; i < 16; i++) { + acc_rows[i] = _mm512_setzero_ps(); } for (int64_t b = 0; b < nb; b++) { @@ -3783,207 +1732,7 @@ static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c } return; } -#elif defined __riscv_v - if (__riscv_vlenb() >= QK4_0) { - const size_t vl = QK4_0; - - for (int y = 0; y < nr / 4; y++) { - const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); - vfloat32m1_t sumf0 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); - vfloat32m1_t sumf1 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); - vfloat32m1_t sumf2 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); - vfloat32m1_t sumf3 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4); - for (int l = 0; l < nb; l++) { - const vint8m4_t rhs_raw_vec = __riscv_vle8_v_i8m4((const int8_t *)b_ptr[l].qs, vl * 4); - const vint8m4_t rhs_vec_lo = __riscv_vsra_vx_i8m4(__riscv_vsll_vx_i8m4(rhs_raw_vec, 4, vl * 4), 4, vl * 4); - const vint8m4_t rhs_vec_hi = __riscv_vsra_vx_i8m4(rhs_raw_vec, 4, vl * 4); - const vint8m2_t rhs_vec_lo_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 
0); - const vint8m2_t rhs_vec_lo_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 1); - const vint8m2_t rhs_vec_hi_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 0); - const vint8m2_t rhs_vec_hi_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 1); - - // vector version needs Zvfhmin extension - const float a_scales[4] = { - GGML_FP16_TO_FP32(a_ptr[l].d[0]), - GGML_FP16_TO_FP32(a_ptr[l].d[1]), - GGML_FP16_TO_FP32(a_ptr[l].d[2]), - GGML_FP16_TO_FP32(a_ptr[l].d[3]) - }; - const float b_scales[8] = { - GGML_FP16_TO_FP32(b_ptr[l].d[0]), - GGML_FP16_TO_FP32(b_ptr[l].d[1]), - GGML_FP16_TO_FP32(b_ptr[l].d[2]), - GGML_FP16_TO_FP32(b_ptr[l].d[3]), - GGML_FP16_TO_FP32(b_ptr[l].d[4]), - GGML_FP16_TO_FP32(b_ptr[l].d[5]), - GGML_FP16_TO_FP32(b_ptr[l].d[6]), - GGML_FP16_TO_FP32(b_ptr[l].d[7]) - }; - const vfloat32m1_t b_scales_vec = __riscv_vle32_v_f32m1(b_scales, vl / 4); - - const int64_t A0 = *(const int64_t *)&a_ptr[l].qs[0]; - const int64_t A4 = *(const int64_t *)&a_ptr[l].qs[32]; - const int64_t A8 = *(const int64_t *)&a_ptr[l].qs[64]; - const int64_t Ac = *(const int64_t *)&a_ptr[l].qs[96]; - __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment - vint16m4_t sumi_l0; - { - const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A0, vl / 4)); - const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A4, vl / 4)); - const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A8, vl / 4)); - const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ac, vl / 4)); - const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); - const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); - const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); - const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); - - sumi_l0 = sumi_hi_m; - } - - { - const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l0)); - const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); - const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); - const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); - const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); - const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); - const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); - const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); - const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); - const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); - const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); - const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); - const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); - - const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[0], vl / 4); - sumf0 = __riscv_vfmacc_vv_f32m1(sumf0, tmp1, b_scales_vec, vl / 4); - } - - const int64_t A1 = *(const int64_t *)&a_ptr[l].qs[8]; - const int64_t A5 = *(const int64_t *)&a_ptr[l].qs[40]; - const int64_t A9 = *(const int64_t *)&a_ptr[l].qs[72]; - const int64_t Ad = *(const int64_t *)&a_ptr[l].qs[104]; - __asm__ 
__volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment - vint16m4_t sumi_l1; - { - const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A1, vl / 4)); - const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A5, vl / 4)); - const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A9, vl / 4)); - const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ad, vl / 4)); - const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); - const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); - const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); - const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); - - sumi_l1 = sumi_hi_m; - } - - { - const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l1)); - const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); - const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); - const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); - const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); - const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); - const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); - const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); - const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); - const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); - const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); - const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); - const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); - - const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[1], vl / 4); - sumf1 = __riscv_vfmacc_vv_f32m1(sumf1, tmp1, b_scales_vec, vl / 4); - } - - const int64_t A2 = *(const int64_t *)&a_ptr[l].qs[16]; - const int64_t A6 = *(const int64_t *)&a_ptr[l].qs[48]; - const int64_t Aa = *(const int64_t *)&a_ptr[l].qs[80]; - const int64_t Ae = *(const int64_t *)&a_ptr[l].qs[112]; - __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment - vint16m4_t sumi_l2; - { - const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A2, vl / 4)); - const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A6, vl / 4)); - const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Aa, vl / 4)); - const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ae, vl / 4)); - const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); - const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); - const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); - const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); - - sumi_l2 = sumi_hi_m; - } - - { - const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l2)); - const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 
0, vl); - const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); - const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); - const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); - const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); - const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); - const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); - const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); - const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); - const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); - const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); - const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); - - const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[2], vl / 4); - sumf2 = __riscv_vfmacc_vv_f32m1(sumf2, tmp1, b_scales_vec, vl / 4); - } - const int64_t A3 = *(const int64_t *)&a_ptr[l].qs[24]; - const int64_t A7 = *(const int64_t *)&a_ptr[l].qs[56]; - const int64_t Ab = *(const int64_t *)&a_ptr[l].qs[88]; - const int64_t Af = *(const int64_t *)&a_ptr[l].qs[120]; - __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment - vint16m4_t sumi_l3; - { - const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A3, vl / 4)); - const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A7, vl / 4)); - const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ab, vl / 4)); - const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Af, vl / 4)); - const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); - const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); - const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); - const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); - - sumi_l3 = sumi_hi_m; - } - - { - const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l3)); - const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); - const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); - const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); - const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); - const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); - const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); - const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); - const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); - const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); - const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); - const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); - const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); - - const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[3], vl / 4); - sumf3 = __riscv_vfmacc_vv_f32m1(sumf3, tmp1, b_scales_vec, vl / 
4); - } - } - __riscv_vse32_v_f32m1(&s[(y * 4 + 0) * bs + x * ncols_interleaved], sumf0, vl / 4); - __riscv_vse32_v_f32m1(&s[(y * 4 + 1) * bs + x * ncols_interleaved], sumf1, vl / 4); - __riscv_vse32_v_f32m1(&s[(y * 4 + 2) * bs + x * ncols_interleaved], sumf2, vl / 4); - __riscv_vse32_v_f32m1(&s[(y * 4 + 3) * bs + x * ncols_interleaved], sumf3, vl / 4); - } - } - - return; - } #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) float sumf[4][8]; int sumi; @@ -4019,7 +1768,7 @@ static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c } } -static void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { +void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; const int nb = n / qk; const int ncols_interleaved = 8; @@ -5533,899 +3282,3 @@ static void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, c } #endif } - -static void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 4; - const int blocklen = 4; - - assert (n % qk == 0); - assert (nr % 4 == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); - - for (int y = 0; y < nr / 4; y++) { - const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); - - float32x4_t sumf[4]; - for (int m = 0; m < 4; m++) { - sumf[m] = vdupq_n_f32(0); - } - - for (int l = 0; l < nb; l++) { - float32x4_t a_d = vcvt_f32_f16(vld1_f16((const float16_t *)a_ptr[l].d)); - float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); - - int32x4_t sumi_0 = vdupq_n_s32(0); - int32x4_t sumi_1 = vdupq_n_s32(0); - int32x4_t sumi_2 = vdupq_n_s32(0); - int32x4_t sumi_3 = vdupq_n_s32(0); - - for (int k = 0; k < 4; k++) { - int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 16 * k + 0); - int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16 * k + 64); - - uint8x16_t b = vld1q_u8(b_ptr[l].qs + 16 * k); - int8x16_t b_hi = vqtbl1q_s8(kvalues, b >> 4); - int8x16_t b_lo = vqtbl1q_s8(kvalues, b & 0xF); - - sumi_0 = vdotq_laneq_s32(sumi_0, b_lo, a_0, 0); - sumi_1 = vdotq_laneq_s32(sumi_1, b_lo, a_0, 1); - sumi_2 = vdotq_laneq_s32(sumi_2, b_lo, a_0, 2); - sumi_3 = vdotq_laneq_s32(sumi_3, b_lo, a_0, 3); - sumi_0 = vdotq_laneq_s32(sumi_0, b_hi, a_1, 0); - sumi_1 = vdotq_laneq_s32(sumi_1, b_hi, a_1, 1); - sumi_2 = vdotq_laneq_s32(sumi_2, b_hi, a_1, 2); - sumi_3 = vdotq_laneq_s32(sumi_3, b_hi, a_1, 3); - } - - sumf[0] = vmlaq_f32(sumf[0], vmulq_laneq_f32(b_d, a_d, 0), vcvtq_f32_s32(sumi_0)); - sumf[1] = vmlaq_f32(sumf[1], vmulq_laneq_f32(b_d, a_d, 1), vcvtq_f32_s32(sumi_1)); - sumf[2] = vmlaq_f32(sumf[2], vmulq_laneq_f32(b_d, a_d, 2), vcvtq_f32_s32(sumi_2)); - sumf[3] = vmlaq_f32(sumf[3], vmulq_laneq_f32(b_d, a_d, 3), 
vcvtq_f32_s32(sumi_3)); - } - - for (int m = 0; m < 4; m++) { - vst1q_f32(s + (y * 4 + m) * bs + x * 4, sumf[m]); - } - } - } - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) - { - float sumf[4][4]; - int sumi; - - for (int y = 0; y < nr / 4; y++) { - const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; - } - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; - const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; - sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + - (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])); - } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); - } - } - } - } - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) - s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; - } - } - } - } -} - -static block_q4_0x4 make_block_q4_0x4(block_q4_0 * in, unsigned int blck_size_interleave) { - block_q4_0x4 out; - - for (int i = 0; i < 4; i++) { - out.d[i] = in[i].d; - } - - const int end = QK4_0 * 2 / blck_size_interleave; - - if (blck_size_interleave == 8) { - const uint64_t xor_mask = 0x8888888888888888ULL; - for (int i = 0; i < end; ++i) { - int src_id = i % 4; - int src_offset = (i / 4) * blck_size_interleave; - int dst_offset = i * blck_size_interleave; - - uint64_t elems; - // Using memcpy to avoid unaligned memory accesses - memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); - elems ^= xor_mask; - memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); - } - } else if (blck_size_interleave == 4) { - const uint32_t xor_mask = 0x88888888; - for (int i = 0; i < end; ++i) { - int src_id = i % 4; - int src_offset = (i / 4) * blck_size_interleave; - int dst_offset = i * blck_size_interleave; - - uint32_t elems; - memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint32_t)); - elems ^= xor_mask; - memcpy(&out.qs[dst_offset], &elems, sizeof(uint32_t)); - } - } else { - GGML_ASSERT(false); - } - - return out; -} - -// interleave 8 block_q4_0s in blocks of blck_size_interleave -// returns an interleaved block_q4_0x8 -// in the interleaved block_q4_0x8, place deltas for 8 block_q4_0 blocks -// first, then interleave quants from 8 block_q4_0s in blocks of blck_size_interleave -static block_q4_0x8 make_block_q4_0x8(block_q4_0 * in, unsigned int blck_size_interleave) { - block_q4_0x8 out; - - for (int i = 0; i < 8; i++) { - out.d[i] = in[i].d; - } - - const int end = QK4_0 * 4 / blck_size_interleave; - const uint64_t xor_mask = 0x8888888888888888ULL; - - for (int i = 0; i < end; ++i) { - int src_id = i % 8; - int src_offset = (i / 8) * blck_size_interleave; - int dst_offset = i * blck_size_interleave; - - uint64_t elems; - memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); - elems ^= xor_mask; - memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); - } - - return out; -} - -static block_q4_Kx8 make_block_q4_Kx8(block_q4_K * in, unsigned 
int blck_size_interleave) { - block_q4_Kx8 out; - //Delta(scale) and dmin values of the eight Q4_K structures are copied onto the output interleaved structure - for (int i = 0; i < 8; i++) { - out.d[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d; - } - - for (int i = 0; i < 8; i++) { - out.dmin[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.dmin; - } - - const int end = QK_K * 4 / blck_size_interleave; - - // Interleave Q4_K quants by taking 8 bytes at a time - for (int i = 0; i < end; ++i) { - int src_id = i % 8; - int src_offset = (i / 8) * blck_size_interleave; - int dst_offset = i * blck_size_interleave; - - uint64_t elems; - memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); - memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); - } - - // The below logic is designed so as to unpack and rearrange scales and mins values in Q4_K - // Currently the Q4_K structure has 8 scales and 8 mins packed in 12 bytes ( 6 bits for each value) - // The output Q4_Kx8 structure has 96 bytes - // Every 12 byte is packed such that it contains scales and mins for corresponding sub blocks from Q4_K structure - // For eg - First 12 bytes contains 8 scales and 8 mins - each of first sub block from different Q4_K structures - uint8_t s[8], m[8]; - - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 8; j++) { - s[j] = in[j].scales[i] & 63; - m[j] = in[j].scales[i + 4] & 63; - } - - out.scales[i * 12] = (s[0] & 63) + ((s[4] & 48) << 2); - out.scales[i * 12 + 1] = (s[1] & 63) + ((s[5] & 48) << 2); - out.scales[i * 12 + 2] = (s[2] & 63) + ((s[6] & 48) << 2); - out.scales[i * 12 + 3] = (s[3] & 63) + ((s[7] & 48) << 2); - out.scales[i * 12 + 4] = (m[0] & 63) + ((m[4] & 48) << 2); - out.scales[i * 12 + 5] = (m[1] & 63) + ((m[5] & 48) << 2); - out.scales[i * 12 + 6] = (m[2] & 63) + ((m[6] & 48) << 2); - out.scales[i * 12 + 7] = (m[3] & 63) + ((m[7] & 48) << 2); - out.scales[i * 12 + 8] = (s[4] & 15) + ((m[4] & 15) << 4); - out.scales[i * 12 + 9] = (s[5] & 15) + ((m[5] & 15) << 4); - out.scales[i * 12 + 10] = (s[6] & 15) + ((m[6] & 15) << 4); - out.scales[i * 12 + 11] = (s[7] & 15) + ((m[7] & 15) << 4); - - } - - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 8; j++) { - s[j] = ((in[j].scales[i] & 192) >> 2) | (in[j].scales[i+8] & 15); - m[j] = ((in[j].scales[i + 4] & 192) >> 2) | ((in[j].scales[i+8] & 240) >> 4); - } - - out.scales[i * 12 + 48] = (s[0] & 63) + ((s[4] & 48) << 2); - out.scales[i * 12 + 49] = (s[1] & 63) + ((s[5] & 48) << 2); - out.scales[i * 12 + 50] = (s[2] & 63) + ((s[6] & 48) << 2); - out.scales[i * 12 + 51] = (s[3] & 63) + ((s[7] & 48) << 2); - out.scales[i * 12 + 52] = (m[0] & 63) + ((m[4] & 48) << 2); - out.scales[i * 12 + 53] = (m[1] & 63) + ((m[5] & 48) << 2); - out.scales[i * 12 + 54] = (m[2] & 63) + ((m[6] & 48) << 2); - out.scales[i * 12 + 55] = (m[3] & 63) + ((m[7] & 48) << 2); - out.scales[i * 12 + 56] = (s[4] & 15) + ((m[4] & 15) << 4); - out.scales[i * 12 + 57] = (s[5] & 15) + ((m[5] & 15) << 4); - out.scales[i * 12 + 58] = (s[6] & 15) + ((m[6] & 15) << 4); - out.scales[i * 12 + 59] = (s[7] & 15) + ((m[7] & 15) << 4); - - } - - return out; -} - -static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { - GGML_ASSERT(t->type == GGML_TYPE_Q4_0); - GGML_ASSERT(interleave_block == 4 || interleave_block == 8); - constexpr int nrows_interleaved = 4; - - block_q4_0x4 * dst = (block_q4_0x4 *)t->data; - const block_q4_0 * src = (const block_q4_0 *)data; - block_q4_0 dst_tmp[4]; - int nrow = 
ggml_nrows(t); - int nblocks = t->ne[0] / QK4_0; - - GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); - - if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { - return -1; - } - - for (int b = 0; b < nrow; b += nrows_interleaved) { - for (int64_t x = 0; x < nblocks; x++) { - for (int i = 0; i < nrows_interleaved; i++) { - dst_tmp[i] = src[x + i * nblocks]; - } - *dst++ = make_block_q4_0x4(dst_tmp, interleave_block); - } - src += nrows_interleaved * nblocks; - } - return 0; - - GGML_UNUSED(data_size); -} -static int repack_q4_K_to_q4_K_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { - GGML_ASSERT(t->type == GGML_TYPE_Q4_K); - GGML_ASSERT(interleave_block == 8); - constexpr int nrows_interleaved = 8; - - block_q4_Kx8 * dst = (block_q4_Kx8*)t->data; - const block_q4_K * src = (const block_q4_K*) data; - block_q4_K dst_tmp[8]; - int nrow = ggml_nrows(t); - int nblocks = t->ne[0] / QK_K; - - GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_K)); - - if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { - return -1; - } - - for (int b = 0; b < nrow; b += nrows_interleaved) { - for (int64_t x = 0; x < nblocks; x++) { - for (int i = 0; i < nrows_interleaved; i++ ) { - dst_tmp[i] = src[x + i * nblocks]; - } - *dst++ = make_block_q4_Kx8(dst_tmp, interleave_block); - } - src += nrows_interleaved * nblocks; - } - return 0; - - GGML_UNUSED(data_size); -} - -static int repack_q4_0_to_q4_0_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { - GGML_ASSERT(t->type == GGML_TYPE_Q4_0); - GGML_ASSERT(interleave_block == 8); - constexpr int nrows_interleaved = 8; - - block_q4_0x8 * dst = (block_q4_0x8*)t->data; - const block_q4_0 * src = (const block_q4_0*) data; - block_q4_0 dst_tmp[8]; - int nrow = ggml_nrows(t); - int nblocks = t->ne[0] / QK4_0; - - GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); - - if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { - return -1; - } - - for (int b = 0; b < nrow; b += nrows_interleaved) { - for (int64_t x = 0; x < nblocks; x++) { - for (int i = 0; i < nrows_interleaved; i++ ) { - dst_tmp[i] = src[x + i * nblocks]; - } - *dst++ = make_block_q4_0x8(dst_tmp, interleave_block); - } - src += nrows_interleaved * nblocks; - } - return 0; - - GGML_UNUSED(data_size); -} - -static block_iq4_nlx4 make_block_iq4_nlx4(block_iq4_nl * in, unsigned int blck_size_interleave) { - block_iq4_nlx4 out; - - for (int i = 0; i < 4; i++) { - out.d[i] = in[i].d; - } - - const int end = QK4_NL * 2 / blck_size_interleave; - - // TODO: this branch seems wrong - //if (blck_size_interleave == 8) { - // for (int i = 0; i < end; ++i) { - // int src_id = i % 4; - // int src_offset = (i / 4) * blck_size_interleave; - // int dst_offset = i * blck_size_interleave; - - // // Using memcpy to avoid unaligned memory accesses - // memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint64_t)); - // } - //} else - if (blck_size_interleave == 4) { - for (int i = 0; i < end; ++i) { - int src_id = i % 4; - int src_offset = (i / 4) * blck_size_interleave; - int dst_offset = i * blck_size_interleave; - - memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint32_t)); - } - } else { - GGML_ASSERT(false); - } - - return out; -} - -static int repack_iq4_nl_to_iq4_nl_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { - GGML_ASSERT(t->type == GGML_TYPE_IQ4_NL); - 
//GGML_ASSERT(interleave_block == 4 || interleave_block == 8); - GGML_ASSERT(interleave_block == 4); - - block_iq4_nlx4 * dst = (block_iq4_nlx4 *)t->data; - const block_iq4_nl * src = (const block_iq4_nl *)data; - block_iq4_nl dst_tmp[4]; - int nrow = ggml_nrows(t); - int nrows_interleaved = 4; - int nblocks = t->ne[0] / QK4_0; - - GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_iq4_nl)); - - if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { - return -1; - } - - for (int b = 0; b < nrow; b += nrows_interleaved) { - for (int64_t x = 0; x < nblocks; x++) { - for (int i = 0; i < nrows_interleaved; i++) { - dst_tmp[i] = src[x + i * nblocks]; - } - *dst++ = make_block_iq4_nlx4(dst_tmp, interleave_block); - } - src += nrows_interleaved * nblocks; - } - return 0; - - GGML_UNUSED(data_size); -} - -namespace ggml::cpu::aarch64 { -// repack -template -int repack(struct ggml_tensor *, const void *, size_t); - -// TODO: generalise. -template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { - return repack_q4_0_to_q4_0_4_bl(t, 4, data, data_size); -} - -template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { - return repack_q4_0_to_q4_0_4_bl(t, 8, data, data_size); -} - -template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { - return repack_q4_0_to_q4_0_8_bl(t, 8, data, data_size); -} - -template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { - return repack_q4_K_to_q4_K_8_bl(t, 8, data, data_size); -} - -template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { - return repack_iq4_nl_to_iq4_nl_4_bl(t, 4, data, data_size); -} - -// TODO: needs to be revisited -//template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { -// return repack_iq4_nl_to_iq4_nl_4_bl(t, 8, data, data_size); -//} - -// gemv -template -void gemv(int, float *, size_t, const void *, const void *, int, int); - -template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemv_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemv_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemv_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemv_q4_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemv_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc); -} - -// gemm -template -void gemm(int, float *, size_t, const void *, const void *, int, int); - -template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemm_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemm_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemm_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemm_q4_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); -} - -template 
<> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemm_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc); -} - -class tensor_traits_base : public ggml::cpu::tensor_traits { - public: - virtual int repack(struct ggml_tensor * t, const void * data, size_t data_size) = 0; -}; - -template class tensor_traits : public tensor_traits_base { - - bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override { - // not realy a GGML_TYPE_Q8_0 but same size. - switch (op->op) { - case GGML_OP_MUL_MAT: - size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); - return true; - case GGML_OP_MUL_MAT_ID: - size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); - size = GGML_PAD(size, sizeof(int64_t)); // + padding for next bloc. - size += sizeof(int64_t) * (1+op->src[0]->ne[2]) * op->src[1]->ne[2]; - return true; - default: - // GGML_ABORT("fatal error"); - break; - } - return false; - } - - bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override { - switch (op->op) { - case GGML_OP_MUL_MAT: - forward_mul_mat(params, op); - return true; - case GGML_OP_MUL_MAT_ID: - forward_mul_mat_id(params, op); - return true; - default: - // GGML_ABORT("fatal error"); - break; - } - return false; - } - - void forward_mul_mat(ggml_compute_params * params, ggml_tensor * op) { - const ggml_tensor * src0 = op->src[0]; - const ggml_tensor * src1 = op->src[1]; - ggml_tensor * dst = op; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - GGML_ASSERT(ne0 == ne01); - GGML_ASSERT(ne1 == ne11); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - GGML_ASSERT(ggml_n_dims(op->src[0]) == 2); - // GGML_ASSERT(ggml_n_dims(op->src[1]) == 2); - - char * wdata = static_cast(params->wdata); - const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); - - assert(params->wsize >= nbw1 * ne11); - - const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; - - int64_t i11_processed = 0; - for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { - ggml_quantize_mat_t((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10); - } - - i11_processed = ne11 - ne11 % 4; - for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) { - from_float((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), ne10); - } - - ggml_barrier(params->threadpool); - - const void * src1_wdata = params->wdata; - const size_t src1_col_stride = ggml_row_size(PARAM_TYPE, ne10); - int64_t src0_start = (ith * ne01) / nth; - int64_t src0_end = ((ith + 1) * ne01) / nth; - src0_start = (src0_start % NB_COLS) ? src0_start + NB_COLS - (src0_start % NB_COLS) : src0_start; - src0_end = (src0_end % NB_COLS) ? src0_end + NB_COLS - (src0_end % NB_COLS) : src0_end; - if (src0_start >= src0_end) { - return; - } - - // If there are more than three rows in src1, use gemm; otherwise, use gemv. 
- if (ne11 > 3) { - gemm(ne00, - (float *) ((char *) dst->data) + src0_start, ne01, - (const char *) src0->data + src0_start * nb01, - (const char *) src1_wdata, ne11 - ne11 % 4, src0_end - src0_start); - } - for (int iter = ne11 - ne11 % 4; iter < ne11; iter++) { - gemv(ne00, - (float *) ((char *) dst->data + (iter * nb1)) + src0_start, ne01, - (const char *) src0->data + src0_start * nb01, - (const char *) src1_wdata + (src1_col_stride * iter), 1, - src0_end - src0_start); - } - } - - void forward_mul_mat_id(ggml_compute_params * params, ggml_tensor * op) { - const ggml_tensor * src0 = op->src[0]; - const ggml_tensor * src1 = op->src[1]; - const ggml_tensor * ids = op->src[2]; - ggml_tensor * dst = op; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == ggml_type_size(src0->type)); - GGML_ASSERT(nb10 == ggml_type_size(src1->type)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ne03 == 1); - GGML_ASSERT(ne13 == 1); - GGML_ASSERT(ne3 == 1); - - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - // row groups - const int n_ids = ids->ne[0]; // n_expert_used - const int n_as = ne02; // n_expert - - const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); - const size_t nbw2 = nbw1*ne11; - const size_t nbw3 = nbw2*ne12; - - struct mmid_row_mapping { - int32_t i1; - int32_t i2; - }; - - GGML_ASSERT(params->wsize >= (GGML_PAD(nbw3, sizeof(int64_t)) + n_as * sizeof(int64_t) + - n_as * ne12 * sizeof(mmid_row_mapping))); - - auto * wdata = (char *) params->wdata; - auto * wdata_src1_end = (char *) wdata + GGML_PAD(nbw3, sizeof(int64_t)); - auto * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as] - - struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *) (matrix_row_counts + n_as); // [n_as][ne12] - - // src1: float32 => param type - for (int64_t i12 = 0; i12 < ne12; ++i12) { - for (int64_t i11 = ith; i11 < ne11; i11 += nth) { - from_float((float *)((char *) src1->data + i12 * nb12 + i11 * nb11), - (void *) (wdata + i12 * nbw2 + i11 * nbw1), - ne10); - } - } - -#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id) * ne12 + (i1)] - - if (ith == 0) { - // initialize matrix_row_counts - memset(matrix_row_counts, 0, n_as * sizeof(int64_t)); - - // group rows by src0 matrix - for (int32_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) { - for (int32_t id = 0; id < n_ids; ++id) { - const int32_t i02 = - *(const int32_t *) ((const char *) ids->data + iid1 * ids->nb[1] + id * ids->nb[0]); - - GGML_ASSERT(i02 >= 0 && i02 < n_as); - - MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = { id, iid1 }; - matrix_row_counts[i02] += 1; - } - } - } - - ggml_barrier(params->threadpool); - - // compute each matrix multiplication in sequence - for (int cur_a = 0; cur_a < n_as; ++cur_a) { - const int64_t cne1 = matrix_row_counts[cur_a]; - - if (cne1 == 0) { - continue; - } - - const auto * src0_cur = (const char *) src0->data + cur_a*nb02; - - //const int64_t nr0 = ne01; // src0 rows - const int64_t nr1 = cne1; // src1 rows - - int64_t src0_cur_start = (ith * ne01) / nth; - int64_t src0_cur_end = ((ith + 1) * ne01) / nth; - - src0_cur_start = (src0_cur_start % NB_COLS) ? src0_cur_start + NB_COLS - (src0_cur_start % NB_COLS) : src0_cur_start; - src0_cur_end = (src0_cur_end % NB_COLS) ? 
src0_cur_end + NB_COLS - (src0_cur_end % NB_COLS) : src0_cur_end; - - if (src0_cur_start >= src0_cur_end) { - return; - } - - for (int ir1 = 0; ir1 < nr1; ir1++) { - struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, ir1); - - const int id = row_mapping.i1; // selected expert index - - const int64_t i11 = id % ne11; - const int64_t i12 = row_mapping.i2; // row index in src1 - - const int64_t i1 = id; // selected expert index - const int64_t i2 = i12; // row - - const auto * src1_col = (const char *) wdata + (i11 * nbw1 + i12 * nbw2); - - gemv(ne00, - (float *)((char *) dst->data + (i1 * nb1 + i2 * nb2)) + src0_cur_start, ne01, - src0_cur + src0_cur_start * nb01, - src1_col, 1, src0_cur_end - src0_cur_start); - } - } -#undef MMID_MATRIX_ROW - } - - int repack(struct ggml_tensor * t, const void * data, size_t data_size) override { - GGML_LOG_DEBUG("%s: repack tensor %s with %s_%dx%d\n", __func__, t->name, ggml_type_name(t->type), - (int) NB_COLS, (int) INTER_SIZE); - return ggml::cpu::aarch64::repack(t, data, data_size); - } -}; - -// instance for Q4 -static const tensor_traits q4_0_4x4_q8_0; -static const tensor_traits q4_0_4x8_q8_0; -static const tensor_traits q4_0_8x8_q8_0; -static const tensor_traits q4_K_8x8_q8_K; - -// instance for IQ4 -static const tensor_traits iq4_nl_4x4_q8_0; - -} // namespace ggml::cpu::aarch64 - -static const ggml::cpu::tensor_traits * ggml_aarch64_get_optimal_repack_type(const struct ggml_tensor * cur) { - if (cur->type == GGML_TYPE_Q4_0) { - if (ggml_cpu_has_avx2() || (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0)) { - if (cur->ne[1] % 8 == 0) { - return &ggml::cpu::aarch64::q4_0_8x8_q8_0; - } - } - if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) { - if (cur->ne[1] % 4 == 0) { - return &ggml::cpu::aarch64::q4_0_4x8_q8_0; - } - } - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - if (cur->ne[1] % 4 == 0) { - return &ggml::cpu::aarch64::q4_0_4x4_q8_0; - } - } - } else if (cur->type == GGML_TYPE_Q4_K) { - if (ggml_cpu_has_avx2()) { - if (cur->ne[1] % 8 == 0) { - return &ggml::cpu::aarch64::q4_K_8x8_q8_K; - } - } - } else if (cur->type == GGML_TYPE_IQ4_NL) { - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - if (cur->ne[1] % 4 == 0) { - return &ggml::cpu::aarch64::iq4_nl_4x4_q8_0; - } - } - } - - return nullptr; -} - -static enum ggml_status ggml_backend_cpu_aarch64_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { - tensor->extra = (void *) const_cast(ggml_aarch64_get_optimal_repack_type(tensor)); - - GGML_UNUSED(buffer); - return GGML_STATUS_SUCCESS; -} - -static void ggml_backend_cpu_aarch64_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, - const void * data, size_t offset, size_t size) { - GGML_ASSERT(offset == 0); - GGML_ASSERT(size == ggml_nbytes(tensor)); - - auto tensor_traits = (ggml::cpu::aarch64::tensor_traits_base *) tensor->extra; - auto OK = tensor_traits->repack(tensor, data, size); - - GGML_ASSERT(OK == 0); - GGML_UNUSED(buffer); -} - -static const char * ggml_backend_cpu_aarch64_buffer_type_get_name(ggml_backend_buffer_type_t buft) { - return "CPU_AARCH64"; - - GGML_UNUSED(buft); -} - -static ggml_backend_buffer_t ggml_backend_cpu_aarch64_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { - ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); - - if (buffer == nullptr) { - return nullptr; - } - - buffer->buft = buft; - buffer->iface.init_tensor = 
ggml_backend_cpu_aarch64_buffer_init_tensor; - buffer->iface.set_tensor = ggml_backend_cpu_aarch64_buffer_set_tensor; - buffer->iface.get_tensor = nullptr; - buffer->iface.cpy_tensor = nullptr; - return buffer; -} - -static size_t ggml_backend_cpu_aarch64_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { - return TENSOR_ALIGNMENT; - - GGML_UNUSED(buft); -} - -namespace ggml::cpu::aarch64 { -class extra_buffer_type : ggml::cpu::extra_buffer_type { - bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { - if ( op->op == GGML_OP_MUL_MAT && - op->src[0]->buffer && - (ggml_n_dims(op->src[0]) == 2) && - op->src[0]->buffer->buft == ggml_backend_cpu_aarch64_buffer_type() && - ggml_aarch64_get_optimal_repack_type(op->src[0]) - ) { - if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { - return false; - } - if (op->src[1]->type == GGML_TYPE_F32) { - return true; - } - //if (op->src[1]->type == GGML_TYPE_Q8_0) { - // return true; - //} - // may be possible if Q8_0 packed... - } else if (op->op == GGML_OP_MUL_MAT_ID - && op->src[0]->buffer - && (ggml_n_dims(op->src[0]) == 3) - && op->src[0]->buffer->buft == ggml_backend_cpu_aarch64_buffer_type() - && ggml_aarch64_get_optimal_repack_type(op->src[0]) - ) { - if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { - return false; - } - if (op->src[1]->type == GGML_TYPE_F32) { - return true; - } - //if (op->src[1]->type == GGML_TYPE_Q8_0) { - // return true; - //} - } - return false; - } - - ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override { - if (op->op == GGML_OP_MUL_MAT || op->op == GGML_OP_MUL_MAT_ID) { - if (op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_cpu_aarch64_buffer_type()) { - return (ggml::cpu::tensor_traits *) op->src[0]->extra; - } - } - return nullptr; - } -}; -} // namespace ggml::cpu::aarch64 - -ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void) { - static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_aarch64 = { - /* .iface = */ { - /* .get_name = */ ggml_backend_cpu_aarch64_buffer_type_get_name, - /* .alloc_buffer = */ ggml_backend_cpu_aarch64_buffer_type_alloc_buffer, - /* .get_alignment = */ ggml_backend_cpu_aarch64_buffer_type_get_alignment, - /* .get_max_size = */ nullptr, // defaults to SIZE_MAX - /* .get_alloc_size = */ nullptr, // defaults to ggml_nbytes - /* .is_host = */ nullptr, - }, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), - /* .context = */ new ggml::cpu::aarch64::extra_buffer_type(), - }; - - return &ggml_backend_cpu_buffer_type_aarch64; -} diff --git a/ggml/src/ggml-cpu/common.h b/ggml/src/ggml-cpu/common.h index 3df01c1edffeb..5624176cce94b 100644 --- a/ggml/src/ggml-cpu/common.h +++ b/ggml/src/ggml-cpu/common.h @@ -1,7 +1,7 @@ #pragma once #include "ggml.h" -#include "ggml-cpu-traits.h" +#include "traits.h" #include "ggml-cpu-impl.h" #include "ggml-impl.h" diff --git a/ggml/src/ggml-cpu/ggml-cpu-aarch64.h b/ggml/src/ggml-cpu/ggml-cpu-aarch64.h deleted file mode 100644 index 6e84c826b4091..0000000000000 --- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +++ /dev/null @@ -1,8 +0,0 @@ -#pragma once - -#include "ggml-cpu-traits.h" -#include "ggml.h" - -// GGML internal header - -ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void); diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h index b3f1b5ca79092..337d8094e8092 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-impl.h +++ 
b/ggml/src/ggml-cpu/ggml-cpu-impl.h @@ -506,3 +506,25 @@ void ggml_barrier(struct ggml_threadpool * tp); #ifdef __cplusplus } #endif + +#define GGML_DO_PRAGMA_(x) _Pragma (#x) +#define GGML_DO_PRAGMA(x) GGML_DO_PRAGMA_(x) +#if defined(GGML_CPU_GENERIC) || defined(__HIPCC__) +// Note for Apple targets: +// - clang: aliases are not supported on darwin +// - all native kernels need to be implemented in both x86 and arm files +// - on iOS, tvOS, and visionOS, if cmake cannot determine the target architecture, all `_generic` names are replaced by defines +# define GGML_WEAK_ALIAS(name, alias) +#elif defined(__GNUC__) +// GCC/Clang on *nix +# define GGML_WEAK_ALIAS(name, alias) GGML_DO_PRAGMA(weak name = alias) // NOLINT +#elif defined(_MSC_VER) && defined (_WIN64) +// MSVC +// Note: C name mangling varies across different calling conventions +// see https://learn.microsoft.com/en-us/cpp/build/reference/decorated-names?view=msvc-170 +# define GGML_WEAK_ALIAS(name, alias) GGML_DO_PRAGMA(comment(linker, "/alternatename:" #name "=" #alias)) +#else +# error "Unsupported compiler for GGML_WEAK_ALIAS" +#endif + +#define GGML_CPU_NATIVE_IMPL(name) GGML_WEAK_ALIAS(name, name ## _generic) diff --git a/ggml/src/ggml-cpu/ggml-cpu-quants.c b/ggml/src/ggml-cpu/ggml-cpu-quants.c deleted file mode 100644 index 40bded4767b47..0000000000000 --- a/ggml/src/ggml-cpu/ggml-cpu-quants.c +++ /dev/null @@ -1,13891 +0,0 @@ -#define GGML_COMMON_IMPL_C -#include "ggml-common.h" - -#include "ggml-quants.h" -#include "ggml-cpu-quants.h" -#include "ggml-impl.h" -#include "ggml-cpu-impl.h" -#include "ggml-cpu.h" - -#include -#include -#include -#include -#include // for qsort -#include // for GGML_ASSERT - -#define GROUP_MAX_EPS 1e-15f -#define GROUP_MAX_EPS_IQ3_XXS 1e-8f -#define GROUP_MAX_EPS_IQ2_S 1e-8f -#define GROUP_MAX_EPS_IQ1_M 1e-7f -#define GROUP_MAX_EPS_IQ1_S 1e-12f - -#define UNUSED GGML_UNUSED - -// some compilers don't provide _mm256_set_m128i, e.g. 
gcc 7 -#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1) - -#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) -// multiply int8_t, add results pairwise twice -static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) { - // Get absolute values of x vectors - const __m128i ax = _mm_sign_epi8(x, x); - // Sign the values of the y vectors - const __m128i sy = _mm_sign_epi8(y, x); - // Perform multiplication and create 16-bit values - const __m128i dot = _mm_maddubs_epi16(ax, sy); - const __m128i ones = _mm_set1_epi16(1); - return _mm_madd_epi16(ones, dot); -} - -#if __AVX__ || __AVX2__ || __AVX512F__ -// horizontally add 8 floats -static inline float hsum_float_8(const __m256 x) { - __m128 res = _mm256_extractf128_ps(x, 1); - res = _mm_add_ps(res, _mm256_castps256_ps128(x)); - res = _mm_add_ps(res, _mm_movehl_ps(res, res)); - res = _mm_add_ss(res, _mm_movehdup_ps(res)); - return _mm_cvtss_f32(res); -} - -// horizontally add 8 int32_t -static inline int hsum_i32_8(const __m256i a) { - const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1)); - const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128); - const __m128i sum64 = _mm_add_epi32(hi64, sum128); - const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); - return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); -} - -// horizontally add 4 int32_t -static inline int hsum_i32_4(const __m128i a) { - const __m128i hi64 = _mm_unpackhi_epi64(a, a); - const __m128i sum64 = _mm_add_epi32(hi64, a); - const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); - return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); -} - -#if defined(__AVX2__) || defined(__AVX512F__) -// spread 32 bits to 32 bytes { 0x00, 0xFF } -static inline __m256i bytes_from_bits_32(const uint8_t * x) { - uint32_t x32; - memcpy(&x32, x, sizeof(uint32_t)); - const __m256i shuf_mask = _mm256_set_epi64x( - 0x0303030303030303, 0x0202020202020202, - 0x0101010101010101, 0x0000000000000000); - __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask); - const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe); - bytes = _mm256_or_si256(bytes, bit_mask); - return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1)); -} - -// Unpack 32 4-bit fields into 32 bytes -// The output vector contains 32 bytes, each one in [ 0 .. 
15 ] interval -static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) -{ - const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi); - const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp); - const __m256i lowMask = _mm256_set1_epi8( 0xF ); - return _mm256_and_si256(lowMask, bytes); -} - -// add int16_t pairwise and return as float vector -static inline __m256 sum_i16_pairs_float(const __m256i x) { - const __m256i ones = _mm256_set1_epi16(1); - const __m256i summed_pairs = _mm256_madd_epi16(ones, x); - return _mm256_cvtepi32_ps(summed_pairs); -} - -static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { -#if defined(__AVX512VNNI__) && defined(__AVX512VL__) - const __m256i zero = _mm256_setzero_si256(); - const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy); - return _mm256_cvtepi32_ps(summed_pairs); -#elif defined(__AVXVNNI__) - const __m256i zero = _mm256_setzero_si256(); - const __m256i summed_pairs = _mm256_dpbusd_avx_epi32(zero, ax, sy); - return _mm256_cvtepi32_ps(summed_pairs); -#else - // Perform multiplication and create 16-bit values - const __m256i dot = _mm256_maddubs_epi16(ax, sy); - return sum_i16_pairs_float(dot); -#endif -} - -// multiply int8_t, add results pairwise twice and return as float vector -static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { -#if __AVXVNNIINT8__ - const __m256i zero = _mm256_setzero_si256(); - const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y); - return _mm256_cvtepi32_ps(summed_pairs); -#else - // Get absolute values of x vectors - const __m256i ax = _mm256_sign_epi8(x, x); - // Sign the values of the y vectors - const __m256i sy = _mm256_sign_epi8(y, x); - return mul_sum_us8_pairs_float(ax, sy); -#endif -} - -static inline __m128i packNibbles( __m256i bytes ) -{ - // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh -#if __AVX512F__ - const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000 - bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh - return _mm256_cvtepi16_epi8(bytes); // abcd_efgh -#else - const __m256i lowByte = _mm256_set1_epi16( 0xFF ); - __m256i high = _mm256_andnot_si256( lowByte, bytes ); - __m256i low = _mm256_and_si256( lowByte, bytes ); - high = _mm256_srli_epi16( high, 4 ); - bytes = _mm256_or_si256( low, high ); - - // Compress uint16_t lanes into bytes - __m128i r0 = _mm256_castsi256_si128( bytes ); - __m128i r1 = _mm256_extracti128_si256( bytes, 1 ); - return _mm_packus_epi16( r0, r1 ); -#endif -} -#elif defined(__AVX__) -static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 ) -{ - // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh - const __m128i lowByte = _mm_set1_epi16( 0xFF ); - __m128i high = _mm_andnot_si128( lowByte, bytes1 ); - __m128i low = _mm_and_si128( lowByte, bytes1 ); - high = _mm_srli_epi16( high, 4 ); - bytes1 = _mm_or_si128( low, high ); - high = _mm_andnot_si128( lowByte, bytes2 ); - low = _mm_and_si128( lowByte, bytes2 ); - high = _mm_srli_epi16( high, 4 ); - bytes2 = _mm_or_si128( low, high ); - - return _mm_packus_epi16( bytes1, bytes2); -} - -static inline __m128i mul_add_epi8_sse(const __m128i x, const __m128i y) { - const __m128i ax = _mm_sign_epi8(x, x); - const __m128i sy = _mm_sign_epi8(y, x); - return _mm_maddubs_epi16(ax, sy); -} - -// spread 32 bits to 32 bytes { 0x00, 0xFF } -static inline __m256i bytes_from_bits_32(const uint8_t * x) { - uint32_t x32; - memcpy(&x32, x, 
sizeof(uint32_t)); - const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000); - const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202); - __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl); - __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh); - const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe); - bytesl = _mm_or_si128(bytesl, bit_mask); - bytesh = _mm_or_si128(bytesh, bit_mask); - bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1)); - bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1)); - return MM256_SET_M128I(bytesh, bytesl); -} - -// Unpack 32 4-bit fields into 32 bytes -// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval -static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) -{ - // Load 16 bytes from memory - __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi); - __m128i tmph = _mm_srli_epi16(tmpl, 4); - const __m128i lowMask = _mm_set1_epi8(0xF); - tmpl = _mm_and_si128(lowMask, tmpl); - tmph = _mm_and_si128(lowMask, tmph); - return MM256_SET_M128I(tmph, tmpl); -} - -// add int16_t pairwise and return as float vector -static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) { - const __m128i ones = _mm_set1_epi16(1); - const __m128i summed_pairsl = _mm_madd_epi16(ones, xl); - const __m128i summed_pairsh = _mm_madd_epi16(ones, xh); - const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl); - return _mm256_cvtepi32_ps(summed_pairs); -} - -static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { - const __m128i axl = _mm256_castsi256_si128(ax); - const __m128i axh = _mm256_extractf128_si256(ax, 1); - const __m128i syl = _mm256_castsi256_si128(sy); - const __m128i syh = _mm256_extractf128_si256(sy, 1); - // Perform multiplication and create 16-bit values - const __m128i dotl = _mm_maddubs_epi16(axl, syl); - const __m128i doth = _mm_maddubs_epi16(axh, syh); - return sum_i16_pairs_float(doth, dotl); -} - -// multiply int8_t, add results pairwise twice and return as float vector -static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { - const __m128i xl = _mm256_castsi256_si128(x); - const __m128i xh = _mm256_extractf128_si256(x, 1); - const __m128i yl = _mm256_castsi256_si128(y); - const __m128i yh = _mm256_extractf128_si256(y, 1); - // Get absolute values of x vectors - const __m128i axl = _mm_sign_epi8(xl, xl); - const __m128i axh = _mm_sign_epi8(xh, xh); - // Sign the values of the y vectors - const __m128i syl = _mm_sign_epi8(yl, xl); - const __m128i syh = _mm_sign_epi8(yh, xh); - // Perform multiplication and create 16-bit values - const __m128i dotl = _mm_maddubs_epi16(axl, syl); - const __m128i doth = _mm_maddubs_epi16(axh, syh); - return sum_i16_pairs_float(doth, dotl); -} - -// larger version of mul_sum_i8_pairs_float where x and y are each represented by four 128-bit vectors -static inline __m256 mul_sum_i8_quad_float(const __m128i x_1_0, const __m128i x_1_1, const __m128i x_2_0, const __m128i x_2_1, - const __m128i y_1_0, const __m128i y_1_1, const __m128i y_2_0, const __m128i y_2_1) { - const __m128i mone = _mm_set1_epi16(1); - - const __m128i p16_1_0 = mul_add_epi8_sse(x_1_0, y_1_0); - const __m128i p16_1_1 = mul_add_epi8_sse(x_1_1, y_1_1); - const __m128i p16_2_0 = mul_add_epi8_sse(x_2_0, y_2_0); - const __m128i p16_2_1 = mul_add_epi8_sse(x_2_1, y_2_1); - const __m128i p_1_0 = _mm_madd_epi16(p16_1_0, mone); - const __m128i p_1_1 = _mm_madd_epi16(p16_1_1, 
mone); - const __m128i p_2_0 = _mm_madd_epi16(p16_2_0, mone); - const __m128i p_2_1 = _mm_madd_epi16(p16_2_1, mone); - const __m128i p_1 = _mm_add_epi32(p_1_0, p_1_1); - const __m128i p_2 = _mm_add_epi32(p_2_0, p_2_1); - return _mm256_cvtepi32_ps(MM256_SET_M128I(p_2, p_1)); -} - -// quad fp16 delta calculation -static inline __m256 quad_fp16_delta_float(const float x0, const float y0, const float x1, const float y1) { - // GGML_FP16_TO_FP32 is faster than Intel F16C - return _mm256_set_m128(_mm_set1_ps(GGML_FP16_TO_FP32(x1) * GGML_FP16_TO_FP32(y1)), - _mm_set1_ps(GGML_FP16_TO_FP32(x0) * GGML_FP16_TO_FP32(y0))); -} -#endif -#elif defined(__SSSE3__) -// horizontally add 4x4 floats -static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { - __m128 res_0 =_mm_hadd_ps(a, b); - __m128 res_1 =_mm_hadd_ps(c, d); - __m128 res =_mm_hadd_ps(res_0, res_1); - res =_mm_hadd_ps(res, res); - res =_mm_hadd_ps(res, res); - - return _mm_cvtss_f32(res); -} -#endif // __AVX__ || __AVX2__ || __AVX512F__ -#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) - -#if defined(__ARM_NEON) || defined(__wasm_simd128__) || defined(__POWER9_VECTOR__) -#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s -#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s) -#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s) -#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s) -#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s) -#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s) -#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s) -#define B8(c,s ) B7(c,s, c), B7(c,s, s) - -// precomputed tables for expanding 8bits to 8 bytes: -static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4 -static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 -#endif - -#if defined(__loongarch_sx) - -static __m128i lsx_packs_w(__m128i a, __m128i b) { - __m128i tmp, tmp1; - tmp = __lsx_vsat_w(a, 15); - tmp1 = __lsx_vsat_w(b, 15); - return __lsx_vpickev_h(tmp1, tmp); -} - -static __m128i lsx_packs_h(__m128i a, __m128i b) { - __m128i tmp, tmp1; - tmp = __lsx_vsat_h(a, 7); - tmp1 = __lsx_vsat_h(b, 7); - return __lsx_vpickev_b(tmp1, tmp); -} - -static __m128i lsx_packus_h(__m128i a, __m128i b) { - __m128i tmp, tmp1; - tmp = __lsx_vsat_hu(a, 7); - tmp1 = __lsx_vsat_hu(b, 7); - return __lsx_vpickev_b(tmp1, tmp); -} - -static __m128i lsx_maddubs_h(__m128i a, __m128i b) { - __m128i tmp1, tmp2; - tmp1 = __lsx_vmulwev_h_b(a, b); - tmp2 = __lsx_vmulwod_h_b(a, b); - return __lsx_vsadd_h(tmp1, tmp2); -} - -static __m128i lsx_madd_h(__m128i a, __m128i b) { - __m128i tmp1, tmp2; - tmp1 = __lsx_vmulwev_w_h(a, b); - tmp2 = __lsx_vmulwod_w_h(a, b); - return __lsx_vadd_w(tmp1, tmp2); -} - -static __m128i lsx_set_w(int32_t a, int32_t b, int32_t c, int32_t d) { - v4i32 __ret = {d, c, b, a}; - return (__m128i)__ret; -} - -static __m128i lsx_shuffle_b(__m128i a, __m128i b) { - __m128i mask_f, zero, tmp0, tmp2, mask; - int f = 0x8f; - mask_f = __lsx_vreplgr2vr_b(f); - zero = __lsx_vldi(0); - tmp0 = __lsx_vand_v(b, mask_f); // get mask with low 4 bit and sign bits - tmp0 = __lsx_vori_b(tmp0, 0x10); // make each mask or with 0x10 prepare for positive - mask = __lsx_vsle_b(zero, tmp0); // if mask >= 0, set mask - tmp2 = __lsx_vand_v(tmp0, mask); // maskout the in2 < ones - return __lsx_vshuf_b(a, zero, tmp2); -} - -static __m128i lsx_hadd_h(__m128i a, __m128i b) { - __m128i tmp1 = __lsx_vpickev_h(b, a); - __m128i tmp2 = __lsx_vpickod_h(b, a); - return __lsx_vadd_h(tmp1, tmp2); -} - -static 
__m128i lsx_hadd_w(__m128i a, __m128i b) { - __m128i tmp1 = __lsx_vpickev_w(b, a); - __m128i tmp2 = __lsx_vpickod_w(b, a); - return __lsx_vadd_w(tmp1, tmp2); -} - -static __m128 lsx_hadd_s(__m128 a, __m128 b) { - __m128 tmp1 = (__m128)__lsx_vpickev_w((__m128i)b, (__m128i)a); - __m128 tmp2 = (__m128)__lsx_vpickod_w((__m128i)b, (__m128i)a); - - return __lsx_vfadd_s(tmp1, tmp2); -} - -static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { - __m128 res_0 =lsx_hadd_s(a, b); - __m128 res_1 =lsx_hadd_s(c, d); - __m128 res =lsx_hadd_s(res_0, res_1); - res =lsx_hadd_s(res, res); - res =lsx_hadd_s(res, res); - - return ((v4f32)res)[0]; -} -#endif - -#if defined(__loongarch_asx) - -#ifdef __clang__ -#define VREGS_PREFIX "$vr" -#define XREGS_PREFIX "$xr" -#else // GCC -#define VREGS_PREFIX "$f" -#define XREGS_PREFIX "$f" -#endif -#define __ALL_REGS "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31" -// Convert __m128i to __m256i -static inline __m256i ____m256i(__m128i in) { - __m256i out = __lasx_xvldi(0); - __asm__ volatile ( - ".irp i," __ALL_REGS "\n\t" - " .ifc %[out], " XREGS_PREFIX"\\i \n\t" - " .irp j," __ALL_REGS "\n\t" - " .ifc %[in], " VREGS_PREFIX "\\j \n\t" - " xvpermi.q $xr\\i, $xr\\j, 0x20 \n\t" - " .endif \n\t" - " .endr \n\t" - " .endif \n\t" - ".endr \n\t" - : [out] "+f" (out) : [in] "f" (in) - ); - return out; -} -// Convert two __m128i to __m256i -static inline __m256i lasx_set_q(__m128i inhi, __m128i inlo) { - __m256i out; - __asm__ volatile ( - ".irp i," __ALL_REGS "\n\t" - " .ifc %[hi], " VREGS_PREFIX "\\i \n\t" - " .irp j," __ALL_REGS "\n\t" - " .ifc %[lo], " VREGS_PREFIX "\\j \n\t" - " xvpermi.q $xr\\i, $xr\\j, 0x20 \n\t" - " .endif \n\t" - " .endr \n\t" - " .endif \n\t" - ".endr \n\t" - ".ifnc %[out], %[hi] \n\t" - ".irp i," __ALL_REGS "\n\t" - " .ifc %[out], " XREGS_PREFIX "\\i \n\t" - " .irp j," __ALL_REGS "\n\t" - " .ifc %[hi], " VREGS_PREFIX "\\j \n\t" - " xvori.b $xr\\i, $xr\\j, 0 \n\t" - " .endif \n\t" - " .endr \n\t" - " .endif \n\t" - ".endr \n\t" - ".endif \n\t" - : [out] "=f" (out), [hi] "+f" (inhi) - : [lo] "f" (inlo) - ); - return out; -} -// Convert __m256i low part to __m128i -static inline __m128i lasx_extracti128_lo(__m256i in) { - __m128i out; - __asm__ volatile ( - ".ifnc %[out], %[in] \n\t" - ".irp i," __ALL_REGS "\n\t" - " .ifc %[out], " VREGS_PREFIX "\\i \n\t" - " .irp j," __ALL_REGS "\n\t" - " .ifc %[in], " XREGS_PREFIX "\\j \n\t" - " vori.b $vr\\i, $vr\\j, 0 \n\t" - " .endif \n\t" - " .endr \n\t" - " .endif \n\t" - ".endr \n\t" - ".endif \n\t" - : [out] "=f" (out) : [in] "f" (in) - ); - return out; -} -// Convert __m256i high part to __m128i -static inline __m128i lasx_extracti128_hi(__m256i in) { - __m128i out; - __asm__ volatile ( - ".irp i," __ALL_REGS "\n\t" - " .ifc %[out], " VREGS_PREFIX "\\i \n\t" - " .irp j," __ALL_REGS "\n\t" - " .ifc %[in], " XREGS_PREFIX "\\j \n\t" - " xvpermi.q $xr\\i, $xr\\j, 0x11 \n\t" - " .endif \n\t" - " .endr \n\t" - " .endif \n\t" - ".endr \n\t" - : [out] "=f" (out) : [in] "f" (in) - ); - return out; -} - -static __m256i lasx_set_w(int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0) { - v8i32 __ret = {e0, e1, e2, e3, e4, e5, e6, e7}; - return (__m256i)__ret; -} - -static __m256i lasx_set_d(int64_t a, int64_t b, int64_t c, int64_t d) { - v4i64 __ret = {d, c, b, a}; - return (__m256i)__ret; -} - -static __m256i lasx_insertf128( __m128i x, __m128i y) { - return lasx_set_q(x, y); -} - -static __m256i lasx_shuffle_b(__m256i a, 
__m256i b) { - __m256i mask_f, zero, tmp0, tmp2, mask; - int f = 0x8f; - mask_f = __lasx_xvreplgr2vr_b(f); - zero = __lasx_xvldi(0); - tmp0 = __lasx_xvand_v(b, mask_f); // get mask with low 4 bit and sign bits - tmp0 = __lasx_xvori_b(tmp0, 0x10); // make each mask or with 0x10 prepare for positive - mask = __lasx_xvsle_b(zero, tmp0); // if mask >= 0, set mask - tmp2 = __lasx_xvand_v(tmp0, mask); // maskout the in2 < ones - return __lasx_xvshuf_b(a, zero, tmp2); -} - -static __m256i lasx_extu8_16(__m128i a) { - return __lasx_vext2xv_hu_bu(____m256i(a)); -} - -static __m256i lasx_ext8_16(__m128i a) { - return __lasx_vext2xv_h_b(____m256i(a)); -} - -static __m256i lasx_ext16_32(__m128i a) { - return __lasx_vext2xv_w_h(____m256i(a)); -} - -static __m128i lasx_extracti128( __m256i a, int pos) { - __m128i ret; - if( pos == 0) - { - ret = lasx_extracti128_lo(a); - } else { - ret = lasx_extracti128_hi(a); - } - return ret; -} - -static __m128 lasx_extractf128( __m256 a, int pos) { - __m128 ret; - if( pos == 0) - { - ret = (__m128)lasx_extracti128_lo((__m256i)a); - } else { - ret = (__m128)lasx_extracti128_hi((__m256i)a); - } - return ret; -} - -static __m256i lasx_maddubs_h(__m256i a, __m256i b) { - __m256i tmp1, tmp2; - tmp1 = __lasx_xvmulwev_h_b(a, b); - tmp2 = __lasx_xvmulwod_h_b(a, b); - return __lasx_xvsadd_h(tmp1, tmp2); -} - -static __m256i lasx_madd_h(__m256i a, __m256i b) { - __m256i tmp1, tmp2; - tmp1 = __lasx_xvmulwev_w_h(a, b); - tmp2 = __lasx_xvmulwod_w_h(a, b); - return __lasx_xvadd_w(tmp1, tmp2); -} - -static __m256i lasx_packs_w(__m256i a, __m256i b) { - __m256i tmp, tmp1; - tmp = __lasx_xvsat_w(a, 15); - tmp1 = __lasx_xvsat_w(b, 15); - return __lasx_xvpickev_h(tmp1, tmp); -} - -static __m256i lasx_packs_h(__m256i a, __m256i b) { - __m256i tmp, tmp1; - tmp = __lasx_xvsat_h(a, 7); - tmp1 = __lasx_xvsat_h(b, 7); - return __lasx_xvpickev_b(tmp1, tmp); -} - -static inline __m256i lasx_madd_h_b(__m256i a, __m256i b) { - __m256i tmp1, tmp2; - tmp1 = __lasx_xvmulwev_h_b(a, b); - tmp2 = __lasx_xvmulwod_h_b(a, b); - return __lasx_xvadd_h(tmp1, tmp2); -} - -static inline __m256i lasx_xvrepl128vei_h(__m256i a, const unsigned int b) { - switch (b) { - case 0: return __lasx_xvrepl128vei_h(a, 0); - case 1: return __lasx_xvrepl128vei_h(a, 1); - case 2: return __lasx_xvrepl128vei_h(a, 2); - case 3: return __lasx_xvrepl128vei_h(a, 3); - case 4: return __lasx_xvrepl128vei_h(a, 4); - case 5: return __lasx_xvrepl128vei_h(a, 5); - case 6: return __lasx_xvrepl128vei_h(a, 6); - case 7: return __lasx_xvrepl128vei_h(a, 7); - default: __builtin_unreachable(); - } -} - -static inline __m256i lasx_xvandi_b_bit(__m256i a, const unsigned int b) { - switch (b) { - case 0: return __lasx_xvandi_b(a, 1 << 0); - case 1: return __lasx_xvandi_b(a, 1 << 1); - case 2: return __lasx_xvandi_b(a, 1 << 2); - case 3: return __lasx_xvandi_b(a, 1 << 3); - case 4: return __lasx_xvandi_b(a, 1 << 4); - case 5: return __lasx_xvandi_b(a, 1 << 5); - case 6: return __lasx_xvandi_b(a, 1 << 6); - case 7: return __lasx_xvandi_b(a, 1 << 7); - default: __builtin_unreachable(); - } -} - -// multiply int8_t, add results pairwise twice -static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) { - // Get absolute values of x vectors - const __m128i ax = __lsx_vsigncov_b(x, x); - // Sign the values of the y vectors - const __m128i sy = __lsx_vsigncov_b(x, y); - // Perform multiplication and create 16-bit values - const __m128i dot = lsx_maddubs_h(ax, sy); - const __m128i ones = __lsx_vreplgr2vr_h(1); - return lsx_madd_h(ones, 
dot); -} - -// horizontally add 8 floats -static inline float hsum_float_8(const __m256 x) { - __m128 res = lasx_extractf128(x, 1); - res = __lsx_vfadd_s(res, lasx_extractf128(x, 0)); - res = __lsx_vfadd_s(res, (__m128)__lsx_vpickod_d((__m128i)res, (__m128i)res)); - res = __lsx_vfadd_s(res, (__m128)__lsx_vinsgr2vr_w(__lsx_vldi(0), __lsx_vpickve2gr_w(res, 1), 0)); - return ((v4f32)res)[0]; -} - -// horizontally add 8 int32_t -static inline int hsum_i32_8(const __m256i a) { - - __m256i tmp1 = __lasx_xvpermi_q(a, a, 0x11); - __m256i tmp2 = __lasx_xvpermi_q(a, a, 0x00); - - __m128i tmp1_128 = lasx_extracti128_lo(tmp1); - __m128i tmp2_128 = lasx_extracti128_lo(tmp2); - - __m128i sum128 = __lsx_vadd_w(tmp1_128, tmp2_128); - - __m128i ev = __lsx_vpickev_w(sum128, sum128); - __m128i od = __lsx_vpickod_w(sum128, sum128); - __m128i sum64 = __lsx_vadd_w(ev, od); - - int sum64_1, sum64_2; - sum64_1 = __lsx_vpickve2gr_w(sum64, 0); - sum64_2 = __lsx_vpickve2gr_w(sum64, 1); - - return sum64_1 + sum64_2; -} - -// horizontally add 4 int32_t -static inline int hsum_i32_4(const __m128i a) { - __m128i ev = __lsx_vpickev_w(a, a); - __m128i od = __lsx_vpickod_w(a, a); - __m128i sum64 = __lsx_vadd_w(ev, od); - - int sum64_1, sum64_2; - sum64_1 = __lsx_vpickve2gr_w(sum64, 0); - sum64_2 = __lsx_vpickve2gr_w(sum64, 1); - - return sum64_1 + sum64_2; -} - -// spread 32 bits to 32 bytes { 0x00, 0xFF } -static inline __m256i bytes_from_bits_32(const uint8_t * x) { - - uint32_t x32; - memcpy(&x32, x, sizeof(uint32_t)); - const __m256i shuf_mask = lasx_set_d( - 0x0303030303030303, 0x0202020202020202, - 0x0101010101010101, 0x0000000000000000); - - __m256i bytes = lasx_shuffle_b(__lasx_xvreplgr2vr_w(x32), shuf_mask); - const __m256i bit_mask = __lasx_xvreplgr2vr_d(0x7fbfdfeff7fbfdfe); - bytes = __lasx_xvor_v(bytes, bit_mask); - return __lasx_xvseq_b(bytes, __lasx_xvreplgr2vr_d(-1)); -} - -// Unpack 32 4-bit fields into 32 bytes -// The output vector contains 32 bytes, each one in [ 0 .. 
15 ] interval -static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) { - const __m128i lo = __lsx_vld((const __m128i *)rsi, 0); - __m128i hi = __lsx_vsrli_h(lo, 4); - return __lasx_xvandi_b(lasx_insertf128(hi, lo), 0xf); -} - -// add int16_t pairwise and return as float vector -static inline __m256 sum_i16_pairs_float(const __m256i x) { - __m256i v = __lasx_xvpackod_h(x, x); - __m256i summed_pairs = __lasx_xvaddwev_w_h(x, v); - return __lasx_xvffint_s_w(summed_pairs); -} - -static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { - // Perform multiplication and create 16-bit values - const __m256i dot = lasx_maddubs_h(ax, sy); - return sum_i16_pairs_float(dot); -} - -// multiply int8_t, add results pairwise twice and return as float vector -static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { - const __m256i dot = lasx_madd_h_b(x, y); - return sum_i16_pairs_float(dot); -} - -static inline __m128i packNibbles( __m256i bytes ) { - // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh - const __m256i lowByte = __lasx_xvreplgr2vr_h(0xFF); - __m256i high = __lasx_xvandn_v(lowByte, bytes); - __m256i low = __lasx_xvand_v(lowByte, bytes); - high = __lasx_xvsrli_h(high, 4); - bytes = __lasx_xvor_v(low, high); - // Compress uint16_t lanes into bytes - __m128i *r0 = (__m128i *)&bytes; - __m256i tmp_h128 = __lasx_xvpermi_q(bytes, bytes, 0x11); - __m128i *r1 = (__m128i *)&tmp_h128; - - __m128i zero = __lsx_vldi(0); - __m128i tmp, tmp2, tmp3; - - tmp = __lsx_vmax_h(zero, *r0); - tmp2 = __lsx_vsat_hu(tmp, 7); - - tmp = __lsx_vmax_h(zero, *r1); - tmp3 = __lsx_vsat_hu(tmp, 7); - return __lsx_vpickev_b(tmp3, tmp2); -} -#endif //__loongarch_asx - -void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { - quantize_row_q4_0_ref(x, y, k); -} - -void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { - quantize_row_q4_1_ref(x, y, k); -} - -void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { - quantize_row_q5_0_ref(x, y, k); -} - -void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { - quantize_row_q5_1_ref(x, y, k); -} - -void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(QK8_0 == 32); - assert(k % QK8_0 == 0); - const int nb = k / QK8_0; - - block_q8_0 * GGML_RESTRICT y = vy; - -#if defined(__ARM_NEON) - for (int i = 0; i < nb; i++) { - float32x4_t srcv [8]; - float32x4_t asrcv[8]; - float32x4_t amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); - - const float amax = vmaxvq_f32(amaxv[0]); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - for (int j = 0; j < 8; j++) { - const float32x4_t v = vmulq_n_f32(srcv[j], id); - const int32x4_t vi = vcvtnq_s32_f32(v); - - y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); - y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); - y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); - y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); - } - } -#elif defined __wasm_simd128__ - for (int i = 0; i < nb; i++) { - v128_t srcv [8]; - v128_t asrcv[8]; - v128_t amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), - wasm_f32x4_extract_lane(amaxv[0], 1)), - MAX(wasm_f32x4_extract_lane(amaxv[0], 2), - wasm_f32x4_extract_lane(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - for (int j = 0; j < 8; j++) { - const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); - const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); - - y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); - y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); - y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); - y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); - } - } -#elif defined(__AVX2__) || defined(__AVX__) - for (int i = 0; i < nb; i++) { - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x ); - __m256 v1 = _mm256_loadu_ps( x + 8 ); - __m256 v2 = _mm256_loadu_ps( x + 16 ); - __m256 v3 = _mm256_loadu_ps( x + 24 ); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 signBit = _mm256_set1_ps( -0.0f ); - __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); - - __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); - max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); - max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); - const float maxScalar = _mm_cvtss_f32( max4 ); - - // Quantize these floats - const float d = maxScalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); - const float id = ( maxScalar != 0.0f ) ? 
127.f / maxScalar : 0.0f; - const __m256 mul = _mm256_set1_ps( id ); - - // Apply the multiplier - v0 = _mm256_mul_ps( v0, mul ); - v1 = _mm256_mul_ps( v1, mul ); - v2 = _mm256_mul_ps( v2, mul ); - v3 = _mm256_mul_ps( v3, mul ); - - // Round to nearest integer - v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); - v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); - v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); - v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); - - // Convert floats to integers - __m256i i0 = _mm256_cvtps_epi32( v0 ); - __m256i i1 = _mm256_cvtps_epi32( v1 ); - __m256i i2 = _mm256_cvtps_epi32( v2 ); - __m256i i3 = _mm256_cvtps_epi32( v3 ); - -#if defined(__AVX2__) - // Convert int32 to int16 - i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 - i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 - // Convert int16 to int8 - i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 - - // We got our precious signed bytes, but the order is now wrong - // These AVX2 pack instructions process 16-byte pieces independently - // The following instruction is fixing the order - const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); - i0 = _mm256_permutevar8x32_epi32( i0, perm ); - - _mm256_storeu_si256((__m256i *)y[i].qs, i0); -#else - // Since we don't have in AVX some necessary functions, - // we split the registers in half and call AVX2 analogs from SSE - __m128i ni0 = _mm256_castsi256_si128( i0 ); - __m128i ni1 = _mm256_extractf128_si256( i0, 1); - __m128i ni2 = _mm256_castsi256_si128( i1 ); - __m128i ni3 = _mm256_extractf128_si256( i1, 1); - __m128i ni4 = _mm256_castsi256_si128( i2 ); - __m128i ni5 = _mm256_extractf128_si256( i2, 1); - __m128i ni6 = _mm256_castsi256_si128( i3 ); - __m128i ni7 = _mm256_extractf128_si256( i3, 1); - - // Convert int32 to int16 - ni0 = _mm_packs_epi32( ni0, ni1 ); - ni2 = _mm_packs_epi32( ni2, ni3 ); - ni4 = _mm_packs_epi32( ni4, ni5 ); - ni6 = _mm_packs_epi32( ni6, ni7 ); - // Convert int16 to int8 - ni0 = _mm_packs_epi16( ni0, ni2 ); - ni4 = _mm_packs_epi16( ni4, ni6 ); - - _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); - _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); -#endif - } -#elif defined(__riscv_v) - - size_t vl = QK8_0; - - for (int i = 0; i < nb; i++) { - // load elements - vfloat32m8_t v_x = __riscv_vle32_v_f32m8(x+i*QK8_0, vl); - - vfloat32m8_t vfabs = __riscv_vfabs_v_f32m8(v_x, vl); - vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl); - vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m8_f32m1(vfabs, tmp, vl); - float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - vfloat32m8_t x0 = __riscv_vfmul_vf_f32m8(v_x, id, vl); - - // convert to integer - vint16m4_t vi = __riscv_vfncvt_x_f_w_i16m4(x0, vl); - vint8m2_t vs = __riscv_vncvt_x_x_w_i8m2(vi, vl); - - // store result - __riscv_vse8_v_i8m2(y[i].qs , vs, vl); - } - -#elif defined(__POWER9_VECTOR__) - for (int i = 0; i < nb; i++) { - vector float srcv [8]; - vector float asrcv[8]; - vector float amaxv[8]; - vector signed int vi[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(vec_extract(amaxv[0], 0), - vec_extract(amaxv[0], 1)), - MAX(vec_extract(amaxv[0], 2), - vec_extract(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - const vector float vid = vec_splats(id); - - y[i].d = GGML_FP32_TO_FP16(d); - - for (int j = 0; j < 8; j++) { - const vector float v = vec_round(vec_mul(srcv[j], vid)); - vi[j] = vec_cts(v, 0); - } - vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); - vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); - } - -#elif defined(__loongarch_asx) - for (int i = 0; i < nb; i++) { - __m256 v0 = (__m256)__lasx_xvld( x , 0); - __m256 v1 = (__m256)__lasx_xvld( x , 32); - __m256 v2 = (__m256)__lasx_xvld( x , 64); - __m256 v3 = (__m256)__lasx_xvld( x , 96); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 sign_bit = __lasx_xvreplfr2vr_s( -0.0f ); - __m256 max_abs = (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v0 ); - max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v1 ) ); - max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v2 ) ); - max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v3 ) ); - - __m128 max4 = __lsx_vfmax_s( lasx_extractf128( max_abs, 1 ), lasx_extractf128( max_abs , 0) ); - max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vpickod_d((__m128i) max4, (__m128i)max4 ) ); - __m128 tmp = max4; - max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vinsgr2vr_w(tmp, __lsx_vpickve2gr_w( max4, 1 ), 0 )); - const float max_scalar = ((v4f32)max4)[0]; - - // Quantize these floats - const float d = max_scalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); - const float id = ( max_scalar != 0.0f ) ? 
127.f / max_scalar : 0.0f; - const __m256 mul = (__m256)__lasx_xvreplfr2vr_s( id ); - - // Apply the multiplier - v0 = __lasx_xvfmul_s( v0, mul ); - v1 = __lasx_xvfmul_s( v1, mul ); - v2 = __lasx_xvfmul_s( v2, mul ); - v3 = __lasx_xvfmul_s( v3, mul ); - - // Round to nearest integer - __m256i i0 = __lasx_xvftintrne_w_s( v0 ); - __m256i i1 = __lasx_xvftintrne_w_s( v1 ); - __m256i i2 = __lasx_xvftintrne_w_s( v2 ); - __m256i i3 = __lasx_xvftintrne_w_s( v3 ); - - __m128i ni0 = lasx_extracti128( i0, 0 ); - __m128i ni1 = lasx_extracti128( i0, 1); - __m128i ni2 = lasx_extracti128( i1, 0); - __m128i ni3 = lasx_extracti128( i1, 1); - __m128i ni4 = lasx_extracti128( i2, 0); - __m128i ni5 = lasx_extracti128( i2, 1); - __m128i ni6 = lasx_extracti128( i3, 0); - __m128i ni7 = lasx_extracti128( i3, 1); - - // Convert int32 to int16 - ni0 = lsx_packs_w( ni0, ni1 ); - ni2 = lsx_packs_w( ni2, ni3 ); - ni4 = lsx_packs_w( ni4, ni5 ); - ni6 = lsx_packs_w( ni6, ni7 ); - // Convert int16 to int8 - ni0 = lsx_packs_h( ni0, ni2 ); - ni4 = lsx_packs_h( ni4, ni6 ); - - __lsx_vst(ni0, (__m128i *)(y[i].qs + 0), 0); - __lsx_vst(ni4, (__m128i *)(y[i].qs + 16), 0); - - } -#elif defined(__VXE__) || defined(__VXE2__) - for (int i = 0; i < nb; i++) { - __vector float srcv [8]; - __vector float asrcv[8]; - __vector float amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); - for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(vec_extract(amaxv[0], 0), - vec_extract(amaxv[0], 1)), - MAX(vec_extract(amaxv[0], 2), - vec_extract(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f / d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - for (int j = 0; j < 8; j++) { - const __vector float v = vec_mul(srcv[j], vec_splats(id)); - const __vector int32_t vi = vec_signed(v); - - y[i].qs[4*j + 0] = vec_extract(vi, 0); - y[i].qs[4*j + 1] = vec_extract(vi, 1); - y[i].qs[4*j + 2] = vec_extract(vi, 2); - y[i].qs[4*j + 3] = vec_extract(vi, 3); - } - } -#else - GGML_UNUSED(nb); - // scalar - quantize_row_q8_0_ref(x, y, k); -#endif -} - -void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(k % QK8_1 == 0); - const int nb = k / QK8_1; - - block_q8_1 * GGML_RESTRICT y = vy; - -#if defined(__ARM_NEON) - for (int i = 0; i < nb; i++) { - float32x4_t srcv [8]; - float32x4_t asrcv[8]; - float32x4_t amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); - - const float amax = vmaxvq_f32(amaxv[0]); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - int32x4_t accv = vdupq_n_s32(0); - - for (int j = 0; j < 8; j++) { - const float32x4_t v = vmulq_n_f32(srcv[j], id); - const int32x4_t vi = vcvtnq_s32_f32(v); - - y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); - y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); - y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); - y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); - - accv = vaddq_s32(accv, vi); - } - - y[i].s = GGML_FP32_TO_FP16(d * vaddvq_s32(accv)); - } -#elif defined __wasm_simd128__ - for (int i = 0; i < nb; i++) { - v128_t srcv [8]; - v128_t asrcv[8]; - v128_t amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), - wasm_f32x4_extract_lane(amaxv[0], 1)), - MAX(wasm_f32x4_extract_lane(amaxv[0], 2), - wasm_f32x4_extract_lane(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - v128_t accv = wasm_i32x4_splat(0); - - for (int j = 0; j < 8; j++) { - const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); - const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); - - y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); - y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); - y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); - y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); - - accv = wasm_i32x4_add(accv, vi); - } - - y[i].s = GGML_FP32_TO_FP16( - d * (wasm_i32x4_extract_lane(accv, 0) + - wasm_i32x4_extract_lane(accv, 1) + - wasm_i32x4_extract_lane(accv, 2) + - wasm_i32x4_extract_lane(accv, 3))); - } -#elif defined(__AVX2__) || defined(__AVX__) - for (int i = 0; i < nb; i++) { - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x ); - __m256 v1 = _mm256_loadu_ps( x + 8 ); - __m256 v2 = _mm256_loadu_ps( x + 16 ); - __m256 v3 = _mm256_loadu_ps( x + 24 ); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 signBit = _mm256_set1_ps( -0.0f ); - __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); - - __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); - max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); - max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); - const float max_scalar = _mm_cvtss_f32( max4 ); - - // Quantize these floats - const float d = max_scalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); - const float id = ( max_scalar != 0.0f ) ? 
127.f / max_scalar : 0.0f; - const __m256 mul = _mm256_set1_ps( id ); - - // Apply the multiplier - v0 = _mm256_mul_ps( v0, mul ); - v1 = _mm256_mul_ps( v1, mul ); - v2 = _mm256_mul_ps( v2, mul ); - v3 = _mm256_mul_ps( v3, mul ); - - // Round to nearest integer - v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); - v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); - v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); - v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); - - // Convert floats to integers - __m256i i0 = _mm256_cvtps_epi32( v0 ); - __m256i i1 = _mm256_cvtps_epi32( v1 ); - __m256i i2 = _mm256_cvtps_epi32( v2 ); - __m256i i3 = _mm256_cvtps_epi32( v3 ); - -#if defined(__AVX2__) - // Compute the sum of the quants and set y[i].s - y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)))); - - // Convert int32 to int16 - i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 - i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 - // Convert int16 to int8 - i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 - - // We got our precious signed bytes, but the order is now wrong - // These AVX2 pack instructions process 16-byte pieces independently - // The following instruction is fixing the order - const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); - i0 = _mm256_permutevar8x32_epi32( i0, perm ); - - _mm256_storeu_si256((__m256i *)y[i].qs, i0); -#else - // Since we don't have in AVX some necessary functions, - // we split the registers in half and call AVX2 analogs from SSE - __m128i ni0 = _mm256_castsi256_si128( i0 ); - __m128i ni1 = _mm256_extractf128_si256( i0, 1); - __m128i ni2 = _mm256_castsi256_si128( i1 ); - __m128i ni3 = _mm256_extractf128_si256( i1, 1); - __m128i ni4 = _mm256_castsi256_si128( i2 ); - __m128i ni5 = _mm256_extractf128_si256( i2, 1); - __m128i ni6 = _mm256_castsi256_si128( i3 ); - __m128i ni7 = _mm256_extractf128_si256( i3, 1); - - // Compute the sum of the quants and set y[i].s - const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3)); - const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7)); - y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_4(_mm_add_epi32(s0, s1))); - - // Convert int32 to int16 - ni0 = _mm_packs_epi32( ni0, ni1 ); - ni2 = _mm_packs_epi32( ni2, ni3 ); - ni4 = _mm_packs_epi32( ni4, ni5 ); - ni6 = _mm_packs_epi32( ni6, ni7 ); - // Convert int16 to int8 - ni0 = _mm_packs_epi16( ni0, ni2 ); - ni4 = _mm_packs_epi16( ni4, ni6 ); - - _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); - _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); -#endif - } -#elif defined(__riscv_v) - - size_t vl = QK8_1; - - for (int i = 0; i < nb; i++) { - // load elements - vfloat32m8_t v_x = __riscv_vle32_v_f32m8(x+i*QK8_1, vl); - - vfloat32m8_t vfabs = __riscv_vfabs_v_f32m8(v_x, vl); - vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl); - vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m8_f32m1(vfabs, tmp, vl); - float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - vfloat32m8_t x0 = __riscv_vfmul_vf_f32m8(v_x, id, vl); - - // convert to integer - vint16m4_t vi = __riscv_vfncvt_x_f_w_i16m4(x0, vl); - vint8m2_t vs = __riscv_vncvt_x_x_w_i8m2(vi, vl); - - // store result - __riscv_vse8_v_i8m2(y[i].qs , vs, vl); - - // compute sum for y[i].s - vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl); - vint16m1_t vwrs = __riscv_vwredsum_vs_i8m2_i16m1(vs, tmp2, vl); - - // set y[i].s - int sum = __riscv_vmv_x_s_i16m1_i16(vwrs); - y[i].s = GGML_FP32_TO_FP16(sum*d); - } - -#elif defined(__POWER9_VECTOR__) - for (int i = 0; i < nb; i++) { - vector float srcv [8]; - vector float asrcv[8]; - vector float amaxv[8]; - vector signed int vi[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(vec_extract(amaxv[0], 0), - vec_extract(amaxv[0], 1)), - MAX(vec_extract(amaxv[0], 2), - vec_extract(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - const vector float vid = vec_splats(id); - - y[i].d = GGML_FP32_TO_FP16(d); - - vector int accv = vec_splats(0); - - for (int j = 0; j < 8; j++) { - const vector float v = vec_round(vec_mul(srcv[j], vid)); - vi[j] = vec_cts(v, 0); - - accv = vec_add(accv, vi[j]); - } - vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); - vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); - - accv = vec_add(accv, vec_sld(accv, accv, 4)); - accv = vec_add(accv, vec_sld(accv, accv, 8)); - y[i].s = GGML_FP32_TO_FP16(d * vec_extract(accv, 0)); - } - -#elif defined(__loongarch_asx) - for (int i = 0; i < nb; i++) { - __m256 v0 = (__m256)__lasx_xvld( x , 0 ); - __m256 v1 = (__m256)__lasx_xvld( x , 32 ); - __m256 v2 = (__m256)__lasx_xvld( x , 64 ); - __m256 v3 = (__m256)__lasx_xvld( x , 96 ); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 sign_bit = __lasx_xvreplfr2vr_s( -0.0f ); - __m256 max_abs = (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v0 ); - max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v1 ) ); - max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v2 ) ); - max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v3 ) ); - - __m128 max4 = __lsx_vfmax_s( lasx_extractf128( max_abs, 1 ), lasx_extractf128( max_abs, 0) ); - max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vpickod_d((__m128i) max4, (__m128i)max4 ) ); - __m128 tmp = max4; - max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vextrins_w((__m128i)tmp, (__m128i)max4, 0x10 )); - const float max_scalar = ((v4f32)max4)[0]; - - // Quantize these floats - const float d = max_scalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); - const float id = ( max_scalar != 0.0f ) ? 
127.f / max_scalar : 0.0f; - const __m256 mul = __lasx_xvreplfr2vr_s( id ); - - // Apply the multiplier - v0 = __lasx_xvfmul_s( v0, mul ); - v1 = __lasx_xvfmul_s( v1, mul ); - v2 = __lasx_xvfmul_s( v2, mul ); - v3 = __lasx_xvfmul_s( v3, mul ); - - // Round to nearest integer - __m256i i0 = __lasx_xvftintrne_w_s( v0 ); - __m256i i1 = __lasx_xvftintrne_w_s( v1 ); - __m256i i2 = __lasx_xvftintrne_w_s( v2 ); - __m256i i3 = __lasx_xvftintrne_w_s( v3 ); - - __m128i ni0 = lasx_extracti128(i0, 0); - __m128i ni1 = lasx_extracti128( i0, 1); - __m128i ni2 = lasx_extracti128( i1, 0); - __m128i ni3 = lasx_extracti128( i1, 1); - __m128i ni4 = lasx_extracti128( i2, 0 ); - __m128i ni5 = lasx_extracti128( i2, 1); - __m128i ni6 = lasx_extracti128( i3, 0); - __m128i ni7 = lasx_extracti128( i3, 1); - - // Compute the sum of the quants and set y[i].s - const __m128i s0 = __lsx_vadd_w(__lsx_vadd_w(ni0, ni1), __lsx_vadd_w(ni2, ni3)); - const __m128i s1 = __lsx_vadd_w(__lsx_vadd_w(ni4, ni5), __lsx_vadd_w(ni6, ni7)); - y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_4(__lsx_vadd_w(s0, s1))); - - // Convert int32 to int16 - ni0 = lsx_packs_w( ni0, ni1 ); - ni2 = lsx_packs_w( ni2, ni3 ); - ni4 = lsx_packs_w( ni4, ni5 ); - ni6 = lsx_packs_w( ni6, ni7 ); - // Convert int16 to int8 - ni0 = lsx_packs_h( ni0, ni2 ); - ni4 = lsx_packs_h( ni4, ni6 ); - - __lsx_vst(ni0, (__m128i *)(y[i].qs + 0), 0); - __lsx_vst(ni4, (__m128i *)(y[i].qs + 16), 0); - } -#elif defined(__VXE__) || defined(__VXE2__) - for (int i = 0; i < nb; i++) { - __vector float srcv [8]; - __vector float asrcv[8]; - __vector float amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); - for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(vec_extract(amaxv[0], 0), - vec_extract(amaxv[0], 1)), - MAX(vec_extract(amaxv[0], 2), - vec_extract(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f / d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - __vector int32_t acc = vec_splats(0); - - for (int j = 0; j < 8; j++) { - const __vector float v = vec_mul(srcv[j], vec_splats(id)); - const __vector int32_t vi = vec_signed(v); - - y[i].qs[4*j + 0] = vec_extract(vi, 0); - y[i].qs[4*j + 1] = vec_extract(vi, 1); - y[i].qs[4*j + 2] = vec_extract(vi, 2); - y[i].qs[4*j + 3] = vec_extract(vi, 3); - - acc = vec_add(acc, vi); - } - - y[i].s = GGML_FP32_TO_FP16(d * (acc[0] + acc[1] + acc[2] + acc[3])); - } -#else - GGML_UNUSED(nb); - // scalar - quantize_row_q8_1_ref(x, y, k); -#endif -} - -// -// 2-6 bit quantization in super-blocks -// - -// -// ===================== Helper functions -// -static inline int nearest_int(float fval) { - assert(fabsf(fval) <= 4194303.f); - float val = fval + 12582912.f; - int i; memcpy(&i, &val, sizeof(int)); - return (i & 0x007fffff) - 0x00400000; -} - -static float make_qx_quants(int n, int nmax, const float * GGML_RESTRICT x, int8_t * GGML_RESTRICT L, int rmse_type, - const float * GGML_RESTRICT qw) { - float max = 0; - float amax = 0; - for (int i = 0; i < n; ++i) { - float ax = fabsf(x[i]); - if (ax > amax) { amax = ax; max = x[i]; } - } - if (amax < GROUP_MAX_EPS) { // all zero - for (int i = 0; i < n; ++i) { - L[i] = 0; - } - return 0.f; - } - float iscale = -nmax / max; - if (rmse_type == 0) { - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - L[i] = nmax + MAX(-nmax, MIN(nmax-1, l)); - } - return 1/iscale; - } - bool return_early = false; - if (rmse_type < 0) { - rmse_type = -rmse_type; - return_early = true; - } - float sumlx = 0; - float suml2 = 0; -#ifdef HAVE_BUGGY_APPLE_LINKER - // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7 - for (volatile int i = 0; i < n; ++i) { -#else - for (int i = 0; i < n; ++i) { -#endif - int l = nearest_int(iscale * x[i]); - l = MAX(-nmax, MIN(nmax-1, l)); - L[i] = l + nmax; - float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i])); - sumlx += w*x[i]*l; - suml2 += w*l*l; - } - float scale = suml2 ? sumlx/suml2 : 0.0f; - if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale; - float best = scale * sumlx; - for (int is = -9; is <= 9; ++is) { - if (is == 0) { - continue; - } - iscale = -(nmax + 0.1f*is) / max; - sumlx = suml2 = 0; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - l = MAX(-nmax, MIN(nmax-1, l)); - float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? 
fabsf(x[i]) : sqrtf(fabsf(x[i])); - sumlx += w*x[i]*l; - suml2 += w*l*l; - } - if (suml2 > 0 && sumlx*sumlx > best*suml2) { - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - L[i] = nmax + MAX(-nmax, MIN(nmax-1, l)); - } - scale = sumlx/suml2; best = scale*sumlx; - } - } - return scale; -} - -static float make_q3_quants(int n, int nmax, const float * GGML_RESTRICT x, int8_t * GGML_RESTRICT L, bool do_rmse) { - float max = 0; - float amax = 0; - for (int i = 0; i < n; ++i) { - float ax = fabsf(x[i]); - if (ax > amax) { amax = ax; max = x[i]; } - } - if (amax < GROUP_MAX_EPS) { // all zero - for (int i = 0; i < n; ++i) { L[i] = 0; } - return 0.f; - } - float iscale = -nmax / max; - if (do_rmse) { - float sumlx = 0; - float suml2 = 0; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - l = MAX(-nmax, MIN(nmax-1, l)); - L[i] = l; - float w = x[i]*x[i]; - sumlx += w*x[i]*l; - suml2 += w*l*l; - } - for (int itry = 0; itry < 5; ++itry) { - int n_changed = 0; - for (int i = 0; i < n; ++i) { - float w = x[i]*x[i]; - float slx = sumlx - w*x[i]*L[i]; - if (slx > 0) { - float sl2 = suml2 - w*L[i]*L[i]; - int new_l = nearest_int(x[i] * sl2 / slx); - new_l = MAX(-nmax, MIN(nmax-1, new_l)); - if (new_l != L[i]) { - slx += w*x[i]*new_l; - sl2 += w*new_l*new_l; - if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) { - L[i] = new_l; sumlx = slx; suml2 = sl2; - ++n_changed; - } - } - } - } - if (!n_changed) { - break; - } - } - for (int i = 0; i < n; ++i) { - L[i] += nmax; - } - return sumlx / suml2; - } - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - l = MAX(-nmax, MIN(nmax-1, l)); - L[i] = l + nmax; - } - return 1/iscale; -} - -static float make_qkx1_quants(int n, int nmax, const float * GGML_RESTRICT x, uint8_t * GGML_RESTRICT L, float * GGML_RESTRICT the_min, - int ntry, float alpha) { - float min = x[0]; - float max = x[0]; - for (int i = 1; i < n; ++i) { - if (x[i] < min) min = x[i]; - if (x[i] > max) max = x[i]; - } - if (max == min) { - for (int i = 0; i < n; ++i) L[i] = 0; - *the_min = 0; - return 0.f; - } - if (min > 0) min = 0; - float iscale = nmax/(max - min); - float scale = 1/iscale; - for (int itry = 0; itry < ntry; ++itry) { - float sumlx = 0; int suml2 = 0; - bool did_change = false; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale*(x[i] - min)); - l = MAX(0, MIN(nmax, l)); - if (l != L[i]) { - L[i] = l; - did_change = true; - } - sumlx += (x[i] - min)*l; - suml2 += l*l; - } - scale = sumlx/suml2; - float sum = 0; - for (int i = 0; i < n; ++i) { - sum += x[i] - scale*L[i]; - } - min = alpha*min + (1 - alpha)*sum/n; - if (min > 0) min = 0; - iscale = 1/scale; - if (!did_change) break; - } - *the_min = -min; - return scale; -} - -static float make_qkx2_quants(int n, int nmax, const float * GGML_RESTRICT x, const float * GGML_RESTRICT weights, - uint8_t * GGML_RESTRICT L, float * GGML_RESTRICT the_min, uint8_t * GGML_RESTRICT Laux, - float rmin, float rdelta, int nstep, bool use_mad) { - float min = x[0]; - float max = x[0]; - float sum_w = weights[0]; - float sum_x = sum_w * x[0]; -#ifdef HAVE_BUGGY_APPLE_LINKER - // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7 - for (volatile int i = 1; i < n; ++i) { -#else - for (int i = 1; i < n; ++i) { -#endif - if (x[i] < min) min = x[i]; - if (x[i] > max) max = x[i]; - float w = weights[i]; - sum_w += w; - sum_x += w * x[i]; - } - if (min > 0) min = 0; - if (max == min) { - for (int i = 0; i < n; ++i) L[i] = 0; - *the_min = -min; - return 0.f; - } - 
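// What follows is a small grid search for the asymmetric (scale, min) pair:
// starting from iscale = nmax/(max - min), try nstep nearby candidate scales,
// solve a weighted least-squares fit for scale and min at each candidate, and
// keep the pair with the smallest weighted error (absolute or squared,
// depending on use_mad).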
float iscale = nmax/(max - min); - float scale = 1/iscale; - float best_mad = 0; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale*(x[i] - min)); - L[i] = MAX(0, MIN(nmax, l)); - float diff = scale * L[i] + min - x[i]; - diff = use_mad ? fabsf(diff) : diff * diff; - float w = weights[i]; - best_mad += w * diff; - } - if (nstep < 1) { - *the_min = -min; - return scale; - } - for (int is = 0; is <= nstep; ++is) { - iscale = (rmin + rdelta*is + nmax)/(max - min); - float sum_l = 0, sum_l2 = 0, sum_xl = 0; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale*(x[i] - min)); - l = MAX(0, MIN(nmax, l)); - Laux[i] = l; - float w = weights[i]; - sum_l += w*l; - sum_l2 += w*l*l; - sum_xl += w*l*x[i]; - } - float D = sum_w * sum_l2 - sum_l * sum_l; - if (D > 0) { - float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D; - float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D; - if (this_min > 0) { - this_min = 0; - this_scale = sum_xl / sum_l2; - } - float mad = 0; - for (int i = 0; i < n; ++i) { - float diff = this_scale * Laux[i] + this_min - x[i]; - diff = use_mad ? fabsf(diff) : diff * diff; - float w = weights[i]; - mad += w * diff; - } - if (mad < best_mad) { - for (int i = 0; i < n; ++i) { - L[i] = Laux[i]; - } - best_mad = mad; - scale = this_scale; - min = this_min; - } - } - } - *the_min = -min; - return scale; -} - -static inline void get_scale_min_k4(int j, const uint8_t * GGML_RESTRICT q, uint8_t * GGML_RESTRICT d, uint8_t * GGML_RESTRICT m) { - if (j < 4) { - *d = q[j] & 63; *m = q[j + 4] & 63; - } else { - *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); - *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); - } -} - -//========================- 2-bit (de)-quantization - -void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - quantize_row_q2_K_ref(x, vy, k); -} - -//========================= 3-bit (de)-quantization - -void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - quantize_row_q3_K_ref(x, vy, k); -} - -// ====================== 4-bit (de)-quantization - -void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(k % QK_K == 0); - block_q4_K * GGML_RESTRICT y = vy; - quantize_row_q4_K_ref(x, y, k); -} - -// ====================== 5-bit (de)-quantization - -void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(k % QK_K == 0); - block_q5_K * GGML_RESTRICT y = vy; - quantize_row_q5_K_ref(x, y, k); -} - -// ====================== 6-bit (de)-quantization - -void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(k % QK_K == 0); - block_q6_K * GGML_RESTRICT y = vy; - quantize_row_q6_K_ref(x, y, k); -} - -// ====================== Ternary (de)-quantization (BitNet b1.58 and TriLMs) - -void quantize_row_tq1_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(k % QK_K == 0); - block_tq1_0 * GGML_RESTRICT y = vy; - quantize_row_tq1_0_ref(x, y, k); -} - -void quantize_row_tq2_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(k % QK_K == 0); - block_tq2_0 * GGML_RESTRICT y = vy; - quantize_row_tq2_0_ref(x, y, k); -} - -static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113}; - -//===================================== Q8_K ============================================== - -void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { 
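// Per 256-value super-block: find the element with the largest magnitude,
// quantize with iscale = -127/amax, store the int8 values, and record the
// per-16-element sums (bsums) that the k-quant dot products consume later.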
-#ifdef __wasm_simd128__ - assert(k % QK_K == 0); - const int64_t nb = k / QK_K; - block_q8_K * GGML_RESTRICT yc = y; // Cast to proper type - - for (int i = 0; i < nb; i++) { - const float * x_block = x + i * QK_K; - - v128_t min_vec = wasm_v128_load(x_block); - v128_t max_vec = min_vec; - - for (int j = 4; j < QK_K; j += 4) { - v128_t x_vec = wasm_v128_load(x_block + j); - max_vec = wasm_f32x4_pmax(max_vec, x_vec); - min_vec = wasm_f32x4_pmin(min_vec, x_vec); - } - max_vec = wasm_f32x4_pmax(max_vec, wasm_i32x4_shuffle(max_vec, max_vec, 2, 3, 0, 1)); - max_vec = wasm_f32x4_pmax(max_vec, wasm_i32x4_shuffle(max_vec, max_vec, 1, 0, 3, 2)); - min_vec = wasm_f32x4_pmin(min_vec, wasm_i32x4_shuffle(min_vec, min_vec, 2, 3, 0, 1)); - min_vec = wasm_f32x4_pmin(min_vec, wasm_i32x4_shuffle(min_vec, min_vec, 1, 0, 3, 2)); - float max = wasm_f32x4_extract_lane(max_vec, 0); - float min = wasm_f32x4_extract_lane(min_vec, 0); - float amax = -min > max ? min : max; - - if (amax == 0.0f) { - yc[i].d = 0.0f; - const v128_t zero = wasm_i8x16_splat(0); - for (int j = 0; j < QK_K; j += 16) { - wasm_v128_store(yc[i].qs + j, zero); - } - continue; - } - - const float iscale = -127.0f / amax; - const v128_t scale_vec = wasm_f32x4_splat(iscale); - - // Process 16 elements per iteration - for (int j = 0, jb = 0; j < QK_K; j += 16, jb++) { - // Load and quantize 16 floats - v128_t x0 = wasm_v128_load(x_block + j); - v128_t x1 = wasm_v128_load(x_block + j + 4); - v128_t x2 = wasm_v128_load(x_block + j + 8); - v128_t x3 = wasm_v128_load(x_block + j + 12); - - v128_t q0 = wasm_f32x4_nearest(wasm_f32x4_mul(x0, scale_vec)); - v128_t q1 = wasm_f32x4_nearest(wasm_f32x4_mul(x1, scale_vec)); - v128_t q2 = wasm_f32x4_nearest(wasm_f32x4_mul(x2, scale_vec)); - v128_t q3 = wasm_f32x4_nearest(wasm_f32x4_mul(x3, scale_vec)); - - // Convert to i32 with saturation - v128_t i0 = wasm_i32x4_trunc_sat_f32x4(q0); - v128_t i1 = wasm_i32x4_trunc_sat_f32x4(q1); - v128_t i2 = wasm_i32x4_trunc_sat_f32x4(q2); - v128_t i3 = wasm_i32x4_trunc_sat_f32x4(q3); - - // Pack into 16 i8 values - v128_t i8 = wasm_i8x16_narrow_i16x8( - wasm_i16x8_narrow_i32x4(i0, i1), - wasm_i16x8_narrow_i32x4(i2, i3) - ); - wasm_v128_store(yc[i].qs + j, i8); - - // Calculate bsums using SIMD - v128_t sum16 = wasm_i16x8_add( - wasm_i16x8_extend_low_i8x16(i8), - wasm_i16x8_extend_high_i8x16(i8) - ); - v128_t sum32 = wasm_i32x4_add( - wasm_i32x4_extend_low_i16x8(sum16), - wasm_i32x4_extend_high_i16x8(sum16) - ); - sum32 = wasm_i32x4_add(sum32, wasm_i32x4_shuffle(sum32, sum32, 2, 3, 0, 1)); - sum32 = wasm_i32x4_add(sum32, wasm_i32x4_shuffle(sum32, sum32, 1, 0, 3, 2)); - yc[i].bsums[jb] = wasm_i32x4_extract_lane(sum32, 0); - } - - yc[i].d = 1.0f / iscale; - } -#else - quantize_row_q8_K_ref(x, y, k); -#endif -} - -//===================================== Dot products ================================= - -// -// Helper functions -// -#if __AVX__ || __AVX2__ || __AVX512F__ - -// shuffles to pick the required scales in dot products -static inline __m256i get_scale_shuffle_q3k(int i) { - static const uint8_t k_shuffle[128] = { - 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, - 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, - 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, - 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, - }; - return _mm256_loadu_si256((const __m256i*)k_shuffle + i); -} 
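The shuffle tables in these helpers are consumed by the k-quant dot products through byte shuffles that replicate one packed sub-block scale across a whole vector register, so the scale lines up with every partial sum it multiplies. A rough scalar sketch of that broadcast is given below; the function name in the sketch is illustrative only and does not exist in ggml.

#include <stdint.h>

// Scalar sketch (illustrative only, not ggml code): broadcast the i-th 16-bit
// sub-block scale across sixteen 16-bit lanes, the effect the
// get_scale_shuffle_* masks achieve with a single byte-shuffle instruction.
static inline void broadcast_scale16(const int16_t * scales, int i, int16_t out[16]) {
    for (int j = 0; j < 16; ++j) {
        out[j] = scales[i];
    }
}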
-static inline __m256i get_scale_shuffle_k4(int i) { - static const uint8_t k_shuffle[256] = { - 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, - 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, - 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, - 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, - 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, - 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, - 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, - 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15 - }; - return _mm256_loadu_si256((const __m256i*)k_shuffle + i); -} -static inline __m128i get_scale_shuffle(int i) { - static const uint8_t k_shuffle[128] = { - 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, - 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, - 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, - 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, - 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11, - 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13, - 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15 - }; - return _mm_loadu_si128((const __m128i*)k_shuffle + i); -} -#elif defined(__loongarch_asx) -// shuffles to pick the required scales in dot products -static inline __m256i get_scale_shuffle_q3k(int i) { - static const uint8_t k_shuffle[128] = { - 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, - 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, - 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, - 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, - }; - return __lasx_xvld((const __m256i*)k_shuffle + i, 0); -} -static inline __m256i get_scale_shuffle_k4(int i) { - static const uint8_t k_shuffle[256] = { - 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, - 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, - 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, - 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, - 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, - 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, - 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, - 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15 - }; - return __lasx_xvld((const __m256i*)k_shuffle + i, 0); -} -static inline __m128i get_scale_shuffle(int i) { - static const uint8_t k_shuffle[128] = { - 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, - 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, - 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, - 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, - 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11, - 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13, - 
14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15 - }; - return __lsx_vld((const __m128i*)k_shuffle + i, 0); -} -#endif - -void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - const int qk = QK8_0; - const int nb = n / qk; - - assert(n % qk == 0); -#if defined(__ARM_FEATURE_MATMUL_INT8) - assert((nrc == 2) || (nrc == 1)); -#else - assert(nrc == 1); -#endif - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q4_0 * GGML_RESTRICT x = vx; - const block_q8_0 * GGML_RESTRICT y = vy; - -#if defined(__ARM_FEATURE_MATMUL_INT8) - if (nrc == 2) { - const block_q4_0 * GGML_RESTRICT vx0 = vx; - const block_q4_0 * GGML_RESTRICT vx1 = (const block_q4_0 *) ((const uint8_t*)vx + bx); - const block_q8_0 * GGML_RESTRICT vy0 = vy; - const block_q8_0 * GGML_RESTRICT vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); - - float32x4_t sumv0 = vdupq_n_f32(0.0f); - - for (int i = 0; i < nb; i++) { - const block_q4_0 * GGML_RESTRICT b_x0 = &vx0[i]; - const block_q4_0 * GGML_RESTRICT b_x1 = &vx1[i]; - const block_q8_0 * GGML_RESTRICT b_y0 = &vy0[i]; - const block_q8_0 * GGML_RESTRICT b_y1 = &vy1[i]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - const int8x16_t s8b = vdupq_n_s8(0x8); - - const uint8x16_t v0_0 = vld1q_u8(b_x0->qs); - const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // sub 8 - const int8x16_t x0_l = vsubq_s8(v0_0l, s8b); - const int8x16_t x0_h = vsubq_s8(v0_0h, s8b); - const int8x16_t x1_l = vsubq_s8(v0_1l, s8b); - const int8x16_t x1_h = vsubq_s8(v0_1h, s8b); - - // load y - const int8x16_t y0_l = vld1q_s8(b_y0->qs); - const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); - const int8x16_t y1_l = vld1q_s8(b_y1->qs); - const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - - float32_t _scale[4] = { - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) - }; - float32x4_t scale = vld1q_f32(_scale); - - int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - - int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - - int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - - int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - - sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), - l1, r1)), l2, r2)), l3, r3))), scale); - } - - float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); - float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); - - vst1_f32(s, 
vget_low_f32 (sumv2)); - vst1_f32(s + bs, vget_high_f32(sumv2)); - - return; - } -#endif - - int ib = 0; - float sumf = 0; - -#if defined(__ARM_FEATURE_SVE) - svfloat32_t sumv0 = svdup_n_f32(0.0f); - svfloat32_t sumv1 = svdup_n_f32(0.0f); - - const int vector_length = ggml_cpu_get_sve_cnt()*8; - - // VLA Implementation using switch case - switch (vector_length) { - case 128: - { - // predicate for activating higher lanes for 4 float32 elements - const svbool_t ph4 = svptrue_pat_b32(SV_VL4); - - for (; ib + 1 < nb; ib += 2) { - const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - // load x - const svuint8_t qx0r = svld1rq_u8(svptrue_b8(), x0->qs); - const svuint8_t qx1r = svld1rq_u8(svptrue_b8(), x1->qs); - - // 4-bit -> 8-bit - const svint8_t qx0l = svreinterpret_s8_u8(svand_n_u8_m(svptrue_b8(), qx0r, 0x0F)); - const svint8_t qx0h = svreinterpret_s8_u8(svlsr_n_u8_m(svptrue_b8(), qx0r, 0x04)); - const svint8_t qx1l = svreinterpret_s8_u8(svand_n_u8_m(svptrue_b8(), qx1r, 0x0F)); - const svint8_t qx1h = svreinterpret_s8_u8(svlsr_n_u8_m(svptrue_b8(), qx1r, 0x04)); - - // sub 8 - const svint8_t qx0ls = svsub_n_s8_x(svptrue_b8(), qx0h, 8); - const svint8_t qx0hs = svsub_n_s8_x(svptrue_b8(), qx0l, 8); - const svint8_t qx1ls = svsub_n_s8_x(svptrue_b8(), qx1h, 8); - const svint8_t qx1hs = svsub_n_s8_x(svptrue_b8(), qx1l, 8); - - // load y - const svint8_t qy0h = svld1_s8(svptrue_b8(), y0->qs); - const svint8_t qy0l = svld1_s8(svptrue_b8(), y0->qs + 16); - const svint8_t qy1h = svld1_s8(svptrue_b8(), y1->qs); - const svint8_t qy1l = svld1_s8(svptrue_b8(), y1->qs + 16); - - // dot product - sumv0 = svmla_n_f32_x(ph4, sumv0, svcvt_f32_s32_x(ph4, svadd_x(ph4, - svdot_s32(svdup_n_s32(0), qx0ls, qy0l), - svdot_s32(svdup_n_s32(0), qx0hs, qy0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = svmla_n_f32_x(ph4, sumv1, svcvt_f32_s32_x(ph4, svadd_x(ph4, - svdot_s32(svdup_n_s32(0), qx1ls, qy1l), - svdot_s32(svdup_n_s32(0), qx1hs, qy1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); - } break; - case 256: - { - // predicate for activating higher lanes for 16 int8 elements - const svbool_t ph16 = svptrue_pat_b8(SV_VL16); - // predicate for activating lower lanes for 16 int8 elements - const svbool_t pl16 = svnot_b_z(svptrue_b8(), ph16); - - for (; ib + 1 < nb; ib += 2) { - const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - // load x - const svuint8_t qx0r = svld1rq_u8(svptrue_b8(), x0->qs); - const svuint8_t qx1r = svld1rq_u8(svptrue_b8(), x1->qs); - - // 4-bit -> 8-bit - const svint8_t qx0 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx0r, 0x0F), 0x04)); - const svint8_t qx1 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx1r, 0x0F), 0x04)); - - // sub 8 - const svint8_t qx0s = svsub_n_s8_x(svptrue_b8(), qx0, 8); - const svint8_t qx1s = svsub_n_s8_x(svptrue_b8(), qx1, 8); - - // load y - const svint8_t qy0 = svld1_s8(svptrue_b8(), y0->qs); - const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs); - - // dot product - sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(), - svdot_s32(svdup_n_s32(0), qx0s, qy0)), 
GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(), - svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); - } break; - case 512: - { - // predicate for activating higher lanes for 32 int8 elements - const svbool_t ph32 = svptrue_pat_b8(SV_VL32); - - // predicate for activating higher lanes for 16 int8 elements - const svbool_t ph16 = svptrue_pat_b8(SV_VL16); - // predicate for activating lower lanes for 16 int8 elements from first 32 int8 activated lanes - const svbool_t pl16 = svnot_b_z(ph32, ph16); - - for (; ib + 1 < nb; ib += 2) { - const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - // load x - const svuint8_t qx0r = svld1rq_u8(ph32, x0->qs); - const svuint8_t qx1r = svld1rq_u8(ph32, x1->qs); - - // 4-bit -> 8-bit - const svint8_t qx0 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx0r, 0x0F), 0x04)); - const svint8_t qx1 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx1r, 0x0F), 0x04)); - - // sub 8 - const svint8_t qx0s = svsub_n_s8_x(ph32, qx0, 8); - const svint8_t qx1s = svsub_n_s8_x(ph32, qx1, 8); - - // load y - const svint8_t qy0 = svld1_s8(ph32, y0->qs); - const svint8_t qy1 = svld1_s8(ph32, y1->qs); - - // dot product - sumv0 = svmla_n_f32_x(ph32, sumv0, svcvt_f32_s32_x(ph32, - svdot_s32(svdup_n_s32(0), qx0s, qy0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = svmla_n_f32_x(ph32, sumv1, svcvt_f32_s32_x(ph32, - svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = svaddv_f32(ph32, svadd_f32_x(ph32, sumv0, sumv1)); - } break; - default: - assert(false && "Unsupported vector length"); - break; - } - -#elif defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - for (; ib + 1 < nb; ib += 2) { - const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - const int8x16_t s8b = vdupq_n_s8(0x8); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // sub 8 - const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); - const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); - const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); - const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - - // dot product into int32x4_t - const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); - const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), 
GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -#elif defined __wasm_simd128__ - v128_t sumv = wasm_f32x4_splat(0.0f); - - const v128_t m4b = wasm_i8x16_splat(0x0F); - const v128_t s8b = wasm_i8x16_splat(0x8); - - for (; ib + 1 < nb; ib += 2) { - const block_q4_0 * GGML_RESTRICT x0 = &x[ib]; - const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - // Load and process x0 - v128_t v0_0 = wasm_v128_load(x0->qs); - v128_t v0_0l = wasm_v128_and(v0_0, m4b); - v128_t v0_0h = wasm_u8x16_shr(v0_0, 4); - v128_t v0_0ls = wasm_i8x16_sub(v0_0l, s8b); - v128_t v0_0hs = wasm_i8x16_sub(v0_0h, s8b); - - // Load y0 vectors - v128_t y0_l = wasm_v128_load(y0->qs); - v128_t y0_h = wasm_v128_load(y0->qs + 16); - - // Extend to i16x8 and compute dot products - v128_t dx0l = wasm_i16x8_extend_low_i8x16(v0_0ls); - v128_t dx0h = wasm_i16x8_extend_high_i8x16(v0_0ls); - v128_t dx0hl = wasm_i16x8_extend_low_i8x16(v0_0hs); - v128_t dx0hh = wasm_i16x8_extend_high_i8x16(v0_0hs); - - v128_t dy0ll = wasm_i16x8_extend_low_i8x16(y0_l); - v128_t dy0lh = wasm_i16x8_extend_high_i8x16(y0_l); - v128_t dy0hl = wasm_i16x8_extend_low_i8x16(y0_h); - v128_t dy0hh = wasm_i16x8_extend_high_i8x16(y0_h); - - v128_t dp0 = wasm_i32x4_add( - wasm_i32x4_add( - wasm_i32x4_dot_i16x8(dx0l, dy0ll), - wasm_i32x4_dot_i16x8(dx0h, dy0lh) - ), - wasm_i32x4_add( - wasm_i32x4_dot_i16x8(dx0hl, dy0hl), - wasm_i32x4_dot_i16x8(dx0hh, dy0hh) - ) - ); - - // Load and process x1 - v128_t v0_1 = wasm_v128_load(x1->qs); - v128_t v0_1l = wasm_v128_and(v0_1, m4b); - v128_t v0_1h = wasm_u8x16_shr(v0_1, 4); - v128_t v0_1ls = wasm_i8x16_sub(v0_1l, s8b); - v128_t v0_1hs = wasm_i8x16_sub(v0_1h, s8b); - - // Load y1 vectors - v128_t y1_l = wasm_v128_load(y1->qs); - v128_t y1_h = wasm_v128_load(y1->qs + 16); - - // Extend to i16x8 and compute dot products - v128_t dx1l = wasm_i16x8_extend_low_i8x16(v0_1ls); - v128_t dx1h = wasm_i16x8_extend_high_i8x16(v0_1ls); - v128_t dx1hl = wasm_i16x8_extend_low_i8x16(v0_1hs); - v128_t dx1hh = wasm_i16x8_extend_high_i8x16(v0_1hs); - - v128_t dy1ll = wasm_i16x8_extend_low_i8x16(y1_l); - v128_t dy1lh = wasm_i16x8_extend_high_i8x16(y1_l); - v128_t dy1hl = wasm_i16x8_extend_low_i8x16(y1_h); - v128_t dy1hh = wasm_i16x8_extend_high_i8x16(y1_h); - - v128_t dp1 = wasm_i32x4_add( - wasm_i32x4_add( - wasm_i32x4_dot_i16x8(dx1l, dy1ll), - wasm_i32x4_dot_i16x8(dx1h, dy1lh) - ), - wasm_i32x4_add( - wasm_i32x4_dot_i16x8(dx1hl, dy1hl), - wasm_i32x4_dot_i16x8(dx1hh, dy1hh) - ) - ); - - // Accumulate results with scaling - float scale0 = GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d); - float scale1 = GGML_FP16_TO_FP32(x1->d) * GGML_FP16_TO_FP32(y1->d); - - sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(dp0), wasm_f32x4_splat(scale0))); - sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(dp1), wasm_f32x4_splat(scale1))); - } - - sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + - wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (; ib < nb; ++ib) { - /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); - - 
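// Remaining steps of the AVX2 block loop: expand the 32 packed 4-bit values to
// bytes in [0, 15], shift them to [-8, 7], take the signed int8 dot product
// against the 32 q8_0 activations, convert to float, and accumulate d * dot
// into the running sum.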
__m256i qx = bytes_from_nibbles_32(x[ib].qs); - - // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. - const __m256i off = _mm256_set1_epi8( 8 ); - qx = _mm256_sub_epi8( qx, off ); - - __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); - - const __m256 q = mul_sum_i8_pairs_float(qx, qy); - - /* Multiply q with scale and accumulate */ - acc = _mm256_fmadd_ps( d, q, acc ); - } - - sumf = hsum_float_8(acc); -#elif defined(__AVX__) - __m256 accum = _mm256_setzero_ps(); - for (; ib + 1 < nb; ib += 2) { - const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)x[ib + 0].qs); - const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); - const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs); - const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs + 1); - const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); - const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); - - const __m128i q4b_1_0 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), q4bits_1), _mm_set1_epi8(8)); - const __m128i q4b_1_1 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(q4bits_1, 4)), _mm_set1_epi8(8)); - const __m128i q4b_2_0 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), q4bits_2), _mm_set1_epi8(8)); - const __m128i q4b_2_1 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(q4bits_2, 4)), _mm_set1_epi8(8)); - - const __m128i p16_1_0 = mul_add_epi8_sse(q4b_1_0, q8b_1_0); - const __m128i p16_1_1 = mul_add_epi8_sse(q4b_1_1, q8b_1_1); - const __m128i p16_2_0 = mul_add_epi8_sse(q4b_2_0, q8b_2_0); - const __m128i p16_2_1 = mul_add_epi8_sse(q4b_2_1, q8b_2_1); - const __m128i p_1 = _mm_add_epi16(p16_1_0, p16_1_1); - const __m128i p_2 = _mm_add_epi16(p16_2_0, p16_2_1); - const __m256 p = sum_i16_pairs_float(p_2, p_1); - - const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); - accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); - } - - sumf = hsum_float_8(accum); -#elif defined(__SSSE3__) - // set constants - const __m128i lowMask = _mm_set1_epi8(0xF); - const __m128i off = _mm_set1_epi8(8); - - // Initialize accumulator with zeros - __m128 acc_0 = _mm_setzero_ps(); - __m128 acc_1 = _mm_setzero_ps(); - __m128 acc_2 = _mm_setzero_ps(); - __m128 acc_3 = _mm_setzero_ps(); - - for (; ib + 1 < nb; ib += 2) { - _mm_prefetch(&x[ib] + sizeof(block_q4_0), _MM_HINT_T0); - _mm_prefetch(&y[ib] + sizeof(block_q8_0), _MM_HINT_T0); - - // Compute combined scale for the block 0 and 1 - const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); - - const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[ib].qs); - - __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1); - __m128i by_0 = _mm_loadu_si128((const __m128i *)y[ib].qs); - bx_0 = _mm_sub_epi8(bx_0, off); - const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); - - __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4)); - __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[ib].qs + 16)); - bx_1 = _mm_sub_epi8(bx_1, off); - const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); - - _mm_prefetch(&x[ib] + 2 * sizeof(block_q4_0), _MM_HINT_T0); - _mm_prefetch(&y[ib] + 2 * sizeof(block_q8_0), _MM_HINT_T0); - - // Compute combined scale for the block 2 and 3 - const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[ib + 1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) ); - - const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); - - __m128i bx_2 = 
_mm_and_si128(lowMask, tmp_2_3); - __m128i by_2 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); - bx_2 = _mm_sub_epi8(bx_2, off); - const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2); - - __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4)); - __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[ib + 1].qs + 16)); - bx_3 = _mm_sub_epi8(bx_3, off); - const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); - - // Convert int32_t to float - __m128 p0 = _mm_cvtepi32_ps(i32_0); - __m128 p1 = _mm_cvtepi32_ps(i32_1); - __m128 p2 = _mm_cvtepi32_ps(i32_2); - __m128 p3 = _mm_cvtepi32_ps(i32_3); - - // Apply the scale - __m128 p0_d = _mm_mul_ps( d_0_1, p0 ); - __m128 p1_d = _mm_mul_ps( d_0_1, p1 ); - __m128 p2_d = _mm_mul_ps( d_2_3, p2 ); - __m128 p3_d = _mm_mul_ps( d_2_3, p3 ); - - // Acummulate - acc_0 = _mm_add_ps(p0_d, acc_0); - acc_1 = _mm_add_ps(p1_d, acc_1); - acc_2 = _mm_add_ps(p2_d, acc_2); - acc_3 = _mm_add_ps(p3_d, acc_3); - } - - sumf = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3); -#elif defined(__riscv_v) - size_t vl = qk / 2; - - for (; ib < nb; ++ib) { - // load elements - vuint8m1_t tx = __riscv_vle8_v_u8m1(x[ib].qs, vl); - - vint8m1_t y0 = __riscv_vle8_v_i8m1(y[ib].qs, vl); - vint8m1_t y1 = __riscv_vle8_v_i8m1(y[ib].qs+16, vl); - - // mask and store lower part of x, and then upper part - vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl); - vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl); - - vint8m1_t x_ai = __riscv_vreinterpret_v_u8m1_i8m1(x_a); - vint8m1_t x_li = __riscv_vreinterpret_v_u8m1_i8m1(x_l); - - // subtract offset - vint8m1_t v0 = __riscv_vsub_vx_i8m1(x_ai, 8, vl); - vint8m1_t v1 = __riscv_vsub_vx_i8m1(x_li, 8, vl); - - vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl); - vint16m2_t vec_mul2 = __riscv_vwmacc_vv_i16m2(vec_mul1, v1, y1, vl); - - vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); - vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl); - - int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - - sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); - } - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector signed int v0 = vec_splats((int32_t)0); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - const vector signed char v8 = vec_splats((signed char)0x8); - - vector float vsumf0 = vec_splats(0.0f); - -#pragma GCC unroll 8 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); - vector float vd = vec_mul(vxd, vyd); - - vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); - vector signed char q8y0 = vec_xl( 0, y[ib].qs); - vector signed char q8y1 = vec_xl(16, y[ib].qs); - - vector signed char q4x0 = vec_and(qxs, lowMask); - vector signed char q4x1 = vec_sr(qxs, v4); - - q4x0 = vec_sub(q4x0, v8); - q4x1 = vec_sub(q4x1, v8); - - vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); - - vector signed int vsumi0 = v0; - - vsumi0 = vec_sum4s(qv0, vsumi0); - vsumi0 = vec_sum4s(qv1, vsumi0); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - } - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - sumf = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - // Initialize accumulator 
with zeros - __m256 acc = (__m256)__lasx_xvldi(0); - - // Main loop - for (; ib < nb; ++ib) { - /* Compute combined scale for the block */ - const __m256 d = __lasx_xvreplfr2vr_s( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); - - __m256i qx = bytes_from_nibbles_32(x[ib].qs); - - // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. - const __m256i off = __lasx_xvreplgr2vr_b( 8 ); - qx = __lasx_xvsub_b( qx, off ); - - __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); - - const __m256 q = mul_sum_i8_pairs_float(qx, qy); - - /* Multiply q with scale and accumulate */ - acc = __lasx_xvfmadd_s( d, q, acc ); - } - - sumf = hsum_float_8(acc); - -#elif defined(__loongarch_sx) - // set constants - const __m128i low_mask = __lsx_vreplgr2vr_b(0xF); - const __m128i off = __lsx_vreplgr2vr_b(8); - - // Initialize accumulator with zeros - __m128 acc_0 = (__m128)__lsx_vldi(0); - __m128 acc_1 = (__m128)__lsx_vldi(0); - __m128 acc_2 = (__m128)__lsx_vldi(0); - __m128 acc_3 = (__m128)__lsx_vldi(0); - - for (; ib + 1 < nb; ib += 2) { - - // Compute combined scale for the block 0 and 1 - const __m128 d_0_1 = (__m128)__lsx_vreplgr2vr_w( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); - - const __m128i tmp_0_1 = __lsx_vld((const __m128i *)x[ib].qs, 0); - - __m128i bx_0 = __lsx_vand_v(low_mask, tmp_0_1); - __m128i by_0 = __lsx_vld((const __m128i *)y[ib].qs, 0); - bx_0 = __lsx_vsub_b(bx_0, off); - const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); - - __m128i bx_1 = __lsx_vand_v(low_mask, __lsx_vsrli_d(tmp_0_1, 4)); - __m128i by_1 = __lsx_vld((const __m128i *)(y[ib].qs + 16), 0); - bx_1 = __lsx_vsub_b(bx_1, off); - const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); - - //_mm_prefetch(&x[ib] + 2 * sizeof(block_q4_0), _MM_HINT_T0); - //_mm_prefetch(&y[ib] + 2 * sizeof(block_q8_0), _MM_HINT_T0); - - // Compute combined scale for the block 2 and 3 - const __m128 d_2_3 = (__m128)__lsx_vreplgr2vr_w( GGML_FP16_TO_FP32(x[ib + 1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) ); - - const __m128i tmp_2_3 = __lsx_vld((const __m128i *)x[ib + 1].qs, 0); - - __m128i bx_2 = __lsx_vand_v(low_mask, tmp_2_3); - __m128i by_2 = __lsx_vld((const __m128i *)y[ib + 1].qs, 0); - bx_2 = __lsx_vsub_b(bx_2, off); - const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2); - - __m128i bx_3 = __lsx_vand_v(low_mask, __lsx_vsrli_d(tmp_2_3, 4)); - __m128i by_3 = __lsx_vld((const __m128i *)(y[ib + 1].qs + 16), 0); - bx_3 = __lsx_vsub_b(bx_3, off); - const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); - - // Convert int32_t to float - __m128 p0 = __lsx_vffint_s_w(i32_0); - __m128 p1 = __lsx_vffint_s_w(i32_1); - __m128 p2 = __lsx_vffint_s_w(i32_2); - __m128 p3 = __lsx_vffint_s_w(i32_3); - - // Apply the scale - __m128 p0_d = __lsx_vfmul_s( d_0_1, p0 ); - __m128 p1_d = __lsx_vfmul_s( d_0_1, p1 ); - __m128 p2_d = __lsx_vfmul_s( d_2_3, p2 ); - __m128 p3_d = __lsx_vfmul_s( d_2_3, p3 ); - - // Acummulate - acc_0 = __lsx_vfadd_s(p0_d, acc_0); - acc_1 = __lsx_vfadd_s(p1_d, acc_1); - acc_2 = __lsx_vfadd_s(p2_d, acc_2); - acc_3 = __lsx_vfadd_s(p3_d, acc_3); - } - - sumf = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3); -#elif defined(__VXE__) || defined(__VXE2__) - __vector float acc = vec_splats(0.0f); - - const __vector uint8_t v_m = vec_splats((const uint8_t)0x0F); - const __vector int8_t v_s = vec_splats( (const int8_t)0x08); - - for (; ib < nb; ++ib) { - const __vector uint8_t v_x = vec_xl(0, x[ib].qs); - const __vector int8_t v_xl = (const __vector int8_t)(v_x & v_m); - const __vector int8_t v_xh 
= (const __vector int8_t)(v_x >> 4); - - const __vector int8_t v_xls = vec_sub(v_xl, v_s); - const __vector int8_t v_xhs = vec_sub(v_xh, v_s); - - const __vector int8_t v_yl = vec_xl(0 , y[ib].qs); - const __vector int8_t v_yh = vec_xl(QK8_0/2, y[ib].qs); - - const __vector int16_t v_xylso = vec_mulo(v_xls, v_yl); - const __vector int16_t v_xylse = vec_mule(v_xls, v_yl); - const __vector int16_t v_xyhso = vec_mulo(v_xhs, v_yh); - const __vector int16_t v_xyhse = vec_mule(v_xhs, v_yh); - - __vector int16_t v_xy_ = v_xylso + v_xylse + v_xyhso + v_xyhse; v_xy_ += vec_reve(v_xy_); - - const __vector float v_xy = vec_float(vec_unpackh(v_xy_)); - const __vector float v_d = vec_splats(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - - acc = vec_madd(v_xy, v_d, acc); - } - - sumf = acc[0] + acc[1] + acc[2] + acc[3]; -#endif - for (; ib < nb; ++ib) { - int sumi0 = 0; - int sumi1 = 0; - - for (int j = 0; j < qk/2; ++j) { - const int v0 = (x[ib].qs[j] & 0x0F) - 8; - const int v1 = (x[ib].qs[j] >> 4) - 8; - - sumi0 += (v0 * y[ib].qs[j]); - sumi1 += (v1 * y[ib].qs[j + qk/2]); - } - - int sumi = sumi0 + sumi1; - sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); - } - - *s = sumf; -} - -void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - const int qk = QK8_1; - const int nb = n / qk; - - assert(n % qk == 0); -#if defined(__ARM_FEATURE_MATMUL_INT8) - assert((nrc == 2) || (nrc == 1)); -#else - assert(nrc == 1); -#endif - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q4_1 * GGML_RESTRICT x = vx; - const block_q8_1 * GGML_RESTRICT y = vy; - -#if defined(__ARM_FEATURE_MATMUL_INT8) - if (nrc == 2) { - const block_q4_1 * GGML_RESTRICT vx0 = vx; - const block_q4_1 * GGML_RESTRICT vx1 = (const block_q4_1 *) ((const uint8_t*)vx + bx); - const block_q8_1 * GGML_RESTRICT vy0 = vy; - const block_q8_1 * GGML_RESTRICT vy1 = (const block_q8_1 *) ((const uint8_t*)vy + by); - - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t summs0 = vdupq_n_f32(0.0f); - - for (int i = 0; i < nb; i++) { - const block_q4_1 * GGML_RESTRICT b_x0 = &vx0[i]; - const block_q4_1 * GGML_RESTRICT b_x1 = &vx1[i]; - const block_q8_1 * GGML_RESTRICT b_y0 = &vy0[i]; - const block_q8_1 * GGML_RESTRICT b_y1 = &vy1[i]; - - float32_t summs_t[4] = { - GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y0->s), - GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y0->s), - GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y1->s), - GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y1->s) - }; - summs0 = vaddq_f32(summs0, vld1q_f32(summs_t)); - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - - const uint8x16_t v0_0 = vld1q_u8(b_x0->qs); - const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); - - // 4-bit -> 8-bit - const int8x16_t x0_l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t x0_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t x1_l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t x1_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // load y - const int8x16_t y0_l = vld1q_s8(b_y0->qs); - const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); - const int8x16_t y1_l = vld1q_s8(b_y1->qs); - const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - - // mmla into int32x4_t - float32_t _scale[4] = { - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - 
GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) - }; - float32x4_t scale = vld1q_f32(_scale); - - int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - - int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - - int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - - int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), - l1, r1)), l2, r2)), l3, r3))), scale); - } - - float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); - float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); - - sumv2 = vaddq_f32(sumv2, summs0); - - vst1_f32(s, vget_low_f32 (sumv2)); - vst1_f32(s + bs, vget_high_f32(sumv2)); - - return; - } -#endif - - int ib = 0; - float sumf = 0; - - // TODO: add WASM SIMD -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - float summs = 0; - - for (; ib + 1 < nb; ib += 2) { - const block_q4_1 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q4_1 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_1 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1]; - - summs += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s) + GGML_FP16_TO_FP32(x1->m) * GGML_FP16_TO_FP32(y1->s); - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - - // dot product into int32x4_t - const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h); - const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs; -#elif defined(__AVX2__) || defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - float summs = 0; - - // Main loop - for (; ib < nb; ++ib) { - const float d0 = GGML_FP16_TO_FP32(x[ib].d); - const float d1 = GGML_FP16_TO_FP32(y[ib].d); - - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); - - const __m256 d0v = _mm256_set1_ps( d0 ); - const __m256 d1v = _mm256_set1_ps( d1 ); - - // Compute combined scales - const __m256 d0d1 = _mm256_mul_ps( 
d0v, d1v ); - - // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes - const __m256i qx = bytes_from_nibbles_32(x[ib].qs); - const __m256i qy = _mm256_loadu_si256( (const __m256i *)y[ib].qs ); - - const __m256 xy = mul_sum_us8_pairs_float(qx, qy); - - // Accumulate d0*d1*x*y -#if defined(__AVX2__) - acc = _mm256_fmadd_ps( d0d1, xy, acc ); -#else - acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc ); -#endif - } - - sumf = hsum_float_8(acc) + summs; -#elif defined(__riscv_v) - size_t vl = qk / 2; - - for (; ib < nb; ++ib) { - // load elements - vuint8m1_t tx = __riscv_vle8_v_u8m1(x[ib].qs, vl); - - vint8m1_t y0 = __riscv_vle8_v_i8m1(y[ib].qs, vl); - vint8m1_t y1 = __riscv_vle8_v_i8m1(y[ib].qs+16, vl); - - // mask and store lower part of x, and then upper part - vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl); - vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl); - - vint8m1_t v0 = __riscv_vreinterpret_v_u8m1_i8m1(x_a); - vint8m1_t v1 = __riscv_vreinterpret_v_u8m1_i8m1(x_l); - - vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl); - vint16m2_t vec_mul2 = __riscv_vwmacc_vv_i16m2(vec_mul1, v1, y1, vl); - - vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); - vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl); - - int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); - } - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector signed int v0 = vec_splats((int32_t)0); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - - vector float vsumf0 = vec_splats(0.0f); - -#pragma GCC unroll 4 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); - vector float vd = vec_mul(vxd, vyd); - - vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[ib].m)); - vector float vys = {GGML_FP16_TO_FP32(y[ib].s), 0.0f, 0.0f, 0.0f}; - vsumf0 = vec_madd(vxmin, vys, vsumf0); - - vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); - vector signed char q8y0 = vec_xl( 0, y[ib].qs); - vector signed char q8y1 = vec_xl(16, y[ib].qs); - - vector unsigned char q4x0 = (vector unsigned char)vec_and(qxs, lowMask); - vector unsigned char q4x1 = (vector unsigned char)vec_sr(qxs, v4); - - vector signed int vsumi0 = v0; - - vsumi0 = vec_msum(q8y0, q4x0, vsumi0); - vsumi0 = vec_msum(q8y1, q4x1, vsumi0); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - } - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - sumf = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - // Initialize accumulator with zeros - __m256 acc = (__m256)__lasx_xvldi(0); - - float summs = 0; - - // Main loop - for (; ib < nb; ++ib) { - const float d0 = GGML_FP16_TO_FP32(x[ib].d); - const float d1 = GGML_FP16_TO_FP32(y[ib].d); - - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); - - const __m256 d0v = __lasx_xvreplfr2vr_s( d0 ); - const __m256 d1v = __lasx_xvreplfr2vr_s( d1 ); - - // Compute combined scales - const __m256 d0d1 = __lasx_xvfmul_s( d0v, d1v ); - - // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes - const __m256i qx = bytes_from_nibbles_32(x[ib].qs); - const __m256i qy = __lasx_xvld( (const __m256i *)y[ib].qs, 0); - - 
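// q4_1 keeps the 4-bit values unsigned (no -8 offset); the per-block minimum m
// is accounted for separately through summs += m * s, where s is the
// precomputed, already-scaled sum of the matching q8_1 block.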
const __m256 xy = mul_sum_us8_pairs_float(qx, qy); - - // Accumulate d0*d1*x*y - acc = __lasx_xvfmadd_s( d0d1, xy, acc ); - } - - sumf = hsum_float_8(acc) + summs; -#elif defined(__VXE__) || defined(__VXE2__) - float summs = 0; - float32x4_t acc = vec_splats(0.0f); - - const uint8x16_t v_m = vec_splat_u8(0x0F); - -#pragma GCC unroll 4 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); - - const uint8x16_t v_x = vec_xl(0, x[ib].qs); - const int8x16_t v_xl = (const int8x16_t)(v_x & v_m); - const int8x16_t v_xh = (const int8x16_t)(v_x >> 4); - - const int8x16_t v_yl = vec_xl(0 , y[ib].qs); - const int8x16_t v_yh = vec_xl(QK8_1/2, y[ib].qs); - - const int32x4_t v_xy_ = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); - const float32x4_t v_xy = vec_float(v_xy_); - - const float32x4_t v_d = vec_splats(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - - acc = vec_madd(v_xy, v_d, acc); - } - - sumf = acc[0] + acc[1] + acc[2] + acc[3] + summs; -#endif - for (; ib < nb; ++ib) { - int sumi0 = 0; - int sumi1 = 0; - - for (int j = 0; j < qk/2; ++j) { - const int v0 = (x[ib].qs[j] & 0x0F); - const int v1 = (x[ib].qs[j] >> 4); - - sumi0 += (v0 * y[ib].qs[j]); - sumi1 += (v1 * y[ib].qs[j + qk/2]); - } - - int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); - } - - *s = sumf; -} - -void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - const int qk = QK8_0; - const int nb = n / qk; - - int ib = 0; - float sumf = 0; - - assert(n % qk == 0); - assert(qk == QK5_0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q5_0 * GGML_RESTRICT x = vx; - const block_q8_0 * GGML_RESTRICT y = vy; - -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - uint32_t qh0; - uint32_t qh1; - - uint64_t tmp0[4]; - uint64_t tmp1[4]; - - for (; ib + 1 < nb; ib += 2) { - const block_q5_0 * GGML_RESTRICT x0 = &x[ib]; - const block_q5_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - - // extract the 5th bit via lookup table ((!b) << 4) - memcpy(&qh0, x0->qh, sizeof(qh0)); - memcpy(&qh1, x1->qh, sizeof(qh1)); - - tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF]; - tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF]; - tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF]; - tmp0[3] = table_b2b_1[(qh0 >> 24) ]; - - tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF]; - tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF]; - tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF]; - tmp1[3] = table_b2b_1[(qh1 >> 24) ]; - - const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0)); - const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); - const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); - const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 
4)); - - // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) - const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0); - const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0); - const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1); - const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( - ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), - ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( - ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), - ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -#elif defined __wasm_simd128__ - v128_t sumv = wasm_f32x4_splat(0.0f); - - uint32_t qh_; - uint64_t tmp[4]; - - // TODO: check if unrolling this is better - for (; ib < nb; ++ib) { - const block_q5_0 * GGML_RESTRICT x0 = &x[ib]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; - - const v128_t m4b = wasm_i8x16_splat(0x0F); - - // extract the 5th bit - memcpy(&qh_, x0->qh, sizeof(qh_)); - - tmp[0] = table_b2b_1[(qh_ >> 0) & 0xFF]; - tmp[1] = table_b2b_1[(qh_ >> 8) & 0xFF]; - tmp[2] = table_b2b_1[(qh_ >> 16) & 0xFF]; - tmp[3] = table_b2b_1[(qh_ >> 24) ]; - - const v128_t qhl = wasm_v128_load(tmp + 0); - const v128_t qhh = wasm_v128_load(tmp + 2); - - const v128_t v0 = wasm_v128_load(x0->qs); - - // 4-bit -> 8-bit - const v128_t v0l = wasm_v128_and (v0, m4b); - const v128_t v0h = wasm_u8x16_shr(v0, 4); - - // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) - const v128_t v0lf = wasm_i8x16_sub(v0l, qhl); - const v128_t v0hf = wasm_i8x16_sub(v0h, qhh); - - // load y - const v128_t v1l = wasm_v128_load(y0->qs); - const v128_t v1h = wasm_v128_load(y0->qs + 16); - - // int8x16 -> int16x8 - const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf); - const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); - const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); - const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); - - const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); - const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); - const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); - const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); - - // dot product - sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4( - wasm_i32x4_add( - wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), - wasm_i32x4_dot_i16x8(v0lfh, v1lh)), - wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), - wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), - wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d)))); - } - - sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + - wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (; ib < nb; ++ib) { - /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - - __m256i qx = bytes_from_nibbles_32(x[ib].qs); - __m256i bxhi = bytes_from_bits_32(x[ib].qh); - bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0)); - qx = _mm256_or_si256(qx, bxhi); - - __m256i qy = 
_mm256_loadu_si256((const __m256i *)y[ib].qs); - - const __m256 q = mul_sum_i8_pairs_float(qx, qy); - - /* Multiply q with scale and accumulate */ - acc = _mm256_fmadd_ps(d, q, acc); - } - - sumf = hsum_float_8(acc); -#elif defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - __m128i mask = _mm_set1_epi8((char)0xF0); - - // Main loop - for (; ib < nb; ++ib) { - /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - - __m256i bx_0 = bytes_from_nibbles_32(x[ib].qs); - const __m256i bxhi = bytes_from_bits_32(x[ib].qh); - __m128i bxhil = _mm256_castsi256_si128(bxhi); - __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); - bxhil = _mm_andnot_si128(bxhil, mask); - bxhih = _mm_andnot_si128(bxhih, mask); - __m128i bxl = _mm256_castsi256_si128(bx_0); - __m128i bxh = _mm256_extractf128_si256(bx_0, 1); - bxl = _mm_or_si128(bxl, bxhil); - bxh = _mm_or_si128(bxh, bxhih); - bx_0 = MM256_SET_M128I(bxh, bxl); - - const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[ib].qs); - - const __m256 q = mul_sum_i8_pairs_float(bx_0, by_0); - - /* Multiply q with scale and accumulate */ - acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc); - } - - sumf = hsum_float_8(acc); -#elif defined(__riscv_v) - size_t vl; - size_t vlenb = __riscv_vlenb(); - - for (; ib < nb; ++ib) { - vl = qk / 2; - vuint8m1_t v0 = __riscv_vle8_v_u8m1(x[ib].qs, vl); - vint8m1_t v0l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(v0, 0x0F, vl)); - vint8m1_t v0h = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(v0, 4, vl)); - vint8m2_t v0c; - if (vlenb == 16) { - v0c = __riscv_vcreate_v_i8m1_i8m2(v0l, v0h); - } else { - v0l = __riscv_vslideup_vx_i8m1(v0l, v0h, 16, 32); - v0c = __riscv_vlmul_ext_v_i8m1_i8m2(v0l); - } - - vl = qk; - vbool4_t qh = __riscv_vlm_v_b4(x[ib].qh, vl); - qh = __riscv_vmnand_mm_b4(qh, qh, vl); - vint8m2_t v0f = __riscv_vsub_vx_i8m2_mu(qh, v0c, v0c, 0x10, vl); - vint8m2_t v1 = __riscv_vle8_v_i8m2(y[ib].qs, vl); - vint16m4_t mul = __riscv_vwmul_vv_i16m4(v0f, v1, vl); - vint32m1_t zero = __riscv_vmv_v_x_i32m1(0, vl); - vint32m1_t sum = __riscv_vwredsum_vs_i16m4_i32m1(mul, zero, vl); - int32_t sumi = __riscv_vmv_x_s_i32m1_i32(sum); - - sumf += (GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)) * sumi; - } - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector unsigned char v4 = vec_splats((unsigned char)4); - - vector float vsumf0 = vec_splats(0.0f); - -#pragma GCC unroll 4 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); - vector float vd = vec_mul(vxd, vyd); - - vector signed long long aux64x2_0 = {(uint64_t)(table_b2b_1[x[ib].qh[0]]), (uint64_t)(table_b2b_1[x[ib].qh[1]])}; - vector signed long long aux64x2_1 = {(uint64_t)(table_b2b_1[x[ib].qh[2]]), (uint64_t)(table_b2b_1[x[ib].qh[3]])}; - - vector signed char qh0 = (vector signed char)aux64x2_0; - vector signed char qh1 = (vector signed char)aux64x2_1; - - vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); - - vector signed char q5x0 = vec_sub(vec_and (qxs, lowMask), qh0); - vector signed char q5x1 = vec_sub(vec_sr(qxs, v4), qh1); - - vector signed char q8y0 = vec_xl( 0, y[ib].qs); - vector signed char q8y1 = vec_xl( 16, y[ib].qs); - - vector signed short qv0 = 
vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1)); - - qv0 = vec_add(qv0, qv1); - - vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - } - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - sumf = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - // Initialize accumulator with zeros - __m256 acc = (__m256)__lasx_xvldi(0); - - // Main loop - for (; ib < nb; ++ib) { - /* Compute combined scale for the block */ - const __m256 d = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); //FIXME - - __m256i qx = bytes_from_nibbles_32(x[ib].qs); - __m256i bxhi = bytes_from_bits_32(x[ib].qh); - bxhi = __lasx_xvandn_v(bxhi, __lasx_xvreplgr2vr_b((char)0xF0)); - qx = __lasx_xvor_v(qx, bxhi); - - __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); - - const __m256 q = mul_sum_i8_pairs_float(qx, qy); - - /* Multiply q with scale and accumulate */ - acc = __lasx_xvfmadd_s(d, q, acc); - } - - sumf = hsum_float_8(acc); -#endif - for (; ib < nb; ++ib) { - uint32_t qh; - memcpy(&qh, x[ib].qh, sizeof(qh)); - - int sumi0 = 0; - int sumi1 = 0; - - for (int j = 0; j < qk/2; ++j) { - const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; - const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); - - const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); - const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); - - sumi0 += (x0 * y[ib].qs[j]); - sumi1 += (x1 * y[ib].qs[j + qk/2]); - } - - int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; - } - - *s = sumf; -} - -void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - const int qk = QK8_1; - const int nb = n / qk; - - int ib = 0; - float sumf = 0; - - assert(n % qk == 0); - assert(qk == QK5_1); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q5_1 * GGML_RESTRICT x = vx; - const block_q8_1 * GGML_RESTRICT y = vy; - -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - float summs0 = 0.0f; - float summs1 = 0.0f; - - uint32_t qh0; - uint32_t qh1; - - uint64_t tmp0[4]; - uint64_t tmp1[4]; - - for (; ib + 1 < nb; ib += 2) { - const block_q5_1 * GGML_RESTRICT x0 = &x[ib]; - const block_q5_1 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_1 * GGML_RESTRICT y0 = &y[ib]; - const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - - summs0 += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s); - summs1 += GGML_FP16_TO_FP32(x1->m) * GGML_FP16_TO_FP32(y1->s); - - // extract the 5th bit via lookup table ((b) << 4) - memcpy(&qh0, x0->qh, sizeof(qh0)); - memcpy(&qh1, x1->qh, sizeof(qh1)); - - tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF]; - tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF]; - tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF]; - tmp0[3] = table_b2b_0[(qh0 >> 24) ]; - - tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF]; - tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF]; - tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF]; - tmp1[3] = table_b2b_0[(qh1 >> 24) ]; - - const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0)); - const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); - const int8x16_t qhl1 = 
vld1q_s8((const int8_t *)(tmp1 + 0)); - const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // add high bit - const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0); - const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0); - const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1); - const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( - ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), - ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( - ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), - ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1; -#elif defined __wasm_simd128__ - v128_t sumv = wasm_f32x4_splat(0.0f); - - float summs = 0.0f; - - uint32_t qh_; - uint64_t tmp[4]; - - // TODO: check if unrolling this is better - for (; ib < nb; ++ib) { - const block_q5_1 * GGML_RESTRICT x0 = &x[ib]; - const block_q8_1 * GGML_RESTRICT y0 = &y[ib]; - - summs += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s); - - const v128_t m4b = wasm_i8x16_splat(0x0F); - - // extract the 5th bit - memcpy(&qh_, x0->qh, sizeof(qh_)); - - tmp[0] = table_b2b_0[(qh_ >> 0) & 0xFF]; - tmp[1] = table_b2b_0[(qh_ >> 8) & 0xFF]; - tmp[2] = table_b2b_0[(qh_ >> 16) & 0xFF]; - tmp[3] = table_b2b_0[(qh_ >> 24) ]; - - const v128_t qhl = wasm_v128_load(tmp + 0); - const v128_t qhh = wasm_v128_load(tmp + 2); - - const v128_t v0 = wasm_v128_load(x0->qs); - - // 4-bit -> 8-bit - const v128_t v0l = wasm_v128_and (v0, m4b); - const v128_t v0h = wasm_u8x16_shr(v0, 4); - - // add high bit - const v128_t v0lf = wasm_v128_or(v0l, qhl); - const v128_t v0hf = wasm_v128_or(v0h, qhh); - - // load y - const v128_t v1l = wasm_v128_load(y0->qs); - const v128_t v1h = wasm_v128_load(y0->qs + 16); - - // int8x16 -> int16x8 - const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf); - const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); - const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); - const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); - - const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); - const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); - const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); - const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); - - // dot product - sumv = wasm_f32x4_add(sumv, - wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add( - wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), - wasm_i32x4_dot_i16x8(v0lfh, v1lh)), - wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), - wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), - wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d)))); - } - - sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + - wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs; -#elif 
defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - float summs = 0.0f; - - // Main loop - for (; ib < nb; ++ib) { - const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d)); - - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); - - __m256i qx = bytes_from_nibbles_32(x[ib].qs); - __m256i bxhi = bytes_from_bits_32(x[ib].qh); - bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10)); - qx = _mm256_or_si256(qx, bxhi); - - const __m256 dy = _mm256_set1_ps(GGML_FP16_TO_FP32(y[ib].d)); - const __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); - - const __m256 q = mul_sum_us8_pairs_float(qx, qy); - - acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc); - } - - sumf = hsum_float_8(acc) + summs; -#elif defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - __m128i mask = _mm_set1_epi8(0x10); - - float summs = 0.0f; - - // Main loop - for (; ib < nb; ++ib) { - const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d)); - - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); - - __m256i bx_0 = bytes_from_nibbles_32(x[ib].qs); - const __m256i bxhi = bytes_from_bits_32(x[ib].qh); - __m128i bxhil = _mm256_castsi256_si128(bxhi); - __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); - bxhil = _mm_and_si128(bxhil, mask); - bxhih = _mm_and_si128(bxhih, mask); - __m128i bxl = _mm256_castsi256_si128(bx_0); - __m128i bxh = _mm256_extractf128_si256(bx_0, 1); - bxl = _mm_or_si128(bxl, bxhil); - bxh = _mm_or_si128(bxh, bxhih); - bx_0 = MM256_SET_M128I(bxh, bxl); - - const __m256 dy = _mm256_set1_ps(GGML_FP16_TO_FP32(y[ib].d)); - const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[ib].qs); - - const __m256 q = mul_sum_us8_pairs_float(bx_0, by_0); - - acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc); - } - - sumf = hsum_float_8(acc) + summs; -#elif defined(__riscv_v) - size_t vl; - size_t vlenb = __riscv_vlenb(); - - for (; ib < nb; ++ib) { - vl = qk / 2; - vuint8m1_t v0 = __riscv_vle8_v_u8m1(x[ib].qs, vl); - vint8m1_t v0l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(v0, 0x0F, vl)); - vint8m1_t v0h = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(v0, 4, vl)); - vint8m2_t v0c; - if (vlenb == 16) { - v0c = __riscv_vcreate_v_i8m1_i8m2(v0l, v0h); - } else { - v0l = __riscv_vslideup_vx_i8m1(v0l, v0h, 16, 32); - v0c = __riscv_vlmul_ext_v_i8m1_i8m2(v0l); - } - - vl = qk; - vbool4_t qh = __riscv_vlm_v_b4(x[ib].qh, vl); - vint8m2_t v0f = __riscv_vor_vx_i8m2_mu(qh, v0c, v0c, 0x10, vl); - vint8m2_t v1 = __riscv_vle8_v_i8m2(y[ib].qs, vl); - vint16m4_t mul = __riscv_vwmul_vv_i16m4(v0f, v1, vl); - vint32m1_t zero = __riscv_vmv_v_x_i32m1(0, vl); - vint32m1_t sum = __riscv_vwredsum_vs_i16m4_i32m1(mul, zero, vl); - int32_t sumi = __riscv_vmv_x_s_i32m1_i32(sum); - - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); - } - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector signed int v0 = vec_splats((int32_t)0); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - - vector float vsumf0 = vec_splats(0.0f); - -#pragma GCC unroll 4 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); - vector float vd = vec_mul(vxd, vyd); - - vector float vxmin = 
vec_splats(GGML_FP16_TO_FP32(x[ib].m)); - vector float vys = {GGML_FP16_TO_FP32(y[ib].s), 0.f, 0.f, 0.f}; - vsumf0 = vec_madd(vxmin, vys, vsumf0); - - vector unsigned long long aux64x2_0 = {(uint64_t)(table_b2b_0[x[ib].qh[0]]), (uint64_t)(table_b2b_0[x[ib].qh[1]])}; - vector unsigned long long aux64x2_1 = {(uint64_t)(table_b2b_0[x[ib].qh[2]]), (uint64_t)(table_b2b_0[x[ib].qh[3]])}; - - vector signed char qh0 = (vector signed char)aux64x2_0; - vector signed char qh1 = (vector signed char)aux64x2_1; - - vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); - - vector unsigned char q5x0 = (vector unsigned char)vec_or(vec_and(qxs, lowMask), qh0); - vector unsigned char q5x1 = (vector unsigned char)vec_or(vec_sr(qxs, v4), qh1); - - vector signed char q8y0 = vec_xl( 0, y[ib].qs); - vector signed char q8y1 = vec_xl( 16, y[ib].qs); - - vector signed int vsumi0 = v0; - - vsumi0 = vec_msum(q8y0, q5x0, vsumi0); - vsumi0 = vec_msum(q8y1, q5x1, vsumi0); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - } - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - sumf = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - // Initialize accumulator with zeros - __m256 acc = (__m256)__lasx_xvldi(0); - - float summs = 0.0f; - - // Main loop - for (; ib < nb; ++ib) { - const __m256 dx = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ib].d)); - - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); - - __m256i qx = bytes_from_nibbles_32(x[ib].qs); - __m256i bxhi = bytes_from_bits_32(x[ib].qh); - bxhi = __lasx_xvand_v(bxhi, __lasx_xvreplgr2vr_b(0x10)); - qx = __lasx_xvor_v(qx, bxhi); - - const __m256 dy = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(y[ib].d)); - const __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); - - const __m256 q = mul_sum_us8_pairs_float(qx, qy); - - acc = __lasx_xvfmadd_s(q, __lasx_xvfmul_s(dx, dy), acc); - } - - sumf = hsum_float_8(acc) + summs; -#endif - for (; ib < nb; ++ib) { - uint32_t qh; - memcpy(&qh, x[ib].qh, sizeof(qh)); - - int sumi0 = 0; - int sumi1 = 0; - - for (int j = 0; j < qk/2; ++j) { - const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; - const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; - - const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; - const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; - - sumi0 += (x0 * y[ib].qs[j]); - sumi1 += (x1 * y[ib].qs[j + qk/2]); - } - - int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); - } - - *s = sumf; -} - -void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - const int qk = QK8_0; - const int nb = n / qk; - - assert(n % qk == 0); -#if defined(__ARM_FEATURE_MATMUL_INT8) - assert((nrc == 2) || (nrc == 1)); -#else - assert(nrc == 1); -#endif - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q8_0 * GGML_RESTRICT x = vx; - const block_q8_0 * GGML_RESTRICT y = vy; - -#if defined(__ARM_FEATURE_MATMUL_INT8) - if (nrc == 2) { - const block_q8_0 * GGML_RESTRICT vx0 = vx; - const block_q8_0 * GGML_RESTRICT vx1 = (const block_q8_0 *) ((const uint8_t*)vx + bx); - const block_q8_0 * GGML_RESTRICT vy0 = vy; - const block_q8_0 * GGML_RESTRICT vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); - - float32x4_t sumv0 = vdupq_n_f32(0.0f); - - for (int i = 0; i < nb; i++) { - const block_q8_0 * GGML_RESTRICT b_x0 = &vx0[i]; - 
const block_q8_0 * GGML_RESTRICT b_y0 = &vy0[i]; - - const block_q8_0 * GGML_RESTRICT b_x1 = &vx1[i]; - const block_q8_0 * GGML_RESTRICT b_y1 = &vy1[i]; - - const int8x16_t x0_l = vld1q_s8(b_x0->qs); - const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16); - const int8x16_t x1_l = vld1q_s8(b_x1->qs); - const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16); - - // load y - const int8x16_t y0_l = vld1q_s8(b_y0->qs); - const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); - const int8x16_t y1_l = vld1q_s8(b_y1->qs); - const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - - float32_t _scale[4] = { - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) - }; - float32x4_t scale = vld1q_f32(_scale); - - int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - - int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - - int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - - int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - - sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), - l1, r1)), l2, r2)), l3, r3))), scale); - } - - float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); - float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); - - vst1_f32(s, vget_low_f32 (sumv2)); - vst1_f32(s + bs, vget_high_f32(sumv2)); - - return; - } -#endif - - int ib = 0; - float sumf = 0; - -#if defined(__ARM_FEATURE_SVE) - svfloat32_t sumv0 = svdup_n_f32(0.0f); - svfloat32_t sumv1 = svdup_n_f32(0.0f); - - const int vector_length = ggml_cpu_get_sve_cnt()*8; - - //VLA Implemenation for SVE - switch (vector_length) { - case 128: - { - // predicate for activating lanes for 16 Int8 elements - const svbool_t ph16 = svptrue_pat_b8 (SV_VL16); - const svbool_t pl16 = svptrue_pat_b32(SV_VL4); - - for (; ib + 1 < nb; ib += 2) { - const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - // load x - const svint8_t qx0_0 = svld1_s8(ph16, x0->qs); - const svint8_t qx0_1 = svld1_s8(ph16, x0->qs+16); - const svint8_t qx1_0 = svld1_s8(ph16, x1->qs); - const svint8_t qx1_1 = svld1_s8(ph16, x1->qs+16); - - // load y - const svint8_t qy0_0 = svld1_s8(ph16, y0->qs); - const svint8_t qy0_1 = svld1_s8(ph16, y0->qs+16); - const svint8_t qy1_0 = svld1_s8(ph16, y1->qs); - const svint8_t qy1_1 = svld1_s8(ph16, y1->qs+16); - - sumv0 = svmla_n_f32_x(pl16, sumv0, svcvt_f32_s32_x(pl16, svadd_x(pl16, - svdot_s32(svdup_n_s32(0), qx0_0, qy0_0), - svdot_s32(svdup_n_s32(0), qx0_1, qy0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = svmla_n_f32_x(pl16, sumv1, svcvt_f32_s32_x(pl16, svadd_x(pl16, - svdot_s32(svdup_n_s32(0), qx1_0, qy1_0), - svdot_s32(svdup_n_s32(0), 
qx1_1, qy1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = svaddv_f32(pl16, svadd_f32_x(pl16, sumv0, sumv1)); - } break; - case 256: - { - //printf("sve256"); - for (; ib + 1 < nb; ib += 2) { - const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - // load x - const svint8_t qx0 = svld1_s8(svptrue_b8(), x0->qs); - const svint8_t qx1 = svld1_s8(svptrue_b8(), x1->qs); - - // load y - const svint8_t qy0 = svld1_s8(svptrue_b8(), y0->qs); - const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs); - - sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(), - svdot_s32(svdup_n_s32(0), qx0, qy0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(), - svdot_s32(svdup_n_s32(0), qx1, qy1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); - } break; - case 512: - { - // predicate for activating high 256 bit - const svbool_t ph32 = svptrue_pat_b8(SV_VL32); - // predicate for activating low 256 bit - const svbool_t pl32 = svnot_b_z(svptrue_b8(), ph32); - - // predicate for activating high lanes for 8 float32 elements - const svbool_t ph8 = svptrue_pat_b32(SV_VL8); - // predicate for activating low lanes for 8 float32 elements - const svbool_t pl8 = svnot_b_z(svptrue_b32(), ph8); - - svfloat32_t sumv00 = svdup_n_f32(0.0f); - - for (; ib + 1 < nb; ib += 2) { - const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - //load 32 int8_t in first half of vector and put another 32 int8_t in second vector lower bits - // and add them to make one 64 element vector - // load x - const svint8_t qx_32 = svld1_s8(ph32, x0->qs); - svint8_t qx_64 = svld1_s8(pl32, x0->qs + 2); - - qx_64 = svadd_s8_x(svptrue_b8(), qx_32, qx_64); - - // load y - const svint8_t qy_32 = svld1_s8(ph32, y0->qs); - svint8_t qy_64 = svld1_s8(pl32, y0->qs + 2); - - qy_64 = svadd_s8_x(svptrue_b8(), qy_32, qy_64); - - // scale creation - const float32_t deq1 = GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d); - const float32_t deq2 = GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d); - - // duplicate deq1 in first half of vector and deq2 in second half of vector - const svfloat32_t temp = svdup_f32_m(svdup_f32_z(ph8, deq1), pl8, deq2); - - const svfloat32_t sumvt = svcvt_f32_s32_x(svptrue_b32(), svdot_s32(svdup_n_s32(0), qx_64, qy_64)); - - sumv00 = svmla_f32_m(svptrue_b32(), sumv00, sumvt, temp); - } - - sumf = svaddv_f32(svptrue_b32(), sumv00); - break; - } - default: - assert(false && "Unsupported vector length"); - break; - } -#elif defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - for (; ib + 1 < nb; ib += 2) { - const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - const int8x16_t x0_0 = vld1q_s8(x0->qs); - const int8x16_t x0_1 = vld1q_s8(x0->qs + 16); - const int8x16_t x1_0 = vld1q_s8(x1->qs); - const int8x16_t x1_1 = vld1q_s8(x1->qs + 16); - - // load y - const int8x16_t y0_0 = vld1q_s8(y0->qs); - const int8x16_t y0_1 = 
vld1q_s8(y0->qs + 16); - const int8x16_t y1_0 = vld1q_s8(y1->qs); - const int8x16_t y1_1 = vld1q_s8(y1->qs + 16); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( - ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), - ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( - ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), - ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -#elif defined __wasm_simd128__ - v128_t sumv = wasm_f32x4_splat(0.0f); - - for (; ib < nb; ++ib) { - const block_q8_0 * GGML_RESTRICT x0 = &x[ib]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; - - const v128_t x0_0 = wasm_v128_load(x0->qs); - const v128_t x0_1 = wasm_v128_load(x0->qs + 16); - const v128_t y0_0 = wasm_v128_load(y0->qs); - const v128_t y0_1 = wasm_v128_load(y0->qs + 16); - - // Extend 8-bit to 16-bit - const v128_t x0_0l = wasm_i16x8_extend_low_i8x16(x0_0); - const v128_t x0_0h = wasm_i16x8_extend_high_i8x16(x0_0); - const v128_t x0_1l = wasm_i16x8_extend_low_i8x16(x0_1); - const v128_t x0_1h = wasm_i16x8_extend_high_i8x16(x0_1); - - const v128_t y0_0l = wasm_i16x8_extend_low_i8x16(y0_0); - const v128_t y0_0h = wasm_i16x8_extend_high_i8x16(y0_0); - const v128_t y0_1l = wasm_i16x8_extend_low_i8x16(y0_1); - const v128_t y0_1h = wasm_i16x8_extend_high_i8x16(y0_1); - - // Compute dot products - const v128_t dx0_0 = wasm_i32x4_dot_i16x8(x0_0l, y0_0l); - const v128_t dx0_1 = wasm_i32x4_dot_i16x8(x0_0h, y0_0h); - const v128_t dx1_0 = wasm_i32x4_dot_i16x8(x0_1l, y0_1l); - const v128_t dx1_1 = wasm_i32x4_dot_i16x8(x0_1h, y0_1h); - - // Sum all dot products - const v128_t sum_dots = wasm_i32x4_add(wasm_i32x4_add(dx0_0, dx0_1), wasm_i32x4_add(dx1_0, dx1_1)); - - // Convert to float and accumulate - const float scale = GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d); - sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(sum_dots), wasm_f32x4_splat(scale))); - } - - sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + - wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (; ib < nb; ++ib) { - // Compute combined scale for the block - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - __m256i qx = _mm256_loadu_si256((const __m256i *)x[ib].qs); - __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); - - const __m256 q = mul_sum_i8_pairs_float(qx, qy); - - // Multiply q with scale and accumulate - acc = _mm256_fmadd_ps( d, q, acc ); - } - - sumf = hsum_float_8(acc); -#elif defined(__AVX__) - __m256 accum = _mm256_setzero_ps(); - - for (; ib + 1 < nb; ib += 2) { - const __m128i qx_1_0 = _mm_loadu_si128((const __m128i *)x[ib].qs); - const __m128i qx_1_1 = _mm_loadu_si128((const __m128i *)x[ib].qs + 1); - const __m128i qx_2_0 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); - const __m128i qx_2_1 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs + 1); - const __m128i qy_1_0 = _mm_loadu_si128((const __m128i *)y[ib].qs); - const __m128i qy_1_1 = _mm_loadu_si128((const __m128i *)y[ib].qs + 1); - const __m128i qy_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); - const __m128i qy_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); - - const __m256 p = mul_sum_i8_quad_float(qx_1_0, qx_1_1, qx_2_0, 
qx_2_1, qy_1_0, qy_1_1, qy_2_0, qy_2_1); - const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); - accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); - } - - sumf = hsum_float_8(accum); -#elif defined(__riscv_v) - size_t vl = qk; - - for (; ib < nb; ++ib) { - // load elements - vint8m2_t bx_0 = __riscv_vle8_v_i8m2(x[ib].qs, vl); - vint8m2_t by_0 = __riscv_vle8_v_i8m2(y[ib].qs, vl); - - vint16m4_t vw_mul = __riscv_vwmul_vv_i16m4(bx_0, by_0, vl); - - vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl); - vint32m1_t v_sum = __riscv_vwredsum_vs_i16m4_i32m1(vw_mul, v_zero, vl); - - int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum); - - sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); - } -#elif defined(__POWER9_VECTOR__) - const vector signed int v0 = vec_splats((int32_t)0); - vector float vsumf0 = vec_splats(0.0f); - -#pragma GCC unroll 8 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); - vector float vd = vec_mul(vxd, vyd); - - vector signed char q8x0 = vec_xl( 0, x[ib].qs); - vector signed char q8x1 = vec_xl(16, x[ib].qs); - vector signed char q8y0 = vec_xl( 0, y[ib].qs); - vector signed char q8y1 = vec_xl(16, y[ib].qs); - - vector signed short qv0 = vec_mule(q8x0, q8y0); - vector signed short qv1 = vec_mulo(q8x0, q8y0); - vector signed short qv2 = vec_mule(q8x1, q8y1); - vector signed short qv3 = vec_mulo(q8x1, q8y1); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - - vsumi0 = vec_sum4s(qv0, vsumi0); - vsumi1 = vec_sum4s(qv1, vsumi1); - vsumi0 = vec_sum4s(qv2, vsumi0); - vsumi1 = vec_sum4s(qv3, vsumi1); - - vsumi0 = vec_add(vsumi0, vsumi1); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - } - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - sumf = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - // Initialize accumulator with zeros - __m256 acc = (__m256)__lasx_xvldi(0); - - // Main loop - for (; ib < nb; ++ib) { - // Compute combined scale for the block - const __m256 d = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - __m256i qx = __lasx_xvld((const __m256i *)x[ib].qs, 0); - __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); - - const __m256 q = mul_sum_i8_pairs_float(qx, qy); - - // Multiply q with scale and accumulate - acc = __lasx_xvfmadd_s( d, q, acc ); - } - - sumf = hsum_float_8(acc); -#elif defined(__VXE__) || defined(__VXE2__) - __vector float acc = vec_splats(0.0f); - -#pragma GCC unroll 8 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - const int8x16_t v_xl = vec_xl(0 , x[ib].qs); - const int8x16_t v_xh = vec_xl(QK8_0/2, x[ib].qs); - const int8x16_t v_yl = vec_xl(0 , y[ib].qs); - const int8x16_t v_yh = vec_xl(QK8_0/2, y[ib].qs); - - const int32x4_t v_xy_ = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); - const float32x4_t v_xy = vec_float(v_xy_); - const float32x4_t v_d = vec_splats(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - - acc = vec_madd(v_xy, v_d, acc); - } - - sumf = acc[0] + acc[1] + acc[2] + acc[3]; -#endif - for (; ib < nb; ++ib) { - int sumi = 0; - - for (int j = 0; j < qk; j++) { - sumi += x[ib].qs[j]*y[ib].qs[j]; - } - - sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); - } - - *s = 
sumf; -} - -void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_tq1_0 * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_NEON) - float sumf = 0.0f; - - uint8_t k_shift[16] = {1, 1, 1, 1, 3, 3, 3, 3, 9, 9, 9, 9, 27, 27, 27, 27}; - - const uint8x16_t shift = vld1q_u8(k_shift); - - for (int i = 0; i < nb; ++i) { -#if defined(__ARM_FEATURE_DOTPROD) - int32x4_t sumi0 = vdupq_n_s32(0); - int32x4_t sumi1 = vdupq_n_s32(0); -#else - int16x8_t sumi0 = vdupq_n_s16(0); - int16x8_t sumi1 = vdupq_n_s16(0); -#endif - - // first 32 bytes of 5 elements - { - uint8x16_t qx0 = vld1q_u8(x[i].qs + 0); - uint8x16_t qx1 = vld1q_u8(x[i].qs + 16); - uint8x16_t qx2 = vmulq_u8(qx0, vdupq_n_u8(3)); - uint8x16_t qx3 = vmulq_u8(qx1, vdupq_n_u8(3)); - uint8x16_t qx4 = vmulq_u8(qx0, vdupq_n_u8(9)); - uint8x16_t qx5 = vmulq_u8(qx1, vdupq_n_u8(9)); - uint8x16_t qx6 = vmulq_u8(qx0, vdupq_n_u8(27)); - uint8x16_t qx7 = vmulq_u8(qx1, vdupq_n_u8(27)); - uint8x16_t qx8 = vmulq_u8(qx0, vdupq_n_u8(81)); - uint8x16_t qx9 = vmulq_u8(qx1, vdupq_n_u8(81)); - - // multiply by 3 and keep the 2 bits above 8 bits - int8x16_t sqx0 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx0, vshrq_n_u8(qx0, 1)), 6)); - int8x16_t sqx1 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx1, vshrq_n_u8(qx1, 1)), 6)); - int8x16_t sqx2 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx2, vshrq_n_u8(qx2, 1)), 6)); - int8x16_t sqx3 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx3, vshrq_n_u8(qx3, 1)), 6)); - int8x16_t sqx4 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx4, vshrq_n_u8(qx4, 1)), 6)); - int8x16_t sqx5 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx5, vshrq_n_u8(qx5, 1)), 6)); - int8x16_t sqx6 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx6, vshrq_n_u8(qx6, 1)), 6)); - int8x16_t sqx7 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx7, vshrq_n_u8(qx7, 1)), 6)); - int8x16_t sqx8 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx8, vshrq_n_u8(qx8, 1)), 6)); - int8x16_t sqx9 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx9, vshrq_n_u8(qx9, 1)), 6)); - - const int8x16_t qy0 = vld1q_s8(y[i].qs + 0); - const int8x16_t qy1 = vld1q_s8(y[i].qs + 16); - const int8x16_t qy2 = vld1q_s8(y[i].qs + 32); - const int8x16_t qy3 = vld1q_s8(y[i].qs + 48); - const int8x16_t qy4 = vld1q_s8(y[i].qs + 64); - const int8x16_t qy5 = vld1q_s8(y[i].qs + 80); - const int8x16_t qy6 = vld1q_s8(y[i].qs + 96); - const int8x16_t qy7 = vld1q_s8(y[i].qs + 112); - const int8x16_t qy8 = vld1q_s8(y[i].qs + 128); - const int8x16_t qy9 = vld1q_s8(y[i].qs + 144); - -#if defined(__ARM_FEATURE_DOTPROD) - sumi0 = vdotq_s32(sumi0, sqx0, qy0); - sumi1 = vdotq_s32(sumi1, sqx1, qy1); - sumi0 = vdotq_s32(sumi0, sqx2, qy2); - sumi1 = vdotq_s32(sumi1, sqx3, qy3); - sumi0 = vdotq_s32(sumi0, sqx4, qy4); - sumi1 = vdotq_s32(sumi1, sqx5, qy5); - sumi0 = vdotq_s32(sumi0, sqx6, qy6); - sumi1 = vdotq_s32(sumi1, sqx7, qy7); - sumi0 = vdotq_s32(sumi0, sqx8, qy8); - sumi1 = vdotq_s32(sumi1, sqx9, qy9); -#else - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), 
vget_high_s8(qy2)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx6), vget_low_s8(qy6)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx6), vget_high_s8(qy6)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx7), vget_low_s8(qy7)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx7), vget_high_s8(qy7)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx8), vget_low_s8(qy8)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx8), vget_high_s8(qy8)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx9), vget_low_s8(qy9)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx9), vget_high_s8(qy9)); -#endif - } - - // last 16 bytes of 5-element, along with the 4 bytes of 4 elements - { - uint8x16_t qx0 = vld1q_u8(x[i].qs + 32); - uint8x16_t qx1 = vmulq_u8(qx0, vdupq_n_u8(3)); - uint8x16_t qx2 = vmulq_u8(qx0, vdupq_n_u8(9)); - uint8x16_t qx3 = vmulq_u8(qx0, vdupq_n_u8(27)); - uint8x16_t qx4 = vmulq_u8(qx0, vdupq_n_u8(81)); - uint32_t qh; - memcpy(&qh, x[i].qh, sizeof(qh)); // potentially unaligned - uint8x16_t qx5 = vreinterpretq_u8_u32(vdupq_n_u32(qh)); - qx5 = vmulq_u8(qx5, shift); - - // multiply by 3 and keep the 2 bits above 8 bits - int8x16_t sqx0 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx0, vshrq_n_u8(qx0, 1)), 6)); - int8x16_t sqx1 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx1, vshrq_n_u8(qx1, 1)), 6)); - int8x16_t sqx2 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx2, vshrq_n_u8(qx2, 1)), 6)); - int8x16_t sqx3 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx3, vshrq_n_u8(qx3, 1)), 6)); - int8x16_t sqx4 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx4, vshrq_n_u8(qx4, 1)), 6)); - int8x16_t sqx5 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx5, vshrq_n_u8(qx5, 1)), 6)); - - const int8x16_t qy0 = vld1q_s8(y[i].qs + 160); - const int8x16_t qy1 = vld1q_s8(y[i].qs + 176); - const int8x16_t qy2 = vld1q_s8(y[i].qs + 192); - const int8x16_t qy3 = vld1q_s8(y[i].qs + 208); - const int8x16_t qy4 = vld1q_s8(y[i].qs + 224); - const int8x16_t qy5 = vld1q_s8(y[i].qs + 240); - -#if defined(__ARM_FEATURE_DOTPROD) - sumi0 = vdotq_s32(sumi0, sqx0, qy0); - sumi1 = vdotq_s32(sumi1, sqx1, qy1); - sumi0 = vdotq_s32(sumi0, sqx2, qy2); - sumi1 = vdotq_s32(sumi1, sqx3, qy3); - sumi0 = vdotq_s32(sumi0, sqx4, qy4); - sumi1 = vdotq_s32(sumi1, sqx5, qy5); -#else - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5)); -#endif - } - - const int16x8_t ysum0 = vld1q_s16(y[i].bsums); - const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8); - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - 
-#if defined(__ARM_FEATURE_DOTPROD) - sumi0 = vaddq_s32(sumi0, sumi1); - sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1))); - - sumf += d * (float) vaddvq_s32(sumi0); -#else - sumi0 = vaddq_s16(sumi0, sumi1); - sumi0 = vsubq_s16(sumi0, vaddq_s16(ysum0, ysum1)); - - sumf += d * (float) vaddlvq_s16(sumi0); -#endif - } - - *s = sumf; - -#elif defined(__AVX2__) - __m256 sumf = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - // 16-bit sums - __m256i sumi0 = _mm256_setzero_si256(); - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - - // first 32 bytes of 5 elements - { - __m256i qx0 = _mm256_loadu_si256((const __m256i *) (x[i].qs)); - // 8-bit multiplies with shifts, masks and adds - __m256i qx1 = _mm256_add_epi8(qx0, _mm256_add_epi8(qx0, qx0)); // 1 * 3 - __m256i qx2 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx0, 3), _mm256_set1_epi8(-8)), qx0); // 1 * 9 - __m256i qx3 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx1, 3), _mm256_set1_epi8(-8)), qx1); // 3 * 9 - __m256i qx4 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx2, 3), _mm256_set1_epi8(-8)), qx2); // 9 * 9 - - // TODO: can _mm256_mulhi_epu16 be faster even if 16-bits? - - // Cancel the +1 from avg so that it behaves like a halving add - qx0 = _mm256_subs_epu8(qx0, _mm256_set1_epi8(1)); - qx1 = _mm256_subs_epu8(qx1, _mm256_set1_epi8(1)); - qx2 = _mm256_subs_epu8(qx2, _mm256_set1_epi8(1)); - qx3 = _mm256_subs_epu8(qx3, _mm256_set1_epi8(1)); - qx4 = _mm256_subs_epu8(qx4, _mm256_set1_epi8(1)); - // Multiply by 3 and get the top 2 bits - qx0 = _mm256_avg_epu8(qx0, _mm256_avg_epu8(qx0, _mm256_setzero_si256())); - qx1 = _mm256_avg_epu8(qx1, _mm256_avg_epu8(qx1, _mm256_setzero_si256())); - qx2 = _mm256_avg_epu8(qx2, _mm256_avg_epu8(qx2, _mm256_setzero_si256())); - qx3 = _mm256_avg_epu8(qx3, _mm256_avg_epu8(qx3, _mm256_setzero_si256())); - qx4 = _mm256_avg_epu8(qx4, _mm256_avg_epu8(qx4, _mm256_setzero_si256())); - qx0 = _mm256_and_si256(_mm256_srli_epi16(qx0, 6), _mm256_set1_epi8(3)); - qx1 = _mm256_and_si256(_mm256_srli_epi16(qx1, 6), _mm256_set1_epi8(3)); - qx2 = _mm256_and_si256(_mm256_srli_epi16(qx2, 6), _mm256_set1_epi8(3)); - qx3 = _mm256_and_si256(_mm256_srli_epi16(qx3, 6), _mm256_set1_epi8(3)); - qx4 = _mm256_and_si256(_mm256_srli_epi16(qx4, 6), _mm256_set1_epi8(3)); - - const __m256i qy0 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 0)); - const __m256i qy1 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 32)); - const __m256i qy2 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 64)); - const __m256i qy3 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 96)); - const __m256i qy4 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 128)); - - qx0 = _mm256_maddubs_epi16(qx0, qy0); - qx1 = _mm256_maddubs_epi16(qx1, qy1); - qx2 = _mm256_maddubs_epi16(qx2, qy2); - qx3 = _mm256_maddubs_epi16(qx3, qy3); - qx4 = _mm256_maddubs_epi16(qx4, qy4); - - sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(qx0, qx1)); - sumi1 = _mm256_add_epi16(sumi1, _mm256_add_epi16(qx2, qx3)); - sumi2 = _mm256_add_epi16(sumi2, qx4); - } - - // last 16 bytes of 5-element, along with the 4 bytes of 4 elements - { - __m128i qx0 = _mm_loadu_si128((const __m128i *) (x[i].qs + 32)); - uint32_t qh; - memcpy(&qh, x[i].qh, sizeof(qh)); // potentially unaligned - __m256i qx5_l = _mm256_cvtepu8_epi16(_mm_set1_epi32(qh)); - __m128i qx1 = _mm_add_epi8(qx0, _mm_add_epi8(qx0, qx0)); // 1 * 3 - __m128i qx2 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx0, 3), _mm_set1_epi8(-8)), qx0); // 1 * 9 - 
__m128i qx3 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx1, 3), _mm_set1_epi8(-8)), qx1); // 3 * 9 - __m128i qx4 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx2, 3), _mm_set1_epi8(-8)), qx2); // 9 * 9 - __m256i qx01 = MM256_SET_M128I(qx1, qx0); - __m256i qx23 = MM256_SET_M128I(qx3, qx2); - - // avx2 does not have 8-bit multiplies, so 16-bit it is. - qx5_l = _mm256_mullo_epi16(qx5_l, _mm256_set_epi16(27, 27, 27, 27, 9, 9, 9, 9, 3, 3, 3, 3, 1, 1, 1, 1)); - qx5_l = _mm256_and_si256(qx5_l, _mm256_set1_epi16(0xFF)); - __m128i qx5 = _mm_packus_epi16(_mm256_castsi256_si128(qx5_l), _mm256_extracti128_si256(qx5_l, 1)); - - __m256i qx45 = MM256_SET_M128I(qx5, qx4); - - // Cancel the +1 from avg so that it behaves like a halving add - qx01 = _mm256_subs_epu8(qx01, _mm256_set1_epi8(1)); - qx23 = _mm256_subs_epu8(qx23, _mm256_set1_epi8(1)); - qx45 = _mm256_subs_epu8(qx45, _mm256_set1_epi8(1)); - // Multiply by 3 and get the top 2 bits - qx01 = _mm256_avg_epu8(qx01, _mm256_avg_epu8(qx01, _mm256_setzero_si256())); - qx23 = _mm256_avg_epu8(qx23, _mm256_avg_epu8(qx23, _mm256_setzero_si256())); - qx45 = _mm256_avg_epu8(qx45, _mm256_avg_epu8(qx45, _mm256_setzero_si256())); - qx01 = _mm256_and_si256(_mm256_srli_epi16(qx01, 6), _mm256_set1_epi8(3)); - qx23 = _mm256_and_si256(_mm256_srli_epi16(qx23, 6), _mm256_set1_epi8(3)); - qx45 = _mm256_and_si256(_mm256_srli_epi16(qx45, 6), _mm256_set1_epi8(3)); - - const __m256i qy01 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 160)); - const __m256i qy23 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 192)); - const __m256i qy45 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 224)); - - qx01 = _mm256_maddubs_epi16(qx01, qy01); - qx23 = _mm256_maddubs_epi16(qx23, qy23); - qx45 = _mm256_maddubs_epi16(qx45, qy45); - - sumi0 = _mm256_add_epi16(sumi0, qx01); - sumi1 = _mm256_add_epi16(sumi1, qx23); - sumi2 = _mm256_add_epi16(sumi2, qx45); - } - - const __m256i ysum = _mm256_loadu_si256((const __m256i *) y[i].bsums); - const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(x[i].d)); - - sumi0 = _mm256_sub_epi16(sumi0, ysum); - sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(sumi1, sumi2)); - sumi0 = _mm256_madd_epi16(sumi0, _mm256_set1_epi16(1)); - - sumf = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(sumi0), d), sumf); - } - - *s = hsum_float_8(sumf); - -#else - const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243}; - - float sumf = 0.0f; - - for (int i = 0; i < nb; ++i) { - int sum = 0; - - for (size_t j = 0; j < sizeof(x->qs) - sizeof(x->qs) % 32; j += 32) { - for (size_t l = 0; l < 5; ++l) { - for (size_t m = 0; m < 32; ++m) { - uint8_t q = x[i].qs[j + m] * pow3[l]; - uint16_t xi = ((uint16_t) q * 3) >> 8; - sum += (xi - 1) * y[i].qs[j*5 + l*32 + m]; - } - } - } - for (size_t j = sizeof(x->qs) - sizeof(x->qs) % 32; j < sizeof(x->qs); j += 16) { - for (size_t l = 0; l < 5; ++l) { - for (size_t m = 0; m < 16; ++m) { - uint8_t q = x[i].qs[j + m] * pow3[l]; - uint16_t xi = ((uint16_t) q * 3) >> 8; - sum += (xi - 1) * y[i].qs[j*5 + l*16 + m]; - } - } - } - - for (size_t l = 0; l < 4; ++l) { - for (size_t j = 0; j < sizeof(x->qh); ++j) { - uint8_t q = x[i].qh[j] * pow3[l]; - uint16_t xi = ((uint16_t) q * 3) >> 8; - sum += (xi - 1) * y[i].qs[sizeof(x->qs)*5 + l*sizeof(x->qh) + j]; - } - } - - sumf += (float) sum * (GGML_FP16_TO_FP32(x[i].d) * y[i].d); - } - - *s = sumf; -#endif -} - -void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(nrc == 1); - 
UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_tq2_0 * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_NEON) - float sumf = 0.0f; - - const uint8x16_t m3 = vdupq_n_u8(3); - - for (int i = 0; i < nb; ++i) { -#if defined(__ARM_FEATURE_DOTPROD) - int32x4_t sumi0 = vdupq_n_s32(0); - int32x4_t sumi1 = vdupq_n_s32(0); -#else - int16x8_t sumi0 = vdupq_n_s16(0); - int16x8_t sumi1 = vdupq_n_s16(0); -#endif - - for (size_t j = 0; j < sizeof(x->qs); j += 32) { - uint8x16_t qx0 = vld1q_u8(x[i].qs + j); - uint8x16_t qx1 = vld1q_u8(x[i].qs + j + 16); - uint8x16_t qx2 = vshrq_n_u8(qx0, 2); - uint8x16_t qx3 = vshrq_n_u8(qx1, 2); - uint8x16_t qx4 = vshrq_n_u8(qx0, 4); - uint8x16_t qx5 = vshrq_n_u8(qx1, 4); - uint8x16_t qx6 = vshrq_n_u8(qx0, 6); - uint8x16_t qx7 = vshrq_n_u8(qx1, 6); - - int8x16_t sqx0 = vreinterpretq_s8_u8(vandq_u8(qx0, m3)); - int8x16_t sqx1 = vreinterpretq_s8_u8(vandq_u8(qx1, m3)); - int8x16_t sqx2 = vreinterpretq_s8_u8(vandq_u8(qx2, m3)); - int8x16_t sqx3 = vreinterpretq_s8_u8(vandq_u8(qx3, m3)); - int8x16_t sqx4 = vreinterpretq_s8_u8(vandq_u8(qx4, m3)); - int8x16_t sqx5 = vreinterpretq_s8_u8(vandq_u8(qx5, m3)); - int8x16_t sqx6 = vreinterpretq_s8_u8(vandq_u8(qx6, m3)); - int8x16_t sqx7 = vreinterpretq_s8_u8(vandq_u8(qx7, m3)); - - const int8x16_t qy0 = vld1q_s8(y[i].qs + j*4 + 0); - const int8x16_t qy1 = vld1q_s8(y[i].qs + j*4 + 16); - const int8x16_t qy2 = vld1q_s8(y[i].qs + j*4 + 32); - const int8x16_t qy3 = vld1q_s8(y[i].qs + j*4 + 48); - const int8x16_t qy4 = vld1q_s8(y[i].qs + j*4 + 64); - const int8x16_t qy5 = vld1q_s8(y[i].qs + j*4 + 80); - const int8x16_t qy6 = vld1q_s8(y[i].qs + j*4 + 96); - const int8x16_t qy7 = vld1q_s8(y[i].qs + j*4 + 112); - -#if defined(__ARM_FEATURE_DOTPROD) - sumi0 = vdotq_s32(sumi0, sqx0, qy0); - sumi1 = vdotq_s32(sumi1, sqx1, qy1); - sumi0 = vdotq_s32(sumi0, sqx2, qy2); - sumi1 = vdotq_s32(sumi1, sqx3, qy3); - sumi0 = vdotq_s32(sumi0, sqx4, qy4); - sumi1 = vdotq_s32(sumi1, sqx5, qy5); - sumi0 = vdotq_s32(sumi0, sqx6, qy6); - sumi1 = vdotq_s32(sumi1, sqx7, qy7); -#else - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx6), vget_low_s8(qy6)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx6), vget_high_s8(qy6)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx7), vget_low_s8(qy7)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx7), vget_high_s8(qy7)); -#endif - } - - const int16x8_t ysum0 = vld1q_s16(y[i].bsums); - const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8); - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - -#if defined(__ARM_FEATURE_DOTPROD) - sumi0 = vaddq_s32(sumi0, sumi1); - sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1))); - - sumf += d * (float) vaddvq_s32(sumi0); -#else - sumi0 = 
vaddq_s16(sumi0, sumi1); - sumi0 = vsubq_s16(sumi0, vaddq_s16(ysum0, ysum1)); - - sumf += d * (float) vaddlvq_s16(sumi0); -#endif - } - - *s = sumf; - -#elif defined(__AVX2__) - __m256 sumf = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - // 16-bit sums, because 256*127 still fits - __m256i sumi0 = _mm256_setzero_si256(); - __m256i sumi1 = _mm256_setzero_si256(); - - for (size_t j = 0; j < sizeof(x->qs); j += 32) { - __m256i qx0 = _mm256_loadu_si256((const __m256i *) (x[i].qs + j)); - __m256i qx1 = _mm256_srli_epi16(qx0, 2); - __m256i qx2 = _mm256_srli_epi16(qx0, 4); - __m256i qx3 = _mm256_srli_epi16(qx0, 6); - - // 0, 1, 2 (should not be 3) - qx0 = _mm256_and_si256(qx0, _mm256_set1_epi8(3)); - qx1 = _mm256_and_si256(qx1, _mm256_set1_epi8(3)); - qx2 = _mm256_and_si256(qx2, _mm256_set1_epi8(3)); - qx3 = _mm256_and_si256(qx3, _mm256_set1_epi8(3)); - - const __m256i qy0 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 0)); - const __m256i qy1 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 32)); - const __m256i qy2 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 64)); - const __m256i qy3 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 96)); - - qx0 = _mm256_maddubs_epi16(qx0, qy0); - qx1 = _mm256_maddubs_epi16(qx1, qy1); - qx2 = _mm256_maddubs_epi16(qx2, qy2); - qx3 = _mm256_maddubs_epi16(qx3, qy3); - - sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(qx0, qx1)); - sumi1 = _mm256_add_epi16(sumi1, _mm256_add_epi16(qx2, qx3)); - } - - const __m256i ysum = _mm256_loadu_si256((const __m256i *) y[i].bsums); - const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(x[i].d)); - - sumi0 = _mm256_add_epi16(sumi0, sumi1); - sumi0 = _mm256_sub_epi16(sumi0, ysum); - sumi0 = _mm256_madd_epi16(sumi0, _mm256_set1_epi16(1)); - - sumf = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(sumi0), d), sumf); - } - - *s = hsum_float_8(sumf); - -#else - float sumf = 0.0f; - - for (int i = 0; i < nb; ++i) { - int32_t sumi = 0; - - for (size_t j = 0; j < sizeof(x->qs); j += 32) { - for (size_t l = 0; l < 4; ++l) { - for (size_t k = 0; k < 32; ++k) { - sumi += y[i].qs[j*4 + l*32 + k] * (((x[i].qs[j + k] >> (l*2)) & 3) - 1); - } - } - } - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - sumf += (float) sumi * d; - } - - *s = sumf; -#endif -} - -void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q2_K * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#ifdef __ARM_FEATURE_SVE - const int vector_length = svcntb()*8; - const svuint8_t m3s = svdup_n_u8(0x3); - const svuint32_t m4s = svdup_n_u32(0xF); - const svint32_t vzero_sv = svdup_n_s32(0); - svfloat32_t acc_sum = svdup_n_f32(0); - svbool_t pred_s32 = svptrue_pat_b32(SV_VL4); - - switch (vector_length) { - case 128: - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - svfloat32_t d_broad = svdup_n_f32((float32_t)d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin); - - const uint8_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8_sv = y[i].qs; - const uint8_t * GGML_RESTRICT sc = x[i].scales; - - svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc); - const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), 
mins_and_scales_sve, 4)); - - mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+4); - const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); - - svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums); - svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums+4); - - const svint32_t s0 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_2, q8sums_sv_2)); - - mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+8); - const svint32_t mins_sv_3 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); - - mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+12); - const svint32_t mins_sv_4 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); - - q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums+8); - q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums+12); - - svint32_t s1 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_3, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_4, q8sums_sv_2)); - - svfloat32_t temp = svcvt_f32_s32_x(svptrue_b32(), svadd_s32_x(svptrue_b32(), s0, s1)); - - acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, temp, dmin_broad); - - svint32_t sumi1 = svdup_n_s32(0); - - { - const svuint8_t q2bits_1 = svld1_u8(svptrue_b8(), q2); - svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_1, m3s)); - svint8_t q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc), m4s)); - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 0)); - - const svuint8_t q2bits_3 = svld1_u8(svptrue_b8(), q2+16); - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_3, m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 1)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 2)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 3)); - - - const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+4), m4s)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 0)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 1)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = 
svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 2)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 3)); - - //------------------------------- - - q2 += 32; - const svint32_t scales_sv_2 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+8), m4s)); - const svuint8_t q2bits_2 = svld1_u8(svptrue_b8(), q2); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_2, m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 0)); - - const svuint8_t q2bits_4 = svld1_u8(svptrue_b8(), q2+16); - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_4, m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 1)); - - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 2)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 3)); - - - const svint32_t scales_sv_3 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+12), m4s)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 0)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 1)); - - - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 2)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 3)); - } - acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, svcvt_f32_s32_x(svptrue_b32(), sumi1), d_broad); - } - *s = svaddv_f32(svptrue_b32(), acc_sum); - break; - - case 256: - case 512: - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - svfloat32_t d_broad = svdup_n_f32((float32_t)d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin); - - 
const uint8_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8_sv = y[i].qs; - const uint8_t * GGML_RESTRICT sc = x[i].scales; - - const svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); sc += 8; - const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, m4s)); - const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, 4)); - svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums); - - const svuint32_t mins_and_scales_sve_1 = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); - const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, m4s)); - const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, 4)); - - svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums+8); - - svfloat32_t temp = svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_2, q8sums_sv_2))); - - acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, temp, dmin_broad); - - svint32_t sumi1 = svdup_n_s32(0); - - { - const svuint8_t q2bits_1 = svld1_u8(svptrue_pat_b8(SV_VL32), q2); - svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_1, m3s)); - svint8_t q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; - - svint32_t scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 0), svdup_lane_s32(scales_sv, 1)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; - - svint32_t scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 2), svdup_lane_s32(scales_sv, 3)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(svdup_n_s32(0), q2bytes_sv, q8bytes_sv), scale_2); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; - - scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 4), svdup_lane_s32(scales_sv, 5)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; - - scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 6), svdup_lane_s32(scales_sv, 7)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); - - q2 += 32; - - const svuint8_t q2bits_2 = svld1_u8(svptrue_pat_b8(SV_VL32), q2); - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_2, m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; - - scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 0), svdup_lane_s32(scales_sv_1, 1)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 2), 
m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; - - scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 2), svdup_lane_s32(scales_sv_1, 3)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; - - scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 4), svdup_lane_s32(scales_sv_1, 5)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; - - scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 6), svdup_lane_s32(scales_sv_1, 7)); - sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2); - } - acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), sumi1), d_broad); - } - *s = svaddv_f32(svptrue_pat_b32(SV_VL8), acc_sum); - break; - - default: - assert(false && "Unsupported vector length"); - break; - } - -#elif __ARM_NEON - const uint8x16_t m3 = vdupq_n_u8(0x3); - const uint8x16_t m4 = vdupq_n_u8(0xF); - - const int32x4_t vzero = vdupq_n_s32(0); - - ggml_int8x16x2_t q2bytes; - uint8_t aux[16]; - - float sum = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const uint8_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - const uint8_t * GGML_RESTRICT sc = x[i].scales; - - const uint8x16_t mins_and_scales = vld1q_u8(sc); - const uint8x16_t scales = vandq_u8(mins_and_scales, m4); - vst1q_u8(aux, scales); - - const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4); - const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); - const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}}; - const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])), - vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0]))); - const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])), - vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1]))); - sum += dmin * vaddvq_s32(vaddq_s32(s0, s1)); - - int isum = 0; - int is = 0; - -// We use this macro instead of a function call because for some reason -// the code runs 2-3% slower, even if the function is declared inline -#define MULTIPLY_ACCUM_WITH_SCALE(index)\ - isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\ - isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)]; - -#define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\ - q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\ - q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\ - q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\ - MULTIPLY_ACCUM_WITH_SCALE((index)); - - for (int j = 0; j < QK_K/128; ++j) { - const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32; - - ggml_int8x16x2_t q8bytes = 
ggml_vld1q_s8_x2(q8); q8 += 32; - q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3)); - q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3)); - - MULTIPLY_ACCUM_WITH_SCALE(0); - - SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2); - SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4); - SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6); - - is += 8; - } - - sum += d * isum; - } - - *s = sum; - -#elif defined __AVX2__ - - const __m256i m3 = _mm256_set1_epi8(3); - const __m128i m4 = _mm_set1_epi8(0xF); - - __m256 acc = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const uint8_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); - const __m128i scales8 = _mm_and_si128(mins_and_scales, m4); - const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); - const __m256i mins = _mm256_cvtepi8_epi16(mins8); - const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums)); - - acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc); - - const __m256i all_scales = _mm256_cvtepi8_epi16(scales8); - const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); - const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); - const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; - - __m256i sumi = _mm256_setzero_si256(); - - for (int j = 0; j < QK_K/128; ++j) { - - const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32; - - const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - - const __m256i q2_0 = _mm256_and_si256(q2bits, m3); - const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3); - const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3); - const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3); - - __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0); - __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1); - __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2); - __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3); - - p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0); - p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1); - p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2); - p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3); - - p0 = _mm256_add_epi32(p0, p1); - p2 = _mm256_add_epi32(p2, p3); - - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2)); - } - - acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); - - } - - *s = hsum_float_8(acc); - -#elif defined __AVX__ - - const __m128i m3 = _mm_set1_epi8(0x3); - const __m128i m4 = _mm_set1_epi8(0xF); - const __m128i m2 = _mm_set1_epi8(0x2); - - __m256 acc = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const uint8_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - // load mins and scales from 
block_q2_K.scales[QK_K/16] - const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); - const __m128i scales16 = _mm_and_si128(mins_and_scales, m4); - const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); - const __m128i mins_0 = _mm_cvtepi8_epi16(mins16); - const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16)); - - // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2 - const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0])); - const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8])); - - // sumf += -dmin * summs in 32bits*8 - acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc); - - const __m128i scales_0 = _mm_cvtepi8_epi16(scales16); - const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16)); - const __m128i scales[2] = { scales_0, scales_1 }; - - __m128i sumi_0 = _mm_setzero_si128(); - __m128i sumi_1 = _mm_setzero_si128(); - - for (int j = 0; j < QK_K/128; ++j) { - - // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K] - const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - - // load 2bits*16*8 from block_q2_K.qs[QK_K/4] - __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16; - const __m128i q2_0 = _mm_and_si128(q2bits, m3); - const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); - const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); - const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); - q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16; - const __m128i q2_1 = _mm_and_si128(q2bits, m3); - const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); - const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); - const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); - - // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8 - __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0); - __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1); - __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2); - __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3); - __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4); - __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5); - __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6); - __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7); - - // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8 - __m128i shuffle = _mm_set1_epi16(0x0100); - p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0); - shuffle = _mm_add_epi16(shuffle, m2); - p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1); - shuffle = _mm_add_epi16(shuffle, m2); - p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2); - shuffle = _mm_add_epi16(shuffle, m2); - p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3); - shuffle = _mm_add_epi16(shuffle, m2); - p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4); - shuffle = _mm_add_epi16(shuffle, m2); - p5 = 
_mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5); - shuffle = _mm_add_epi16(shuffle, m2); - p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6); - shuffle = _mm_add_epi16(shuffle, m2); - p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7); - - p0 = _mm_add_epi32(p0, p1); - p2 = _mm_add_epi32(p2, p3); - p4 = _mm_add_epi32(p4, p5); - p6 = _mm_add_epi32(p6, p7); - - // isum in 32bits*4*2 - sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2)); - sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6)); - } - - // sumf += dall * isum - dmin * summs in 32bits - __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); - acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc); - } - - *s = hsum_float_8(acc); - -#elif defined __wasm_simd128__ - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - const uint8_t * q2 = x[i].qs; - const int8_t * q8 = y[i].qs; - const uint8_t * sc = x[i].scales; - - // Vectorized summs calculation - v128_t summs_vec = wasm_i32x4_splat(0); - { - v128_t sc_vec = wasm_v128_load(sc); - v128_t sc_upper = wasm_u8x16_shr(sc_vec, 4); - - v128_t sc_low = wasm_u16x8_extend_low_u8x16(sc_upper); - v128_t sc_high = wasm_u16x8_extend_high_u8x16(sc_upper); - - v128_t bsums1 = wasm_v128_load(&y[i].bsums[0]); - v128_t bsums2 = wasm_v128_load(&y[i].bsums[8]); - - summs_vec = wasm_i32x4_add( - wasm_i32x4_add(wasm_i32x4_dot_i16x8(sc_low, bsums1), - wasm_i32x4_dot_i16x8(sc_high, bsums2)), - summs_vec - ); - - summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 2, 3, 0, 1)); - summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 1, 0, 3, 2)); - } - int32_t summs = wasm_i32x4_extract_lane(summs_vec, 0); - - // Vectorized isum calculation - int32_t isum = 0; - const uint8_t * sc_ptr = sc; - const int k_iters = QK_K/128; - - for (int k = 0; k < k_iters; ++k) { - v128_t isum_vec = wasm_i32x4_splat(0); - int shift = 0; - - for (int j = 0; j < 4; ++j) { - const int d0 = (sc_ptr[0] & 0xF); - const int d1 = (sc_ptr[1] & 0xF); - sc_ptr += 2; - - // Process first 16 elements - v128_t q2_0 = wasm_v128_load(q2); - v128_t q8_0 = wasm_v128_load(q8); - v128_t q2_shift_0 = wasm_u8x16_shr(q2_0, shift); - v128_t q2_bits_0 = wasm_v128_and(q2_shift_0, wasm_i8x16_splat(0x03)); - - // Process next 16 elements - v128_t q2_1 = wasm_v128_load(q2 + 16); - v128_t q8_1 = wasm_v128_load(q8 + 16); - v128_t q2_shift_1 = wasm_u8x16_shr(q2_1, shift); - v128_t q2_bits_1 = wasm_v128_and(q2_shift_1, wasm_i8x16_splat(0x03)); - - // Calculate dot products - v128_t p0 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q8_0), - wasm_i16x8_extend_low_i8x16(q2_bits_0) - ); - v128_t p1 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q8_0), - wasm_i16x8_extend_high_i8x16(q2_bits_0) - ); - v128_t p2 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q8_1), - wasm_i16x8_extend_low_i8x16(q2_bits_1) - ); - v128_t p3 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q8_1), - wasm_i16x8_extend_high_i8x16(q2_bits_1) - ); - - // Accumulate scaled results - v128_t scaled = wasm_i32x4_add( - wasm_i32x4_mul(wasm_i32x4_add(p0, p1), wasm_i32x4_splat(d0)), - wasm_i32x4_mul(wasm_i32x4_add(p2, p3), wasm_i32x4_splat(d1)) - ); - - isum_vec = wasm_i32x4_add(isum_vec, scaled); - q8 += 32; - shift += 2; - } - q2 += 32; - - // Horizontal sum of isum_vec - isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 2, 3, 0, 1)); - isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 1, 0, 3, 2)); 
- isum += wasm_i32x4_extract_lane(isum_vec, 0); - } - - const float dall = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; - sumf += dall * isum - dmin * summs; - } - - *s = sumf; - -#elif defined __riscv_xtheadvector - - float sumf = 0; - uint8_t atmp[16]; - - for (int i = 0; i < nb; ++i) { - const uint8_t * q2 = x[i].qs; - const int8_t * q8 = y[i].qs; - const uint8_t * sc = x[i].scales; - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - uint8_t *patmp = atmp; - int vsums; - int tmp; - __asm__ __volatile__( - "th.vsetvli zero, %[vl16], e8, m1\n\t" - "th.vmv.v.x v8, zero\n\t" - "th.vlb.v v1, (%[sc])\n\t" - "th.vand.vi v0, v1, 0xF\n\t" - "th.vsrl.vi v1, v1, 4\n\t" - "th.vsb.v v0, (%[scale])\n\t" - "th.vwaddu.vx v16, v1, zero\n\t" - "th.vsetvli zero, %[vl16], e16, m2\n\t" - "th.vlh.v v2, (%[bsums])\n\t" - "th.vwmul.vv v4, v16, v2\n\t" - "th.vsetvli zero, %[vl16], e32, m4\n\t" - "th.vredsum.vs v8, v4, v8\n\t" - "th.vmv.x.s %[vsums], v8" - : [tmp] "=&r" (tmp), [vsums] "=&r" (vsums) - : [sc] "r" (sc), [scale] "r" (atmp), [bsums] "r" (y[i].bsums) - , [vl16] "r" (16) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - sumf += dmin * vsums; - int isum = 0; - - for (int j = 0; j < QK_K/128; ++j) { - __asm__ __volatile__( - "th.vsetvli zero, %[vl32], e8, m2\n\t" - "th.vlb.v v0, (%[q2])\n\t" - "th.vsrl.vi v2, v0, 2\n\t" - "th.vsrl.vi v4, v0, 4\n\t" - "th.vsrl.vi v6, v0, 6\n\t" - "th.vand.vi v0, v0, 0x3\n\t" - "th.vand.vi v2, v2, 0x3\n\t" - "th.vand.vi v4, v4, 0x3\n\t" - "th.vsetvli zero, %[vl128], e8, m8\n\t" - "th.vlb.v v8, (%[q8])\n\t" - "th.vsetvli zero, %[vl64], e8, m4\n\t" - "th.vwmul.vv v16, v0, v8\n\t" - "th.vwmul.vv v24, v4, v12\n\t" - "th.vsetvli zero, %[vl16], e16, m2\n\t" - "th.vmv.v.x v0, zero\n\t" - "th.vwredsum.vs v10, v16, v0\n\t" - "th.vwredsum.vs v9, v18, v0\n\t" - "th.vwredsum.vs v8, v20, v0\n\t" - "th.vwredsum.vs v7, v22, v0\n\t" - "th.vwredsum.vs v11, v24, v0\n\t" - "th.vwredsum.vs v12, v26, v0\n\t" - "th.vwredsum.vs v13, v28, v0\n\t" - "th.vwredsum.vs v14, v30, v0\n\t" - "li %[tmp], 4\n\t" - "th.vsetvli zero, %[tmp], e32, m1\n\t" - "th.vslideup.vi v10, v9, 1\n\t" - "th.vslideup.vi v8, v7, 1\n\t" - "th.vslideup.vi v11, v12, 1\n\t" - "th.vslideup.vi v13, v14, 1\n\t" - "th.vslideup.vi v10, v8, 2\n\t" - "th.vslideup.vi v11, v13, 2\n\t" - "li %[tmp], 8\n\t" - "th.vsetvli zero, %[tmp], e32, m2\n\t" - "th.vlbu.v v12, (%[scale])\n\t" - "th.vmul.vv v10, v10, v12\n\t" - "th.vredsum.vs v0, v10, v0\n\t" - "th.vmv.x.s %[tmp], v0\n\t" - "add %[isum], %[isum], %[tmp]" - : [tmp] "=&r" (tmp), [isum] "+&r" (isum) - : [q2] "r" (q2), [scale] "r" (patmp), [q8] "r" (q8) - , [vl16] "r" (16), [vl32] "r" (32), [vl64] "r" (64), [vl128] "r" (128) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - q2 += 32; q8 += 128; patmp += 8; - } - - sumf += dall * isum; - } - - *s = sumf; - -#elif defined __riscv_v - - float sumf = 0; - uint8_t atmp[16]; - - const int vector_length = __riscv_vlenb() * 8; - uint8_t temp_01[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1 }; - - switch (vector_length) { - case 256: - for (int i = 0; i < nb; ++i) { - const uint8_t * q2 = x[i].qs; - const int8_t * q8 = y[i].qs; - const uint8_t * sc = x[i].scales; - - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - size_t vl = 16; - - vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl); - vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl); - - vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl); - - vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl); - vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl); - vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl)); - vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl); - vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); - - sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums); - - vl = 32; - - vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); - vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl); - - uint8_t is = 0; - int isum = 0; - - for (int j = 0; j < QK_K / 128; ++j) { - // load Q2 - vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl); - - vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl); - vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03, vl); - vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03, vl); - vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03, vl); - - // duplicate scale elements for product - vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0 + is, vl), vl); - vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2 + is, vl), vl); - vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4 + is, vl), vl); - vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6 + is, vl), vl); - - vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl)); - vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl)); - vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl)); - vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl)); - - // load Q8 - vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl); - vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8 + 32, vl); - vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8 + 64, vl); - vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8 + 96, vl); - - vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl); - vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl); - vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl); - vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl); - - vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl); - vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl); - - isum += __riscv_vmv_x_s_i32m1_i32(isum1); - - q2 += 32; - q8 += 128; - is = 8; - } - - sumf += dall * isum; - } - break; - case 128: - for (int i = 0; i < nb; ++i) { - const uint8_t * q2 = x[i].qs; - const int8_t * q8 = y[i].qs; - const uint8_t * sc = x[i].scales; - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - uint8_t *patmp = atmp; - int vsums; - int tmp; - __asm__ __volatile__( - "vsetivli zero, 16, e8, 
m1\n\t" - "vmv.v.x v8, zero\n\t" - "vle8.v v1, (%[sc])\n\t" - "vand.vi v0, v1, 0xF\n\t" - "vsrl.vi v1, v1, 4\n\t" - "vse8.v v0, (%[scale])\n\t" - "vsetivli zero, 16, e16, m2\n\t" - "vle16.v v2, (%[bsums])\n\t" - "vzext.vf2 v0, v1\n\t" - "vwmul.vv v4, v0, v2\n\t" - "vsetivli zero, 16, e32, m4\n\t" - "vredsum.vs v8, v4, v8\n\t" - "vmv.x.s %[vsums], v8" - : [tmp] "=&r" (tmp), [vsums] "=&r" (vsums) - : [sc] "r" (sc), [scale] "r" (atmp), [bsums] "r" (y[i].bsums) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - sumf += dmin * vsums; - int isum = 0; - - for (int j = 0; j < QK_K/128; ++j) { - __asm__ __volatile__( - "vsetvli zero, %[vl32], e8, m2\n\t" - "vle8.v v0, (%[q2])\n\t" - "vsrl.vi v2, v0, 2\n\t" - "vsrl.vi v4, v0, 4\n\t" - "vsrl.vi v6, v0, 6\n\t" - "vand.vi v0, v0, 0x3\n\t" - "vand.vi v2, v2, 0x3\n\t" - "vand.vi v4, v4, 0x3\n\t" - "vsetvli zero, %[vl128], e8, m8\n\t" - "vle8.v v8, (%[q8])\n\t" - "vsetvli zero, %[vl64], e8, m4\n\t" - "vwmul.vv v16, v0, v8\n\t" - "vwmul.vv v24, v4, v12\n\t" - "vsetivli zero, 16, e16, m2\n\t" - "vmv.v.x v0, zero\n\t" - "vwredsum.vs v10, v16, v0\n\t" - "vwredsum.vs v9, v18, v0\n\t" - "vwredsum.vs v8, v20, v0\n\t" - "vwredsum.vs v7, v22, v0\n\t" - "vwredsum.vs v11, v24, v0\n\t" - "vwredsum.vs v12, v26, v0\n\t" - "vwredsum.vs v13, v28, v0\n\t" - "vwredsum.vs v14, v30, v0\n\t" - "vsetivli zero, 4, e32, m1\n\t" - "vslideup.vi v10, v9, 1\n\t" - "vslideup.vi v8, v7, 1\n\t" - "vslideup.vi v11, v12, 1\n\t" - "vslideup.vi v13, v14, 1\n\t" - "vslideup.vi v10, v8, 2\n\t" - "vslideup.vi v11, v13, 2\n\t" - "vsetivli zero, 8, e32, m2\n\t" - "vle8.v v15, (%[scale])\n\t" - "vzext.vf4 v12, v15\n\t" - "vmul.vv v10, v10, v12\n\t" - "vredsum.vs v0, v10, v0\n\t" - "vmv.x.s %[tmp], v0\n\t" - "add %[isum], %[isum], %[tmp]" - : [tmp] "=&r" (tmp), [isum] "+&r" (isum) - : [q2] "r" (q2), [scale] "r" (patmp), [q8] "r" (q8) - , [vl32] "r" (32), [vl64] "r" (64), [vl128] "r" (128) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - q2 += 32; q8 += 128; patmp += 8; - } - - sumf += dall * isum; - } - break; - default: - assert(false && "Unsupported vector length"); - break; - } - - *s = sumf; - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0x3); - const vector signed char lowScaleMask = vec_splats((signed char)0xF); - const vector int v0 = vec_splats((int32_t)0); - const vector unsigned char v2 = vec_splats((unsigned char)0x2); - const vector unsigned char v6 = vec_splats((unsigned char)0x6); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); - vector float vdmin = vec_mul(vxmin, vyd); - - vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); - vector signed short q8ysums1 = vec_xl(16, y[i].bsums); - - vector signed char q2xmins = (vector 
signed char)vec_xl( 0, x[i].scales); - vector signed char vscales = vec_and(q2xmins, lowScaleMask); - - q2xmins = vec_sr(q2xmins, v4); - vector signed short q2xmins0 = vec_unpackh(q2xmins); - vector signed short q2xmins1 = vec_unpackl(q2xmins); - - vector signed int prod0 = vec_mule(q2xmins0, q8ysums0); - vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0); - vector signed int prod2 = vec_mule(q2xmins1, q8ysums1); - vector signed int prod3 = vec_mulo(q2xmins1, q8ysums1); - - vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); - vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); - vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); - vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - vector signed int vsumi4 = v0; - vector signed int vsumi5 = v0; - vector signed int vsumi6 = v0; - vector signed int vsumi7 = v0; - - const uint8_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - for (int j = 0; j < QK_K/128; ++j) { - __builtin_prefetch(q2, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed char qxs0 = (vector signed char)vec_xl( 0, q2); - vector signed char qxs1 = (vector signed char)vec_xl(16, q2); - q2 += 32; - - vector unsigned char q2x00 = (vector unsigned char)vec_and(qxs0, lowMask); - vector unsigned char q2x01 = (vector unsigned char)vec_and(vec_sr(qxs0, v2), lowMask); - vector unsigned char q2x02 = (vector unsigned char)vec_and(vec_sr(qxs0, v4), lowMask); - vector unsigned char q2x03 = (vector unsigned char)vec_and(vec_sr(qxs0, v6), lowMask); - vector unsigned char q2x10 = (vector unsigned char)vec_and(qxs1, lowMask); - vector unsigned char q2x11 = (vector unsigned char)vec_and(vec_sr(qxs1, v2), lowMask); - vector unsigned char q2x12 = (vector unsigned char)vec_and(vec_sr(qxs1, v4), lowMask); - vector unsigned char q2x13 = (vector unsigned char)vec_and(vec_sr(qxs1, v6), lowMask); - - vector signed char q8y00 = vec_xl( 0, q8); - vector signed char q8y10 = vec_xl( 16, q8); - vector signed char q8y01 = vec_xl( 32, q8); - vector signed char q8y11 = vec_xl( 48, q8); - vector signed char q8y02 = vec_xl( 64, q8); - vector signed char q8y12 = vec_xl( 80, q8); - vector signed char q8y03 = vec_xl( 96, q8); - vector signed char q8y13 = vec_xl(112, q8); - q8 += 128; - - vector signed int qv0 = vec_msum(q8y00, q2x00, v0); - vector signed int qv1 = vec_msum(q8y01, q2x01, v0); - vector signed int qv2 = vec_msum(q8y02, q2x02, v0); - vector signed int qv3 = vec_msum(q8y03, q2x03, v0); - vector signed int qv4 = vec_msum(q8y10, q2x10, v0); - vector signed int qv5 = vec_msum(q8y11, q2x11, v0); - vector signed int qv6 = vec_msum(q8y12, q2x12, v0); - vector signed int qv7 = vec_msum(q8y13, q2x13, v0); - - vector signed short vscales_07 = vec_unpackh(vscales); - vector signed int vscales_03 = vec_unpackh(vscales_07); - vector signed int vscales_47 = vec_unpackl(vscales_07); - vector signed int vs0 = vec_splat(vscales_03, 0); - vector signed int vs1 = vec_splat(vscales_03, 1); - vector signed int vs2 = vec_splat(vscales_03, 2); - vector signed int vs3 = vec_splat(vscales_03, 3); - vector signed int vs4 = vec_splat(vscales_47, 0); - vector signed int vs5 = vec_splat(vscales_47, 1); - vector signed int vs6 = vec_splat(vscales_47, 2); - vector signed int vs7 = vec_splat(vscales_47, 3); - vscales = vec_sld(vscales, vscales, 8); - - vsumi0 = vec_add(vec_mul(qv0, vs0), vsumi0); - vsumi1 = vec_add(vec_mul(qv1, vs2), vsumi1); - vsumi2 
= vec_add(vec_mul(qv2, vs4), vsumi2); - vsumi3 = vec_add(vec_mul(qv3, vs6), vsumi3); - vsumi4 = vec_add(vec_mul(qv4, vs1), vsumi4); - vsumi5 = vec_add(vec_mul(qv5, vs3), vsumi5); - vsumi6 = vec_add(vec_mul(qv6, vs5), vsumi6); - vsumi7 = vec_add(vec_mul(qv7, vs7), vsumi7); - } - - vsumi0 = vec_add(vsumi0, vsumi4); - vsumi1 = vec_add(vsumi1, vsumi5); - vsumi2 = vec_add(vsumi2, vsumi6); - vsumi3 = vec_add(vsumi3, vsumi7); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined __loongarch_asx - - __m256 acc = (__m256)__lasx_xvldi(0); - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const uint8_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const __m128i mins_and_scales128 = __lsx_vld((const __m128i*)x[i].scales, 0); - const __m128i scales128 = __lsx_vandi_b(mins_and_scales128, 0xf); - const __m256i mins = lasx_ext8_16(__lsx_vsrli_b(mins_and_scales128, 4)); - const __m256i prod = lasx_madd_h(mins, __lasx_xvld((const __m256i*)y[i].bsums, 0)); - - acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(dmin), __lasx_xvffint_s_w(prod), acc); - - const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; - const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); - - __m256i sumi = __lasx_xvldi(0); - - for (int j = 0; j < QK_K/128; ++j) { - - const __m256i q2bits = __lasx_xvld((const __m256i*)q2, 0); q2 += 32; - - const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_3 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - - const __m256i q2_0 = __lasx_xvandi_b(q2bits, 3); - const __m256i q2_1 = __lasx_xvandi_b(__lasx_xvsrli_b(q2bits, 2), 3); - const __m256i q2_2 = __lasx_xvandi_b(__lasx_xvsrli_b(q2bits, 4), 3); - const __m256i q2_3 = __lasx_xvsrli_b(q2bits, 6); - - __m256i p0 = lasx_madd_h_b(q2_0, q8_0); - __m256i p1 = lasx_madd_h_b(q2_1, q8_1); - __m256i p2 = lasx_madd_h_b(q2_2, q8_2); - __m256i p3 = lasx_madd_h_b(q2_3, q8_3); - - p0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p0); - p1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p1); - p2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p2); - p3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p3); - - p0 = __lasx_xvadd_w(p0, p1); - p2 = __lasx_xvadd_w(p2, p3); - - sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p0, p2)); - } - - acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); - - } - - *s = hsum_float_8(acc); - -#else - - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - - const uint8_t * q2 = x[i].qs; - const int8_t * q8 = y[i].qs; - const uint8_t * sc = x[i].scales; - - int summs = 0; - for (int j = 0; j < 16; ++j) { - summs += y[i].bsums[j] * (sc[j] >> 4); - } - - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * 
GGML_FP16_TO_FP32(x[i].dmin); - - int isum = 0; - int is = 0; - int d; - for (int k = 0; k < QK_K/128; ++k) { - int shift = 0; - for (int j = 0; j < 4; ++j) { - d = sc[is++] & 0xF; - int isuml = 0; - for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); - isum += d * isuml; - d = sc[is++] & 0xF; - isuml = 0; - for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); - isum += d * isuml; - shift += 2; - q8 += 32; - } - q2 += 32; - } - sumf += dall * isum - dmin * summs; - } - *s = sumf; -#endif -} - -void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const uint32_t kmask1 = 0x03030303; - const uint32_t kmask2 = 0x0f0f0f0f; - - const block_q3_K * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_FEATURE_SVE) - - uint32_t aux[3]; - uint32_t utmp[4]; - - const int8_t m32 = 32; - const int vector_length = svcntb()*8; - const svuint8_t m3b_sv = svdup_n_u8(0x3); - const svint32_t vzero_sv = svdup_n_s32(0); - - const svuint8_t m0_sv = svdup_n_u8(1); - const svuint8_t m1_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 1); - const svuint8_t m2_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 2); - const svuint8_t m3_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 3); - - float sum = 0; - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * GGML_RESTRICT q3_sv = x[i].qs; - const uint8_t * GGML_RESTRICT qh_sv = x[i].hmask; - const int8_t * GGML_RESTRICT q8_sv = y[i].qs; - - // Set up scales - memcpy(aux, x[i].scales, 12); - utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); - utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); - utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); - utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); - - int8_t * scale = (int8_t *)utmp; - - for (int j = 0; j < 16; ++j) scale[j] -= m32; - - switch (vector_length) { - case 128: - { - svuint8_t qhbits_sv_1 = svld1_u8(svptrue_b8(), qh_sv); - svuint8_t qhbits_sv_2 = svld1_u8(svptrue_b8(), qh_sv+16); - svuint8_t q3h_sv; - - svint32_t sumi1_1 = svdup_n_s32(0); - svint8_t q3bytes_sv; - - for (int j = 0; j < QK_K/128; ++j) { - - const svuint8_t q3bits_sv = svld1_u8(svptrue_b8(), q3_sv); q3_sv += 16; - const svuint8_t q3bits_sv_1 = svld1_u8(svptrue_b8(), q3_sv); q3_sv += 16; - svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_1), 2); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0])); - - q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_2), 2); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv_1, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1])); - - q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); 
q8_sv += 16; - - q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_1), 1); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2])); - - q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_2), 1); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3])); - - - scale += 4; - q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_1); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0])); - - q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_2); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1])); - - - q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_1), 1); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2])); - - q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_2), 1); - q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3])); - - if (j == 0) { - qhbits_sv_1 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_1, 4); - qhbits_sv_2 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_2, 4); - } - - scale += 4; - } - - sum += d * (svaddv_s32(svptrue_b32(), sumi1_1)); - } break; - case 256: - case 512: - { - svuint8_t qhbits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), qh_sv); - svuint8_t q3h_sv; - - svint32_t sumi1_1 = svdup_n_s32(0); - svint8_t q3bytes_sv; - - for (int j = 0; j < QK_K/128; ++j) { - - const svuint8_t q3bits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), q3_sv); q3_sv += 32; - svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; - svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; - - q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m0_sv, qhbits_sv), 2); - q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - - svint32_t scale_1 
= svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1])); - sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1); - - q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m1_sv, qhbits_sv), 1); - q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3])); - sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1); - - scale += 4; - q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; - q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32; - - q3h_sv = svbic_u8_x(svptrue_pat_b8(SV_VL32), m2_sv, qhbits_sv); - q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1])); - sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1); - - q3h_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m3_sv, qhbits_sv), 1); - q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv)); - - scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3])); - sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1); - - if (j == 0) { - qhbits_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), qhbits_sv, 4); - } - - scale += 4; - } - - sum += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), sumi1_1)); - } break; - default: - assert(false && "Unsupported vector length"); - break; - } - } - *s = sum; - -#elif __ARM_NEON - - uint32_t aux[3]; - uint32_t utmp[4]; - - const uint8x16_t m3b = vdupq_n_u8(0x3); - const int32x4_t vzero = vdupq_n_s32(0); - - const uint8x16_t m0 = vdupq_n_u8(1); - const uint8x16_t m1 = vshlq_n_u8(m0, 1); - const uint8x16_t m2 = vshlq_n_u8(m0, 2); - const uint8x16_t m3 = vshlq_n_u8(m0, 3); - const int8_t m32 = 32; - - ggml_int8x16x4_t q3bytes; - - float sum = 0; - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].hmask; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); - - ggml_uint8x16x4_t q3h; - - int32_t isum = 0; - - // Set up scales - memcpy(aux, x[i].scales, 12); - utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); - utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); - utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); - utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); - - int8_t * scale = (int8_t *)utmp; - for (int j = 0; j < 16; ++j) scale[j] -= m32; - - for (int j = 0; j < QK_K/128; ++j) { - - const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32; - const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64; - 
const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64; - - q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2); - q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2); - q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1); - q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1); - - q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0])); - q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1])); - q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2])); - q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3])); - - isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0]; - isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1]; - isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2]; - isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3]; - - scale += 4; - - q3h.val[0] = vbicq_u8(m2, qhbits.val[0]); - q3h.val[1] = vbicq_u8(m2, qhbits.val[1]); - q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1); - q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1); - - q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0])); - q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1])); - q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2])); - q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3])); - - isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0]; - isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1]; - isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2]; - isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3]; - - scale += 4; - - if (j == 0) { - qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4); - qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4); - } - - } - sum += d * isum; - - } - - *s = sum; - -#elif defined __AVX2__ - - const __m256i m3 = _mm256_set1_epi8(3); - const __m256i mone = _mm256_set1_epi8(1); - const __m128i m32 = _mm_set1_epi8(32); - - __m256 acc = _mm256_setzero_ps(); - - uint32_t aux[3]; - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - // Set up scales - memcpy(aux, x[i].scales, 12); - __m128i scales128 = _mm_set_epi32( - ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), - ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), - (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), - (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); - scales128 = _mm_sub_epi8(scales128, m32); - const __m256i all_scales = _mm256_cvtepi8_epi16(scales128); - const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); - const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); - const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; - - // high bit - const __m256i hbits = 
_mm256_loadu_si256((const __m256i*)x[i].hmask); - - // integer accumulator - __m256i sumi = _mm256_setzero_si256(); - - int bit = 0; - int is = 0; - - for (int j = 0; j < QK_K/128; ++j) { - // load low 2 bits - const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32; - - // prepare low and high bits - const __m256i q3l_0 = _mm256_and_si256(q3bits, m3); - const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); - ++bit; - - const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3); - const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); - ++bit; - - const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3); - const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); - ++bit; - - const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3); - const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); - ++bit; - - // load Q8 quants - const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - - // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, - // and then subtract. The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, - // and 2 if the high bit was set) - __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0); - __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1); - __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2); - __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3); - - __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0); - __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1); - __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2); - __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3); - - p16_0 = _mm256_sub_epi16(p16_0, q8s_0); - p16_1 = _mm256_sub_epi16(p16_1, q8s_1); - p16_2 = _mm256_sub_epi16(p16_2, q8s_2); - p16_3 = _mm256_sub_epi16(p16_3, q8s_3); - - // multiply with scales - p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0); - p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1); - p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2); - p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3); - - // accumulate - p16_0 = _mm256_add_epi32(p16_0, p16_1); - p16_2 = _mm256_add_epi32(p16_2, p16_3); - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2)); - - } - - // multiply with block scale and accumulate - acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); - - } - - *s = hsum_float_8(acc); - -#elif defined __AVX__ - - const __m128i m3 = _mm_set1_epi8(3); - const __m128i mone = _mm_set1_epi8(1); - const __m128i m32 = _mm_set1_epi8(32); - const __m128i m2 = _mm_set1_epi8(2); - - __m256 acc = _mm256_setzero_ps(); - - const uint32_t *aux; - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - // Set up scales - aux 
= (const uint32_t *)x[i].scales; - __m128i scales128 = _mm_set_epi32( - ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), - ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), - (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), - (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); - scales128 = _mm_sub_epi8(scales128, m32); - const __m128i scales_0 = _mm_cvtepi8_epi16(scales128); - const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128)); - const __m128i scales[2] = { scales_0, scales_1 }; - - // high bit *128*2 from block_q3_K.hmask[QK_K/8] - const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]); - const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]); - - // integer accumulator - __m128i sumi_0 = _mm_setzero_si128(); - __m128i sumi_1 = _mm_setzero_si128(); - - for (int j = 0; j < QK_K/128; ++j) { - // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4] - const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16; - const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16; - - // prepare low and high bits - const int bit = j << 2; - - const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3); - const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3); - const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2); - const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2); - - const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3); - const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3); - const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2); - const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2); - - const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3); - const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3); - const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2); - const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2); - - const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3); - const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3); - const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2); - const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2); - - // load Q8 quants from block_q8_K.qs[QK_K] - const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - - // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, - // and then subtract. 
The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, - // and 2 if the high bit was set) - __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0); - __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1); - __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2); - __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3); - __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4); - __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5); - __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6); - __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7); - - __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0); - __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1); - __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2); - __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3); - __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4); - __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5); - __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6); - __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7); - - p16_0 = _mm_sub_epi16(p16_0, q8s_0); - p16_1 = _mm_sub_epi16(p16_1, q8s_1); - p16_2 = _mm_sub_epi16(p16_2, q8s_2); - p16_3 = _mm_sub_epi16(p16_3, q8s_3); - p16_4 = _mm_sub_epi16(p16_4, q8s_4); - p16_5 = _mm_sub_epi16(p16_5, q8s_5); - p16_6 = _mm_sub_epi16(p16_6, q8s_6); - p16_7 = _mm_sub_epi16(p16_7, q8s_7); - - // multiply with scales - __m128i shuffle = _mm_set1_epi16(0x0100); - p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0); - shuffle = _mm_add_epi16(shuffle, m2); - p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1); - shuffle = _mm_add_epi16(shuffle, m2); - p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2); - shuffle = _mm_add_epi16(shuffle, m2); - p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3); - shuffle = _mm_add_epi16(shuffle, m2); - p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4); - shuffle = _mm_add_epi16(shuffle, m2); - p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5); - shuffle = _mm_add_epi16(shuffle, m2); - p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6); - shuffle = _mm_add_epi16(shuffle, m2); - p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7); - - // accumulate - p16_0 = _mm_add_epi32(p16_0, p16_1); - p16_2 = _mm_add_epi32(p16_2, p16_3); - p16_4 = _mm_add_epi32(p16_4, p16_5); - p16_6 = _mm_add_epi32(p16_6, p16_7); - sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); - sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6)); - - } - - // multiply with block scale and accumulate - __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); - acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc); - - } - - *s = hsum_float_8(acc); - -#elif defined __wasm_simd128__ - int8_t aux8[QK_K]; - float sums[8] = {0}; - uint32_t auxs[4]; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT hm = x[i].hmask; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - // Process blocks with SIMD - int8_t * a = aux8; - uint8_t m = 1; - for (int j = 0; j < QK_K; j += 128) { - for (int shift = 0; shift <= 6; shift += 2) { - v128_t v_m = wasm_i8x16_splat(m); - for (int l = 0; l < 32; l += 16) { - v128_t v_q3 = wasm_v128_load(q3 + l); - v128_t v_shift = wasm_i8x16_shr(v_q3, shift); - v128_t v_low2 = wasm_v128_and(v_shift, wasm_i8x16_splat(0x03)); - - v128_t v_hm = wasm_v128_load(hm + l); - v128_t v_mask = wasm_v128_and(v_hm, v_m); - v_mask = wasm_i8x16_ne(v_mask, wasm_i8x16_splat(0)); - - v_low2 = wasm_i8x16_sub(v_low2, 
wasm_v128_and(wasm_i8x16_splat(4), wasm_v128_not(v_mask))); - wasm_v128_store(a + l, v_low2); - } - a += 32; - m <<= 1; - } - q3 += 32; - } - - // Extract scales - memcpy(auxs, x[i].scales, 12); - uint32_t tmp = auxs[2]; - auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); - auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); - auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); - auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); - const int8_t * scales = (const int8_t *)auxs; - - // SIMD dot product with register accumulators - v128_t v_acc0 = wasm_i32x4_splat(0); - v128_t v_acc1 = wasm_i32x4_splat(0); - a = aux8; - for (int j = 0; j < QK_K/16; ++j) { - const v128_t v_scale = wasm_i16x8_splat(scales[j] - 32); - - // Process 16 elements per iteration - for (int k = 0; k < 2; ++k) { - const v128_t v_q8 = wasm_i16x8_load8x8(q8); - const v128_t v_a = wasm_i16x8_load8x8(a); - - v128_t v_prod = wasm_i16x8_mul(v_q8, v_a); - v_prod = wasm_i16x8_mul(v_prod, v_scale); - - v_acc0 = wasm_i32x4_add(v_acc0, wasm_i32x4_extend_low_i16x8(v_prod)); - v_acc1 = wasm_i32x4_add(v_acc1, wasm_i32x4_extend_high_i16x8(v_prod)); - - q8 += 8; - a += 8; - } - } - - // Accumulate results - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const v128_t v_d = wasm_f32x4_splat(d); - v128_t v_sum = wasm_f32x4_add( - wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc0), v_d), - wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc1), v_d) - ); - - // Accumulate into sums vector - wasm_v128_store(sums, wasm_f32x4_add(wasm_v128_load(sums), v_sum)); - } - - // Horizontal sum - v128_t v_sum = wasm_f32x4_add(wasm_v128_load(sums), wasm_v128_load(sums + 4)); - sumf = wasm_f32x4_extract_lane(v_sum, 0) + - wasm_f32x4_extract_lane(v_sum, 1) + - wasm_f32x4_extract_lane(v_sum, 2) + - wasm_f32x4_extract_lane(v_sum, 3); - - *s = sumf; - -#elif defined __riscv_xtheadvector - - uint32_t utmp[4]; - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - const uint8_t * restrict q3 = x[i].qs; - const uint8_t * restrict qh = x[i].hmask; - const int8_t * restrict q8 = y[i].qs; - - int8_t * scale = (int8_t *)utmp; - int tmp; - __asm__ __volatile__( - "li %[tmp], 12\n\t" - "th.vsetvli zero, %[tmp], e8, m1\n\t" - "th.vlb.v v0, (%[s6b])\n\t" - "th.vmv.v.v v2, v0\n\t" - "li %[tmp], 2\n\t" - "th.vsetvli zero, %[tmp], e64, m1\n\t" - "th.vmv.v.x v9, %[sh]\n\t"\ - "th.vslidedown.vi v1, v0, 1\n\t" - "th.vslide1up.vx v8, v9, zero\n\t" // {0, 0, 4, 4} - "th.vslideup.vi v0, v2, 1\n\t" // {aux[0], aux[1], aux[0], aux[1]} - "li %[tmp], 4\n\t" - "th.vsetvli zero, %[tmp], e32, m1\n\t" - "th.vid.v v9\n\t" - "th.vmv.x.s %[tmp], v1\n\t" - "th.vsll.vi v9, v9, 1\n\t" // {0, 2, 4, 6} - "th.vmv.v.x v1, %[tmp]\n\t" // {aux[2], aux[2], aux[2], aux[2]} - "th.vsrl.vv v4, v1, v9\n\t" - "th.vsrl.vv v2, v0, v8\n\t" - "th.vand.vx v5, v4, %[kmask1]\n\t" - "th.vand.vx v3, v2, %[kmask2]\n\t" - "th.vsll.vi v6, v5, 4\n\t" - "th.vor.vv v7, v6, v3\n\t" - "li %[tmp], 16\n\t" - "th.vsetvli zero, %[tmp], e8, m1\n\t" - "th.vsub.vx v0, v7, %[c]\n\t" - "th.vsb.v v0, (%[scale])" - : [tmp] "=&r" (tmp) - : [sh] "r" (0x0000000400000004), [s6b] "r" (x[i].scales), [c] "r" (32) - , [scale] "r" (scale), [kmask1] "r" (kmask1), [kmask2] "r" (kmask2) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - - uint8_t m = 1; - int isum = 0; - for (int j = 0; j < QK_K; j += 
128) { - __asm__ __volatile__( - // fixme: use v0p7 mask layout directly - "th.vsetvli zero, %[vl32], e8, m2\n\t" - "th.vlb.v v8, (%[q3])\n\t" - "th.vsrl.vi v10, v8, 2\n\t" - "th.vsrl.vi v12, v8, 4\n\t" - "th.vsrl.vi v14, v8, 6\n\t" - "th.vand.vi v8, v8, 3\n\t" - "th.vand.vi v10, v10, 3\n\t" - "th.vand.vi v12, v12, 3\n\t" - "th.vlb.v v2, (%[qh])\n\t" - "th.vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "th.vmseq.vx v0, v4, zero\n\t" - "th.vadd.vi v8, v8, -4, v0.t\n\t" - "th.vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "th.vmseq.vx v0, v4, zero\n\t" - "th.vadd.vi v10, v10, -4, v0.t\n\t" - "th.vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "th.vmseq.vx v0, v4, zero\n\t" - "th.vadd.vi v12, v12, -4, v0.t\n\t" - "th.vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "th.vmseq.vx v0, v4, zero\n\t" - "th.vadd.vi v14, v14, -4, v0.t\n\t" - "th.vsetvli zero, %[vl128], e8, m8\n\t" - "th.vlb.v v0, (%[q8])\n\t" - "th.vsetvli zero, %[vl64], e8, m4\n\t" - "th.vwmul.vv v16, v0, v8\n\t" - "th.vwmul.vv v24, v4, v12\n\t" - "li %[tmp], 16\n\t" - "th.vsetvli zero, %[tmp], e16, m2\n\t" - "th.vmv.v.x v0, zero\n\t" - "th.vwredsum.vs v10, v16, v0\n\t" - "th.vwredsum.vs v9, v18, v0\n\t" - "th.vwredsum.vs v8, v20, v0\n\t" - "th.vwredsum.vs v7, v22, v0\n\t" - "th.vwredsum.vs v11, v24, v0\n\t" - "th.vwredsum.vs v12, v26, v0\n\t" - "th.vwredsum.vs v13, v28, v0\n\t" - "th.vwredsum.vs v14, v30, v0\n\t" - "li %[tmp], 4\n\t" - "th.vsetvli zero, %[tmp], e32, m1\n\t" - "th.vslideup.vi v10, v9, 1\n\t" - "th.vslideup.vi v8, v7, 1\n\t" - "th.vslideup.vi v11, v12, 1\n\t" - "th.vslideup.vi v13, v14, 1\n\t" - "th.vslideup.vi v10, v8, 2\n\t" - "th.vslideup.vi v11, v13, 2\n\t" - "li %[tmp], 8\n\t" - "th.vsetvli zero, %[tmp], e32, m2\n\t" - "th.vlb.v v12, (%[scale])\n\t" - "th.vmul.vv v10, v10, v12\n\t" - "th.vredsum.vs v0, v10, v0\n\t" - "th.vmv.x.s %[tmp], v0\n\t" - "add %[isum], %[isum], %[tmp]" - : [tmp] "=&r" (tmp), [m] "+&r" (m), [isum] "+&r" (isum) - : [vl128] "r" (128), [vl64] "r" (64), [vl32] "r" (32) - , [q3] "r" (q3), [qh] "r" (qh), [scale] "r" (scale), [q8] "r" (q8) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - q3 += 32; q8 += 128; scale += 8; - } - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - sumf += d * isum; - } - - *s = sumf; - -#elif defined __riscv_v - - uint32_t utmp[4]; - float sumf = 0; - uint32_t aux[3]; - const int vector_length = __riscv_vlenb() * 8; - - switch (vector_length) { - case 256: - for (int i = 0; i < nb; ++i) { - - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].hmask; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(aux, x[i].scales, 12); - utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); - utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); - utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); - utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); - - int8_t * scale = (int8_t *)utmp; - for (int j = 0; j < 16; ++j) scale[j] -= 32; - - - size_t vl = 32; - uint8_t m = 1; - - vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); - vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl); - - int sum_t = 0; - - for (int j = 0; j < QK_K; j += 128) { - - vl = 32; - - // load Q3 - vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl); - - vint8m1_t q3_0 = 
__riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl)); - vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl)); - vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl)); - vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl)); - - // compute mask for subtraction - vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl); - vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl); - vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_mu(vmask_0, q3_0, q3_0, 0x4, vl); - m <<= 1; - - vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl); - vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl); - vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_mu(vmask_1, q3_1, q3_1, 0x4, vl); - m <<= 1; - - vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl); - vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl); - vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_mu(vmask_2, q3_2, q3_2, 0x4, vl); - m <<= 1; - - vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl); - vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl); - vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_mu(vmask_3, q3_3, q3_3, 0x4, vl); - m <<= 1; - - // load Q8 and take product with Q3 - vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl); - vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl); - vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl); - vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl); - - vl = 16; - - // retrieve lane to multiply with scale - vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl); - vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl); - vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl); - vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl); - vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl); - vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl); - vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl); - vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl); - - vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl); - vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl); - vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl); - vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl); - - sum_t += __riscv_vmv_x_s_i32m1_i32(isum3); - - q3 += 32; q8 += 128; scale += 8; - - } - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - - sumf += d*sum_t; - - } - break; - case 128: - for (int i = 0; i < nb; ++i) { - const uint8_t * restrict q3 = x[i].qs; - const uint8_t * restrict qh = x[i].hmask; - const int8_t * restrict q8 = y[i].qs; - - int8_t * scale = (int8_t *)utmp; - int tmp; - __asm__ __volatile__( - "vsetivli zero, 12, e8, m1\n\t" - "vle8.v v0, (%[s6b])\n\t" - "vmv1r.v v2, v0\n\t" - "vsetivli zero, 2, e64, m1\n\t" - "vmv.v.x v9, %[sh]\n\t"\ - "vslidedown.vi v1, v0, 1\n\t" - "vslide1up.vx v8, 
v9, zero\n\t" // {0, 0, 4, 4} - "vslideup.vi v0, v2, 1\n\t" // {aux[0], aux[1], aux[0], aux[1]} - "vsetivli zero, 4, e32, m1\n\t" - "vid.v v9\n\t" - "vmv.x.s %[tmp], v1\n\t" - "vsll.vi v9, v9, 1\n\t" // {0, 2, 4, 6} - "vmv.v.x v1, %[tmp]\n\t" // {aux[2], aux[2], aux[2], aux[2]} - "vsrl.vv v4, v1, v9\n\t" - "vsrl.vv v2, v0, v8\n\t" - "vand.vx v5, v4, %[kmask1]\n\t" - "vand.vx v3, v2, %[kmask2]\n\t" - "vsll.vi v6, v5, 4\n\t" - "vor.vv v7, v6, v3\n\t" - "vsetivli zero, 16, e8, m1\n\t" - "vsub.vx v0, v7, %[c]\n\t" - "vse8.v v0, (%[scale])" - : [tmp] "=&r" (tmp) - : [sh] "r" (0x0000000400000004), [s6b] "r" (x[i].scales), [c] "r" (32) - , [scale] "r" (scale), [kmask1] "r" (kmask1), [kmask2] "r" (kmask2) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - - uint8_t m = 1; - int isum = 0; - for (int j = 0; j < QK_K; j += 128) { - __asm__ __volatile__( - "vsetvli zero, %[vl32], e8, m2, ta, mu\n\t" - "vle8.v v8, (%[q3])\n\t" - "vsrl.vi v10, v8, 2\n\t" - "vsrl.vi v12, v8, 4\n\t" - "vsrl.vi v14, v8, 6\n\t" - "vand.vi v8, v8, 3\n\t" - "vand.vi v10, v10, 3\n\t" - "vand.vi v12, v12, 3\n\t" - "vle8.v v2, (%[qh])\n\t" - "vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "vmseq.vx v0, v4, zero\n\t" - "vadd.vi v8, v8, -4, v0.t\n\t" - "vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "vmseq.vx v0, v4, zero\n\t" - "vadd.vi v10, v10, -4, v0.t\n\t" - "vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "vmseq.vx v0, v4, zero\n\t" - "vadd.vi v12, v12, -4, v0.t\n\t" - "vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "vmseq.vx v0, v4, zero\n\t" - "vadd.vi v14, v14, -4, v0.t\n\t" - "vsetvli zero, %[vl128], e8, m8\n\t" - "vle8.v v0, (%[q8])\n\t" - "vsetvli zero, %[vl64], e8, m4\n\t" - "vwmul.vv v16, v0, v8\n\t" - "vwmul.vv v24, v4, v12\n\t" - "vsetivli zero, 16, e16, m2\n\t" - "vmv.v.x v0, zero\n\t" - "vwredsum.vs v10, v16, v0\n\t" - "vwredsum.vs v9, v18, v0\n\t" - "vwredsum.vs v8, v20, v0\n\t" - "vwredsum.vs v7, v22, v0\n\t" - "vwredsum.vs v11, v24, v0\n\t" - "vwredsum.vs v12, v26, v0\n\t" - "vwredsum.vs v13, v28, v0\n\t" - "vwredsum.vs v14, v30, v0\n\t" - "vsetivli zero, 4, e32, m1\n\t" - "vslideup.vi v10, v9, 1\n\t" - "vslideup.vi v8, v7, 1\n\t" - "vslideup.vi v11, v12, 1\n\t" - "vslideup.vi v13, v14, 1\n\t" - "vslideup.vi v10, v8, 2\n\t" - "vslideup.vi v11, v13, 2\n\t" - "vsetivli zero, 8, e32, m2\n\t" - "vle8.v v15, (%[scale])\n\t" - "vsext.vf4 v12, v15\n\t" - "vmul.vv v10, v10, v12\n\t" - "vredsum.vs v0, v10, v0\n\t" - "vmv.x.s %[tmp], v0\n\t" - "add %[isum], %[isum], %[tmp]" - : [tmp] "=&r" (tmp), [m] "+&r" (m), [isum] "+&r" (isum) - : [vl128] "r" (128), [vl64] "r" (64), [vl32] "r" (32) - , [q3] "r" (q3), [qh] "r" (qh), [scale] "r" (scale), [q8] "r" (q8) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - q3 += 32; q8 += 128; scale += 8; - } - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - sumf += d * isum; - } - break; - default: - assert(false && "Unsupported vector length"); - break; - } - - *s = sumf; - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0x3); - const vector signed char lowMask1 = vec_splats((int8_t)0xf); - const vector signed char 
lowMask2 = vec_splats((int8_t)0x30); - const vector int v0 = vec_splats((int32_t)0); - const vector signed char v1 = vec_splats((signed char)0x1); - const vector unsigned char v2 = vec_splats((unsigned char)0x2); - const vector unsigned char v3 = vec_splats((unsigned char)0x3); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - const vector unsigned char v6 = vec_splats((unsigned char)0x6); - const vector signed char off = vec_splats((signed char)0x20); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - UNUSED(kmask1); - UNUSED(kmask2); - - vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); - vector signed char u1 = vec_and(u0, lowMask1); - vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); - vector signed char u3 = (vector signed char)vec_mergeh((vector signed int)u2, (vector signed int)vec_sr(u2, v2)); - vector signed char u30 = vec_sl(vec_and(u3, lowMask), v4); - vector signed char u31 = vec_and(u3, lowMask2); - - u1 = vec_or(u1, u30); - u2 = vec_or(vec_sr(u0, v4), u31); - - vector signed char vscales = (vector signed char)vec_mergeh((vector signed long long)u1, (vector signed long long)u2); - vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].hmask); - vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].hmask); - - vscales = vec_sub(vscales, off); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - vector signed int vsumi4 = v0; - vector signed int vsumi5 = v0; - vector signed int vsumi6 = v0; - vector signed int vsumi7 = v0; - - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - for (int j = 0; j < QK_K/128; ++j) { - __builtin_prefetch(q3, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed char qxs0 = (vector signed char)vec_xl( 0, q3); - vector signed char qxs1 = (vector signed char)vec_xl(16, q3); - q3 += 32; - - //the low 2 bits - vector signed char qxs00 = vec_and(qxs0, lowMask); - vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask); - vector signed char qxs02 = vec_and(vec_sr(qxs0, v4), lowMask); - vector signed char qxs03 = vec_and(vec_sr(qxs0, v6), lowMask); - vector signed char qxs10 = vec_and(qxs1, lowMask); - vector signed char qxs11 = vec_and(vec_sr(qxs1, v2), lowMask); - vector signed char qxs12 = vec_and(vec_sr(qxs1, v4), lowMask); - vector signed char qxs13 = vec_and(vec_sr(qxs1, v6), lowMask); - - //the 3rd bit - vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2); - vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, (vector unsigned char)v1)), v2); - vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2); - vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v3)), v2); - vector signed char qxh10 = vec_sl(vec_andc(v1, qxhs1), v2); - vector signed char qxh11 = vec_sl(vec_andc(v1, vec_sr(qxhs1, (vector unsigned char)v1)), v2); - vector signed char qxh12 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v2)), v2); - vector signed char qxh13 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v3)), v2); - qxhs0 = vec_sr(qxhs0, v4); - qxhs1 = vec_sr(qxhs1, v4); - - vector signed char q3x00 = vec_sub(qxs00, qxh00); - vector signed char q3x01 = 
vec_sub(qxs01, qxh01); - vector signed char q3x02 = vec_sub(qxs02, qxh02); - vector signed char q3x03 = vec_sub(qxs03, qxh03); - vector signed char q3x10 = vec_sub(qxs10, qxh10); - vector signed char q3x11 = vec_sub(qxs11, qxh11); - vector signed char q3x12 = vec_sub(qxs12, qxh12); - vector signed char q3x13 = vec_sub(qxs13, qxh13); - - vector signed char q8y00 = vec_xl( 0, q8); - vector signed char q8y10 = vec_xl( 16, q8); - vector signed char q8y01 = vec_xl( 32, q8); - vector signed char q8y11 = vec_xl( 48, q8); - vector signed char q8y02 = vec_xl( 64, q8); - vector signed char q8y12 = vec_xl( 80, q8); - vector signed char q8y03 = vec_xl( 96, q8); - vector signed char q8y13 = vec_xl(112, q8); - q8 += 128; - - vector signed short vscales_h = vec_unpackh(vscales); - vector signed short vs0 = vec_splat(vscales_h, 0); - vector signed short vs1 = vec_splat(vscales_h, 1); - vector signed short vs2 = vec_splat(vscales_h, 2); - vector signed short vs3 = vec_splat(vscales_h, 3); - vector signed short vs4 = vec_splat(vscales_h, 4); - vector signed short vs5 = vec_splat(vscales_h, 5); - vector signed short vs6 = vec_splat(vscales_h, 6); - vector signed short vs7 = vec_splat(vscales_h, 7); - vscales = vec_sld(vscales, vscales, 8); - - vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00)); - vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01)); - vector signed short qv02 = vec_add(vec_mule(q3x02, q8y02), vec_mulo(q3x02, q8y02)); - vector signed short qv03 = vec_add(vec_mule(q3x03, q8y03), vec_mulo(q3x03, q8y03)); - vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10)); - vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11)); - vector signed short qv12 = vec_add(vec_mule(q3x12, q8y12), vec_mulo(q3x12, q8y12)); - vector signed short qv13 = vec_add(vec_mule(q3x13, q8y13), vec_mulo(q3x13, q8y13)); - - vsumi0 = vec_msum(qv00, vs0, vsumi0); - vsumi1 = vec_msum(qv01, vs2, vsumi1); - vsumi2 = vec_msum(qv02, vs4, vsumi2); - vsumi3 = vec_msum(qv03, vs6, vsumi3); - vsumi4 = vec_msum(qv10, vs1, vsumi4); - vsumi5 = vec_msum(qv11, vs3, vsumi5); - vsumi6 = vec_msum(qv12, vs5, vsumi6); - vsumi7 = vec_msum(qv13, vs7, vsumi7); - } - - vsumi0 = vec_add(vsumi0, vsumi4); - vsumi1 = vec_add(vsumi1, vsumi5); - vsumi2 = vec_add(vsumi2, vsumi6); - vsumi3 = vec_add(vsumi3, vsumi7); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined __loongarch_asx - - const __m128i m32 = __lsx_vreplgr2vr_b(32); - - __m256 acc = (__m256)__lasx_xvldi(0); - - uint32_t aux[3]; - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - // Set up scales - memcpy(aux, x[i].scales, 12); - __m128i scales128 = lsx_set_w( - ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), - ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), - (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), - (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); - scales128 = 
__lsx_vsub_b(scales128, m32); - - const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; - const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); - - // high bit - const __m256i hbits = __lasx_xvld((const __m256i*)x[i].hmask, 0); - - // integer accumulator - __m256i sumi = __lasx_xvldi(0); - - for (int j = 0; j < QK_K/128; ++j) { - // load low 2 bits - const __m256i q3bits = __lasx_xvld((const __m256i*)q3, 0); q3 += 32; - - // prepare low and high bits - const __m256i q3l_0 = __lasx_xvandi_b(q3bits, 3); - const __m256i q3l_1 = __lasx_xvandi_b(__lasx_xvsrli_b(q3bits, 2), 3); - const __m256i q3l_2 = __lasx_xvandi_b(__lasx_xvsrli_b(q3bits, 4), 3); - const __m256i q3l_3 = __lasx_xvsrli_b(q3bits, 6); - const __m256i q3h_0 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 0), 0), 2); - const __m256i q3h_1 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 1), 0), 2); - const __m256i q3h_2 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 2), 0), 2); - const __m256i q3h_3 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 3), 0), 2); - const __m256i q3_0 = __lasx_xvor_v(q3h_0, q3l_0); - const __m256i q3_1 = __lasx_xvor_v(q3h_1, q3l_1); - const __m256i q3_2 = __lasx_xvor_v(q3h_2, q3l_2); - const __m256i q3_3 = __lasx_xvor_v(q3h_3, q3l_3); - - // load Q8 quants - const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_3 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - - __m256i p16_0 = lasx_madd_h_b(q8_0, q3_0); - __m256i p16_1 = lasx_madd_h_b(q8_1, q3_1); - __m256i p16_2 = lasx_madd_h_b(q8_2, q3_2); - __m256i p16_3 = lasx_madd_h_b(q8_3, q3_3); - - // multiply with scales - p16_0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p16_0); - p16_1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p16_1); - p16_2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p16_2); - p16_3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p16_3); - - // accumulate - p16_0 = __lasx_xvadd_w(p16_0, p16_1); - p16_2 = __lasx_xvadd_w(p16_2, p16_3); - sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_2)); - } - // multiply with block scale and accumulate - acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); - } - - *s = hsum_float_8(acc); -#elif defined(__VXE__) || defined(__VXE2__) - uint32_t aux[3]; - uint32_t utmp[4]; - - const int32x4_t v_z = vec_splat_s32(0); - const uint8x16_t v_3m = vec_splat_u8(0x03); - - const uint8x16_t v_0c = vec_splat_u8(1); - const uint8x16_t v_1c = vec_sl(v_0c, 1); - const uint8x16_t v_2c = vec_sl(v_0c, 2); - const uint8x16_t v_3c = vec_sl(v_0c, 3); - - uint8x16_t q3h[4]; - uint8x16_t q3b[2]; - int8x16_t q3bytes[4]; - int8x16_t q8bytes[4]; - uint8x16_t qhbits[2]; - - float sum = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * restrict x0l = x[i].qs; - const uint8_t * restrict x0h = x[i].hmask; - const int8_t * restrict y0 = y[i].qs; - - qhbits[0] = vec_xl(0 , x0h); - qhbits[1] = vec_xl(16, x0h); - - int32_t isum = 0; - - memcpy(aux, x[i].scales, 12); - utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); - utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); - utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 
2) & kmask1) << 4); - utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); - - int8_t * scale = (int8_t *)utmp; - for (int j = 0; j < 16; ++j) scale[j] -= 32; - - for (int j = 0; j < QK_K/128; ++j) { - int32x4_t isum0, isum1, isum2, isum3; - - q3b[0] = vec_xl(0 , x0l); - q3b[1] = vec_xl(16, x0l); - x0l += 32; - - q8bytes[0] = vec_xl(0 , y0); - q8bytes[1] = vec_xl(16 , y0); - q8bytes[2] = vec_xl(32 , y0); - q8bytes[3] = vec_xl(48 , y0); - q8bytes[4] = vec_xl(64 , y0); - q8bytes[5] = vec_xl(80 , y0); - q8bytes[6] = vec_xl(96 , y0); - q8bytes[7] = vec_xl(112, y0); - y0 += 128; - - q3h[0] = vec_sl(vec_andc(v_0c, qhbits[0]), 2); - q3h[1] = vec_sl(vec_andc(v_0c, qhbits[1]), 2); - q3h[2] = vec_sl(vec_andc(v_1c, qhbits[0]), 1); - q3h[3] = vec_sl(vec_andc(v_1c, qhbits[1]), 1); - - q3bytes[0] = vec_sub((int8x16_t)vec_and(q3b[0], v_3m), (int8x16_t)q3h[0]); - q3bytes[1] = vec_sub((int8x16_t)vec_and(q3b[1], v_3m), (int8x16_t)q3h[1]); - q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 2), v_3m), (int8x16_t)q3h[2]); - q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 2), v_3m), (int8x16_t)q3h[3]); - - isum0 = ggml_vec_dot(v_z, q3bytes[0], q8bytes[0]); - isum1 = ggml_vec_dot(v_z, q3bytes[1], q8bytes[1]); - isum2 = ggml_vec_dot(v_z, q3bytes[2], q8bytes[2]); - isum3 = ggml_vec_dot(v_z, q3bytes[3], q8bytes[3]); - - isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0]; - isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1]; - isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2]; - isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3]; - - scale += 4; - - q3h[0] = vec_andc(v_2c, qhbits[0]); - q3h[1] = vec_andc(v_2c, qhbits[1]); - q3h[2] = vec_sr(vec_andc(v_3c, qhbits[0]), 1); - q3h[3] = vec_sr(vec_andc(v_3c, qhbits[1]), 1); - - q3bytes[0] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 4), v_3m), (int8x16_t)q3h[0]); - q3bytes[1] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 4), v_3m), (int8x16_t)q3h[1]); - q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 6), v_3m), (int8x16_t)q3h[2]); - q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 6), v_3m), (int8x16_t)q3h[3]); - - isum0 = ggml_vec_dot(v_z, q3bytes[0], q8bytes[4]); - isum1 = ggml_vec_dot(v_z, q3bytes[1], q8bytes[5]); - isum2 = ggml_vec_dot(v_z, q3bytes[2], q8bytes[6]); - isum3 = ggml_vec_dot(v_z, q3bytes[3], q8bytes[7]); - - isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0]; - isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1]; - isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2]; - isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3]; - - scale += 4; - - if (j == 0) { - qhbits[0] = vec_sr(qhbits[0], 4); - qhbits[1] = vec_sr(qhbits[1], 4); - } - } - - sum += d * isum; - } - - *s = sum; -#else - // scalar version - // This function is written like this so the compiler can manage to vectorize most of it - // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the - // manually vectorized version above. Every other version I tried would run at least 4 times slower. - // The ideal situation would be if we could just write the code once, and the compiler would - // automatically produce the best possible set of machine instructions, instead of us having to manually - // write vectorized versions for AVX, ARM_NEON, etc. 
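/*
 * Editorial aside, not part of the patch: a minimal scalar sketch of how a single
 * q3_K weight is reconstructed, matching the element ordering of the reference
 * loop below. It assumes the 6-bit scales have already been unpacked from the
 * 12 packed bytes (as done with kmask1/kmask2 above) and are still biased by +32;
 * the helper name dequant_one_q3 is illustrative only.
 */
#include <stdint.h>

static inline float dequant_one_q3(const uint8_t * qs,     // 64 bytes of 2-bit quants
                                   const uint8_t * hmask,  // 32 bytes holding the third bit
                                   const int8_t  * scales, // 16 unpacked 6-bit scales (0..63)
                                   float d,                // super-block scale (x->d)
                                   int idx) {              // element index, 0..255
    const int     byte  = (idx % 32) + (idx / 128) * 32;   // which qs byte holds the 2 low bits
    const int     shift = ((idx / 32) % 4) * 2;            // which 2-bit field inside that byte
    const uint8_t m     = (uint8_t)(1u << (idx / 32));     // hmask bit for this group of 32
    int q = (qs[byte] >> shift) & 3;
    if (!(hmask[idx % 32] & m)) {
        q -= 4;   // a cleared high bit means the value is shifted down by 4
    }
    return d * (scales[idx / 16] - 32) * (float) q;
}
/*
 * The SIMD paths above compute exactly this, 32/64/128 elements at a time,
 * folding the -4 adjustment into the hmask masking and the (scale - 32)
 * factor into widening multiply-adds before the final accumulation.
 */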
- - int8_t aux8[QK_K]; - int16_t aux16[8]; - float sums [8]; - int32_t aux32[8]; - memset(sums, 0, 8*sizeof(float)); - - uint32_t auxs[4]; - const int8_t * scales = (const int8_t*)auxs; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT hm = x[i].hmask; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - memset(aux32, 0, 8*sizeof(int32_t)); - int8_t * GGML_RESTRICT a = aux8; - uint8_t m = 1; - for (int j = 0; j < QK_K; j += 128) { - for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; - for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); - a += 32; m <<= 1; - for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; - for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); - a += 32; m <<= 1; - for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; - for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); - a += 32; m <<= 1; - for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; - for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); - a += 32; m <<= 1; - q3 += 32; - } - a = aux8; - - memcpy(auxs, x[i].scales, 12); - uint32_t tmp = auxs[2]; - auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); - auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); - auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); - auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); - for (int j = 0; j < QK_K/16; ++j) { - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; - q8 += 8; a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; - q8 += 8; a += 8; - } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - } - for (int l = 0; l < 8; ++l) sumf += sums[l]; - *s = sumf; - -#endif - -} - -void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); -#ifdef __ARM_FEATURE_MATMUL_INT8 - assert((nrc == 2) || (nrc == 1)); -#else - assert(nrc == 1); -#endif - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q4_K * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - - static const uint32_t kmask1 = 0x3f3f3f3f; - static const uint32_t kmask2 = 0x0f0f0f0f; - static const uint32_t kmask3 = 0x03030303; - - uint32_t utmp[4]; - -#if defined(__ARM_FEATURE_MATMUL_INT8) - if (nrc == 2) { - const block_q4_K * GGML_RESTRICT x0 = x; - const block_q4_K * GGML_RESTRICT x1 = (const block_q4_K *) ((const uint8_t *)vx + bx); - const block_q8_K * GGML_RESTRICT y0 = y; - const block_q8_K * GGML_RESTRICT y1 = (const block_q8_K *) ((const uint8_t *)vy + by); - - const uint8x16_t m4b = vdupq_n_u8(0x0f); - - float32x4_t vfsum = vdupq_n_f32(0.0f); - - for (int i = 0; i < nb; ++i, ++x0, ++x1, ++y0, ++y1) { - const uint8_t * GGML_RESTRICT qx0 = x0->qs; - const uint8_t * GGML_RESTRICT qx1 = x1->qs; - const int8_t * GGML_RESTRICT qy0 = y0->qs; - const int8_t * GGML_RESTRICT qy1 = y1->qs; - - // decode scales and mins - int8_t x0_scales[8], x1_scales[8]; - int16x8_t x0_mins, x1_mins; - { - uint32_t scales_mins[3]; - memcpy(scales_mins, x0->scales, 12); - const uint32_t mins_0_3 = scales_mins[1] & kmask1; - const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4); - const uint32x2_t mins 
= {mins_0_3, mins_4_7}; - x0_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins))); - uint32_t scales[2]; - scales[0] = scales_mins[0] & kmask1; // scales 0~3 - scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7 - memcpy(x0_scales, scales, 8); - } - { - uint32_t scales_mins[3]; - memcpy(scales_mins, x1->scales, 12); - const uint32_t mins_0_3 = scales_mins[1] & kmask1; - const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4); - const uint32x2_t mins = {mins_0_3, mins_4_7}; - x1_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins))); - uint32_t scales[2]; - scales[0] = scales_mins[0] & kmask1; // scales 0~3 - scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7 - memcpy(x1_scales, scales, 8); - } - - int32x4_t visum = {0}; - - // process 64 data points per iteration, totally 256 data points - for (int j = 0; j < QK_K / 64; ++j, qx0 += 32, qx1 += 32, qy0 += 64, qy1 += 64) { - const int8x16x4_t vy0 = vld1q_s8_x4(qy0); - const int8x16x4_t vy1 = vld1q_s8_x4(qy1); - - int8x16_t vx0[4], vx1[4]; - { - const uint8x16x2_t vv = vld1q_u8_x2(qx0); - vx0[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b)); - vx0[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b)); - vx0[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4)); - vx0[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4)); - } - { - const uint8x16x2_t vv = vld1q_u8_x2(qx1); - vx1[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b)); - vx1[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b)); - vx1[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4)); - vx1[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4)); - } - - // process 32 data points (share same block scale) per iteration - for (int k = 0; k < 2; ++k) { - const int blk = j * 2 + k; - const int32x4_t block_scale = { - x0_scales[blk], - x0_scales[blk], - x1_scales[blk], - x1_scales[blk], - }; - - int32x4_t vr = {0}; - for (int l = 0; l < 2; ++l) { - const int idx = k * 2 + l; - const int64x2_t vx0_s64 = vreinterpretq_s64_s8(vx0[idx]); - const int64x2_t vx1_s64 = vreinterpretq_s64_s8(vx1[idx]); - const int64x2_t vy0_s64 = vreinterpretq_s64_s8(vy0.val[idx]); - const int64x2_t vy1_s64 = vreinterpretq_s64_s8(vy1.val[idx]); - const int8x16_t vx_l = vreinterpretq_s8_s64(vzip1q_s64(vx0_s64, vx1_s64)); - const int8x16_t vx_h = vreinterpretq_s8_s64(vzip2q_s64(vx0_s64, vx1_s64)); - const int8x16_t vy_l = vreinterpretq_s8_s64(vzip1q_s64(vy0_s64, vy1_s64)); - const int8x16_t vy_h = vreinterpretq_s8_s64(vzip2q_s64(vy0_s64, vy1_s64)); - vr = vmmlaq_s32(vr, vx_l, vy_l); - vr = vmmlaq_s32(vr, vx_h, vy_h); - } - // apply block scale, will NOT overflow - // block_scale * sum_256(int4*int8) <= 2^(8+8+4+8) = 28 bits - visum = vmlaq_s32(visum, vr, block_scale); - } - } - - // adjust bias, apply superblock scale - { - int32_t bias[4]; - // no obvious uplift from sve sdot-16, just use neon mul add - const int16x8_t y0_sums = vpaddq_s16(vld1q_s16(y0->bsums), vld1q_s16(y0->bsums+8)); - const int16x8_t y1_sums = vpaddq_s16(vld1q_s16(y1->bsums), vld1q_s16(y1->bsums+8)); - bias[0] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x0_mins)), - vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x0_mins)))); - bias[1] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x0_mins)), - vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x0_mins)))); - bias[2] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x1_mins)), - 
vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x1_mins)))); - bias[3] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x1_mins)), - vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x1_mins)))); - const float32x4_t dmins = { - GGML_FP16_TO_FP32(x0->dmin) * y0->d, - GGML_FP16_TO_FP32(x0->dmin) * y1->d, - GGML_FP16_TO_FP32(x1->dmin) * y0->d, - GGML_FP16_TO_FP32(x1->dmin) * y1->d, - }; - vfsum = vmlsq_f32(vfsum, vcvtq_f32_s32(vld1q_s32(bias)), dmins); - - const float32x4_t superblock_scale = { - GGML_FP16_TO_FP32(x0->d) * y0->d, - GGML_FP16_TO_FP32(x0->d) * y1->d, - GGML_FP16_TO_FP32(x1->d) * y0->d, - GGML_FP16_TO_FP32(x1->d) * y1->d, - }; - vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale); - } - } - - // vfsum = ABCD -> ACBD - // AC -> s, BD -> (s+bs) - vfsum = vzip1q_f32(vfsum, vextq_f32(vfsum, vfsum, 2)); - vst1_f32(s, vget_low_f32 (vfsum)); - vst1_f32(s + bs, vget_high_f32(vfsum)); - - return; - } -#endif - -#ifdef __ARM_FEATURE_SVE - float sumf = 0; - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); - - memcpy(utmp, x[i].scales, K_SCALE_SIZE); - - uint32x2_t mins8 = { 0 }; - mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); - mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); - - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[0] &= kmask1; - - const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); - const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), - vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); - sumf -= dmin * vaddvq_s32(prod); - - const uint8_t * scales = (const uint8_t *)utmp; - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const int vector_length = ggml_cpu_get_sve_cnt()*8; - const svuint8_t m4b = svdup_n_u8(0xf); - const svint32_t mzero = svdup_n_s32(0); - svint32_t sumi1 = svdup_n_s32(0); - svint32_t sumi1_1 = svdup_n_s32(0); - svint32_t sumi1_2 = svdup_n_s32(0); - svint32_t sumi2 = svdup_n_s32(0); - svint32_t sumi2_1 = svdup_n_s32(0); - svint32_t sumi2_2 = svdup_n_s32(0); - switch (vector_length) { - case 128: - { - for (int j = 0; j < QK_K/64; ++j) { - svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), m4b)); - svint8_t q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; - sumi1_1 = svmla_n_s32_x(svptrue_b32(), sumi1_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); - q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), m4b)); - q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; - sumi1_2 = svmla_n_s32_x(svptrue_b32(), sumi1_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); - - q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), 4)); - q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; - sumi2_1 = svmla_n_s32_x(svptrue_b32(), sumi2_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); - q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), 4)); - q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; - sumi2_2 = svmla_n_s32_x(svptrue_b32(), sumi2_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); - q4 += 32; - } - sumi1 = svadd_s32_x(svptrue_b32(), sumi1_1, sumi1_2); - sumi2 = svadd_s32_x(svptrue_b32(), sumi2_1, sumi2_2); - sumf 
+= d * (svaddv_s32(svptrue_b32(), svadd_s32_x(svptrue_b32(), sumi1, sumi2))); - } break; - case 256: - case 512: - { - for (int j = 0; j < QK_K/64; ++j) { - const svuint8_t q4bits = svld1_u8(svptrue_pat_b8(SV_VL32), q4); q4 += 32; - svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_pat_b8(SV_VL32), q4bits, m4b)); - svint8_t q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32; - sumi1 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); - - q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q4bits, 4)); - q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32; - sumi2 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); - } - sumf += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), sumi1, sumi2))); - } break; - default: - assert(false && "Unsupported vector length"); - break; - } - } - *s = sumf; -#elif defined __ARM_NEON - const uint8x16_t m4b = vdupq_n_u8(0xf); - const int32x4_t mzero = vdupq_n_s32(0); - - ggml_int8x16x2_t q4bytes; - ggml_int8x16x2_t q8bytes; - - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); - - memcpy(utmp, x[i].scales, 12); - - uint32x2_t mins8 = { 0 }; - mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); - mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); - - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[0] &= kmask1; - - const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); - const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), - vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); - sumf -= dmin * vaddvq_s32(prod); - - const uint8_t * scales = (const uint8_t *)utmp; - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - int32_t sumi1 = 0; - int32_t sumi2 = 0; - - for (int j = 0; j < QK_K/64; ++j) { - const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; - - q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; - q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); - q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); - - const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); - sumi1 += vaddvq_s32(p1) * scales[2*j+0]; - - q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; - q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); - q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); - - const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); - - sumi2 += vaddvq_s32(p2) * scales[2*j+1]; - } - - sumf += d * (sumi1 + sumi2); - - } - - *s = sumf; - -#elif defined __wasm_simd128__ - const uint8_t * scales = (const uint8_t*)&utmp[0]; - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); // Corrected sign - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - // Process scales and mins - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const 
uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - // Sum mins * q8sums - int32_t sumi = 0; - const int16_t * GGML_RESTRICT q8sums = y[i].bsums; - const uint8_t * m = (const uint8_t *)&utmp[2]; - for (int j = 0; j < 16; j += 2) { - sumi += (q8sums[j] + q8sums[j+1]) * m[j/2]; - } - sumf -= dmin * sumi; - - int32_t sumi1 = 0; - int32_t sumi2 = 0; - - for (int j = 0; j < QK_K/64; ++j) { - // Load 64 4-bit weights (32 bytes) - const v128_t q4x0 = wasm_v128_load(q4); - const v128_t q4x1 = wasm_v128_load(q4 + 16); - q4 += 32; - - // Split into low/high nibbles - const v128_t q4l0 = wasm_v128_and(q4x0, wasm_i8x16_splat(0x0F)); - const v128_t q4h0 = wasm_u8x16_shr(q4x0, 4); - const v128_t q4l1 = wasm_v128_and(q4x1, wasm_i8x16_splat(0x0F)); - const v128_t q4h1 = wasm_u8x16_shr(q4x1, 4); - - // Load 64 8-bit values (64 bytes) - const v128_t q8x0 = wasm_v128_load(q8); - const v128_t q8x1 = wasm_v128_load(q8 + 16); - const v128_t q8x2 = wasm_v128_load(q8 + 32); - const v128_t q8x3 = wasm_v128_load(q8 + 48); - q8 += 64; - - // Low nibble products - v128_t vacc1 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q4l0), - wasm_i16x8_extend_low_i8x16(q8x0) - ); - vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q4l0), - wasm_i16x8_extend_high_i8x16(q8x0) - )); - vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q4l1), - wasm_i16x8_extend_low_i8x16(q8x1) - )); - vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q4l1), - wasm_i16x8_extend_high_i8x16(q8x1) - )); - - // High nibble products - v128_t vacc2 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q4h0), - wasm_i16x8_extend_low_i8x16(q8x2) - ); - vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q4h0), - wasm_i16x8_extend_high_i8x16(q8x2) - )); - vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q4h1), - wasm_i16x8_extend_low_i8x16(q8x3) - )); - vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q4h1), - wasm_i16x8_extend_high_i8x16(q8x3) - )); - - // Accumulate scaled results - int32_t vacc1_sum = wasm_i32x4_extract_lane(vacc1, 0) + wasm_i32x4_extract_lane(vacc1, 1) + - wasm_i32x4_extract_lane(vacc1, 2) + wasm_i32x4_extract_lane(vacc1, 3); - sumi1 += vacc1_sum * scales[2*j]; - - int32_t vacc2_sum = wasm_i32x4_extract_lane(vacc2, 0) + wasm_i32x4_extract_lane(vacc2, 1) + - wasm_i32x4_extract_lane(vacc2, 2) + wasm_i32x4_extract_lane(vacc2, 3); - sumi2 += vacc2_sum * scales[2*j+1]; - } - - sumf += d * (sumi1 + sumi2); - } - - *s = sumf; - -#elif defined __AVX2__ - - const __m256i m4 = _mm256_set1_epi8(0xF); - - __m256 acc = _mm256_setzero_ps(); - __m128 acc_m = _mm_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); - - const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums); - const 
__m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); - const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); - acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m); - - const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); - const __m256i scales = MM256_SET_M128I(sc128, sc128); - - __m256i sumi = _mm256_setzero_si256(); - - for (int j = 0; j < QK_K/64; ++j) { - - const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0)); - const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1)); - - const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; - const __m256i q4l = _mm256_and_si256(q4bits, m4); - const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); - - const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - __m256i p16l = _mm256_maddubs_epi16(q4l, q8l); - p16l = _mm256_madd_epi16(scale_l, p16l); - - const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - __m256i p16h = _mm256_maddubs_epi16(q4h, q8h); - p16h = _mm256_madd_epi16(scale_h, p16h); - const __m256i sumj = _mm256_add_epi32(p16l, p16h); - - sumi = _mm256_add_epi32(sumi, sumj); - } - - __m256 vd = _mm256_set1_ps(d); - acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); - - } - - acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); - acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); - - *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); - -#elif defined __AVX__ - - const __m128i m4 = _mm_set1_epi8(0xF); - const __m128i m2 = _mm_set1_epi8(0x2); - - __m256 acc = _mm256_setzero_ps(); - __m128 acc_m = _mm_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]); - const __m128i scales = _mm_cvtepu8_epi16(utmps); - const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps)); - - const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]); - const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]); - const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1); - const __m128i prod = _mm_madd_epi16(mins, q8s); - acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m); - - __m128i sumi_0 = _mm_setzero_si128(); - __m128i sumi_1 = _mm_setzero_si128(); - - __m128i shuffle = _mm_set1_epi16(0x0100); - for (int j = 0; j < QK_K/64; ++j) { - - const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle); - shuffle = _mm_add_epi16(shuffle, m2); - const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle); - shuffle = _mm_add_epi16(shuffle, m2); - - __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16; - const __m128i q4l_0 = _mm_and_si128(q4bits, m4); - const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4); - q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16; - const __m128i q4l_1 = _mm_and_si128(q4bits, m4); - const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4); - - const __m128i q8l_0 = 
_mm_loadu_si128((const __m128i*)q8); q8 += 16; - __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0); - p16l = _mm_madd_epi16(scale_l, p16l); - sumi_0 = _mm_add_epi32(sumi_0, p16l); - const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - p16l = _mm_maddubs_epi16(q4l_1, q8l_1); - p16l = _mm_madd_epi16(scale_l, p16l); - sumi_1 = _mm_add_epi32(sumi_1, p16l); - - const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0); - p16h = _mm_madd_epi16(scale_h, p16h); - sumi_0 = _mm_add_epi32(sumi_0, p16h); - const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - p16h = _mm_maddubs_epi16(q4h_1, q8h_1); - p16h = _mm_madd_epi16(scale_h, p16h); - sumi_1 = _mm_add_epi32(sumi_1, p16h); - - } - - __m256 vd = _mm256_set1_ps(d); - __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); - acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc); - - } - - acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); - acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); - - *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); - -#elif defined __riscv_xtheadvector - - const uint8_t * scales = (const uint8_t*)&utmp[0]; - const uint8_t * mins = (const uint8_t*)&utmp[2]; - - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - int tmp, tmp2, sumi; - __asm__ __volatile__( - "li %[t1], 12\n\t" - "th.vsetvli zero, %[t1], e8, m1\n\t" - "th.vlb.v v1, (%[s6b])\n\t" // {aux[0], aux[1], aux[2]} - "li %[t1], 4\n\t" - "th.vsetvli zero, %[t1], e32, m1\n\t" - "th.vslidedown.vi v2, v1, 2\n\t" - "th.vmv.v.v v3, v2\n\t" - "th.vslideup.vi v2, v3, 1\n\t" // {aux[2], aux[2]} - "li %[t1], 2\n\t" - "th.vsetvli zero, %[t1], e32, m1\n\t" - "th.vmv.v.i v4, 4\n\t" - "th.vand.vx v8, v1, %[kmask1]\n\t" - "th.vslide1up.vx v5, v4, zero\n\t" // {0, 4} - "th.vsrl.vi v6, v1, 6\n\t" - "th.vsrl.vv v7, v2, v5\n\t" - "th.vand.vx v0, v6, %[kmask3]\n\t" - "th.vand.vx v2, v7, %[kmask2]\n\t" - "th.vsll.vi v6, v0, 4\n\t" - "li %[t2], 8\n\t" - "addi %[t1], %[utmp], 4\n\t" - "th.vor.vv v1, v6, v2\n\t" - "th.vssw.v v8, (%[utmp]), %[t2]\n\t" - "th.vssw.v v1, (%[t1]), %[t2]\n\t" - "th.vsetvli zero, zero, e32, m2\n\t" // vl == 8 - "th.vlw.v v2, (%[bsums])\n\t" - "th.vsetvli zero, %[t2], e16, m1\n\t" - "th.vnsrl.vi v0, v2, 0\n\t" - "th.vnsrl.vi v1, v2, 16\n\t" - "th.vadd.vv v2, v0, v1\n\t" - "th.vlbu.v v4, (%[mins])\n\t" - "th.vwmul.vv v6, v4, v2\n\t" - "th.vmv.v.x v0, zero\n\t" - "th.vsetvli zero, %[t2], e32, m2\n\t" - "th.vredsum.vs v0, v6, v0\n\t" - "th.vmv.x.s %[sumi], v0" - : [t1] "=&r" (tmp), [t2] "=&r" (tmp2), [sumi] "=&r" (sumi) - : [bsums] "r" (y[i].bsums), [mins] "r" (mins), [utmp] "r" (utmp) - , [s6b] "r" (x[i].scales), [kmask1] "r" (kmask1) - , [kmask2] "r" (kmask2), [kmask3] "r" (kmask3) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - sumf -= dmin * sumi; - - const uint8_t * restrict q4 = x[i].qs; - const int8_t * restrict q8 = y[i].qs; - - sumi = 0; - const uint8_t * scale = scales; - - for (int j = 0; j < QK_K/128; ++j) { - int vl128 = 128, vl64 = 64, vl32 = 32; - __asm__ __volatile__( - "th.vsetvli zero, %[vl128], e8, m8\n\t" - "th.vlb.v v8, (%[q8])\n\t" - "th.vsetvli zero, %[vl64], e8, m4\n\t" - "th.vlb.v v0, (%[q4])\n\t" - "th.vsrl.vi v4, v0, 4\n\t" - 
"th.vand.vi v0, v0, 0xF\n\t" - "th.vsetvli zero, %[vl32], e8, m2\n\t" - "th.vwmul.vv v28, v6, v14\n\t" - "th.vwmul.vv v20, v4, v10\n\t" - "th.vwmul.vv v24, v2, v12\n\t" - "th.vwmul.vv v16, v0, v8\n\t" - "li %[tmp], 4\n\t" - "th.vsetvli zero, %[tmp], e32, m1\n\t" - "th.vlbu.v v1, (%[scale])\n\t" - "th.vmv.v.x v0, zero\n\t" - "th.vsetvli zero, %[vl32], e16, m4\n\t" - "th.vwredsum.vs v6, v24, v0\n\t" - "th.vwredsum.vs v7, v28, v0\n\t" - "th.vwredsum.vs v4, v16, v0\n\t" - "th.vwredsum.vs v5, v20, v0\n\t" - "th.vsetvli zero, %[tmp], e32, m1\n\t" - "th.vslideup.vi v6, v7, 1\n\t" - "th.vslideup.vi v4, v5, 1\n\t" - "th.vslideup.vi v4, v6, 2\n\t" - "th.vmul.vv v8, v4, v1\n\t" - "th.vredsum.vs v0, v8, v0\n\t" - "th.vmv.x.s %[tmp], v0\n\t" - "add %[sumi], %[sumi], %[tmp]" - : [tmp] "=&r" (tmp), [sumi] "+&r" (sumi) - : [vl128] "r" (vl128), [vl64] "r" (vl64), [vl32] "r" (vl32) - , [q4] "r" (q4), [q8] "r" (q8), [scale] "r" (scale) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - - q4 += 64; q8 += 128; scale += 4; - } - - sumf += d * sumi; - - } - - *s = sumf; - -#elif defined __riscv_v - - const uint8_t * scales = (const uint8_t*)&utmp[0]; - const uint8_t * mins = (const uint8_t*)&utmp[2]; - - float sumf = 0; - const int vector_length = __riscv_vlenb() * 8; - - switch (vector_length) { - case 256: - for (int i = 0; i < nb; ++i) { - - size_t vl = 8; - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl); - vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl); - vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl); - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl); - vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl)); - vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl); - - vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); - sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi); - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - vl = 32; - - int32_t sum_1 = 0; - int32_t sum_2 = 0; - - vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1); - - for (int j = 0; j < QK_K/64; ++j) { - // load Q4 - vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl); - - // load Q8 and multiply it with lower Q4 nibble - vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl); - vint8m1_t q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl)); - vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl); - vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl); - - sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0]; - - // load Q8 and multiply it with upper Q4 nibble - vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl); - vint8m1_t q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl)); - vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl); - vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl); - - sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) 
* scales[2*j+1]; - - q4 += 32; q8 += 64; - - } - - sumf += d*(sum_1 + sum_2); - - } - break; - case 128: - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - int tmp, tmp2, sumi; - __asm__ __volatile__( - "vsetivli zero, 12, e8, m1\n\t" - "vle8.v v1, (%[s6b])\n\t" // {aux[0], aux[1], aux[2]} - "vsetivli zero, 4, e32, m1\n\t" - "vslidedown.vi v2, v1, 2\n\t" - "vmv1r.v v3, v2\n\t" - "vslideup.vi v2, v3, 1\n\t" // {aux[2], aux[2]} - "vsetivli zero, 2, e32, m1\n\t" - "vmv.v.i v4, 4\n\t" - "vand.vx v8, v1, %[kmask1]\n\t" - "vslide1up.vx v5, v4, zero\n\t" // {0, 4} - "vsrl.vi v6, v1, 6\n\t" - "vsrl.vv v7, v2, v5\n\t" - "vand.vx v0, v6, %[kmask3]\n\t" - "vand.vx v2, v7, %[kmask2]\n\t" - "vsll.vi v6, v0, 4\n\t" - "li %[t2], 8\n\t" - "addi %[t1], %[utmp], 4\n\t" - "vor.vv v1, v6, v2\n\t" - "vsse32.v v8, (%[utmp]), %[t2]\n\t" - "vsse32.v v1, (%[t1]), %[t2]\n\t" - "vsetivli zero, 8, e16, m1\n\t" - "vle32.v v2, (%[bsums])\n\t" - "vnsrl.wi v0, v2, 0\n\t" - "vnsrl.wi v1, v2, 16\n\t" - "vadd.vv v2, v0, v1\n\t" - "vle8.v v3, (%[mins])\n\t" - "vzext.vf2 v4, v3\n\t" - "vwmul.vv v6, v4, v2\n\t" - "vmv.v.x v0, zero\n\t" - "vsetivli zero, 8, e32, m2\n\t" - "vredsum.vs v0, v6, v0\n\t" - "vmv.x.s %[sumi], v0" - : [t1] "=&r" (tmp), [t2] "=&r" (tmp2), [sumi] "=&r" (sumi) - : [bsums] "r" (y[i].bsums), [mins] "r" (mins), [utmp] "r" (utmp) - , [s6b] "r" (x[i].scales), [kmask1] "r" (kmask1) - , [kmask2] "r" (kmask2), [kmask3] "r" (kmask3) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - sumf -= dmin * sumi; - - const uint8_t * restrict q4 = x[i].qs; - const int8_t * restrict q8 = y[i].qs; - - sumi = 0; - const uint8_t * scale = scales; - - for (int j = 0; j < QK_K/128; ++j) { - int vl128 = 128, vl64 = 64, vl32 = 32; - __asm__ __volatile__( - "vsetvli zero, %[vl128], e8, m8\n\t" - "vle8.v v8, (%[q8])\n\t" - "vsetvli zero, %[vl64], e8, m4\n\t" - "vle8.v v0, (%[q4])\n\t" - "vsrl.vi v4, v0, 4\n\t" - "vand.vi v0, v0, 0xF\n\t" - "vsetvli zero, %[vl32], e8, m2\n\t" - "vwmul.vv v28, v6, v14\n\t" - "vwmul.vv v20, v4, v10\n\t" - "vwmul.vv v24, v2, v12\n\t" - "vwmul.vv v16, v0, v8\n\t" - "vsetivli zero, 4, e32, m1\n\t" - "vle8.v v2, (%[scale])\n\t" - "vmv.v.x v0, zero\n\t" - "vzext.vf4 v1, v2\n\t" - "vsetvli zero, %[vl32], e16, m4\n\t" - "vwredsum.vs v6, v24, v0\n\t" - "vwredsum.vs v7, v28, v0\n\t" - "vwredsum.vs v4, v16, v0\n\t" - "vwredsum.vs v5, v20, v0\n\t" - "vsetivli zero, 4, e32, m1\n\t" - "vslideup.vi v6, v7, 1\n\t" - "vslideup.vi v4, v5, 1\n\t" - "vslideup.vi v4, v6, 2\n\t" - "vmul.vv v8, v4, v1\n\t" - "vredsum.vs v0, v8, v0\n\t" - "vmv.x.s %[tmp], v0\n\t" - "add %[sumi], %[sumi], %[tmp]" - : [tmp] "=&r" (tmp), [sumi] "+&r" (sumi) - : [vl128] "r" (vl128), [vl64] "r" (vl64), [vl32] "r" (vl32) - , [q4] "r" (q4), [q8] "r" (q8), [scale] "r" (scale) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - - q4 += 64; q8 += 128; scale += 4; - } - - sumf += d * sumi; - } - break; - default: - assert(false && "Unsupported vector length"); - break; - } - - *s = sumf; - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = 
vec_splats((signed char)0xF); - const vector signed char lowMask1 = vec_splats((int8_t)0x3f); - const vector signed char lowMask2 = vec_splats((int8_t)0x30); - const vector int v0 = vec_splats((int32_t)0); - const vector unsigned char v2 = vec_splats((uint8_t)2); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); - vector float vdmin = vec_mul(vxmin, vyd); - - vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); - vector signed short q8ysums1 = vec_xl(16, y[i].bsums); - - UNUSED(kmask1); - UNUSED(kmask2); - UNUSED(kmask3); - UNUSED(utmp); - - vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); - vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2); - vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); - vector signed char u3 = vec_sr(u2, v4); - - vector signed char u30 = u1; - vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3); - - u1 = vec_and(u0, lowMask1); - u2 = vec_or(u30, u31); - - vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2); - - vector signed short vscales = vec_unpackh(utmps); - vector signed short q4xmins = vec_unpackl(utmps); - vector signed short q4xmins0 = vec_mergeh(q4xmins, q4xmins); - vector signed short q4xmins1 = vec_mergel(q4xmins, q4xmins); - - vector signed int prod0 = vec_mule(q4xmins0, q8ysums0); - vector signed int prod1 = vec_mule(q4xmins1, q8ysums1); - vector signed int prod2 = vec_mulo(q4xmins0, q8ysums0); - vector signed int prod3 = vec_mulo(q4xmins1, q8ysums1); - - vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); - vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); - vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); - vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - for (int j = 0; j < QK_K/64; j+=2) { - __builtin_prefetch(q4, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed char qxs0 = (vector signed char)vec_xl( 0, q4); - vector signed char qxs1 = (vector signed char)vec_xl(16, q4); - vector signed char qxs2 = (vector signed char)vec_xl(32, q4); - vector signed char qxs3 = (vector signed char)vec_xl(48, q4); - q4 += 64; - - vector unsigned char q4x00 = (vector unsigned char)vec_and(qxs0, lowMask); - vector unsigned char q4x01 = (vector unsigned char)vec_sr(qxs0, v4); - vector unsigned char q4x10 = (vector unsigned char)vec_and(qxs1, lowMask); - vector unsigned char q4x11 = (vector unsigned char)vec_sr(qxs1, v4); - vector unsigned char q4x20 = (vector unsigned char)vec_and(qxs2, lowMask); - vector unsigned char q4x21 = (vector unsigned char)vec_sr(qxs2, v4); - vector unsigned char q4x30 = (vector unsigned char)vec_and(qxs3, lowMask); - vector unsigned char q4x31 = (vector unsigned char)vec_sr(qxs3, v4); - - vector signed char q8y00 = vec_xl( 0, q8); - vector signed char q8y10 = vec_xl( 16, q8); - vector signed char 
q8y01 = vec_xl( 32, q8); - vector signed char q8y11 = vec_xl( 48, q8); - vector signed char q8y20 = vec_xl( 64, q8); - vector signed char q8y30 = vec_xl( 80, q8); - vector signed char q8y21 = vec_xl( 96, q8); - vector signed char q8y31 = vec_xl(112, q8); - q8 += 128; - - vector signed int qv00 = vec_msum(q8y00, q4x00, v0); - vector signed int qv01 = vec_msum(q8y01, q4x01, v0); - vector signed int qv10 = vec_msum(q8y10, q4x10, v0); - vector signed int qv11 = vec_msum(q8y11, q4x11, v0); - vector signed int qv20 = vec_msum(q8y20, q4x20, v0); - vector signed int qv21 = vec_msum(q8y21, q4x21, v0); - vector signed int qv30 = vec_msum(q8y30, q4x30, v0); - vector signed int qv31 = vec_msum(q8y31, q4x31, v0); - - vector signed int vscales_h = vec_unpackh(vscales); - vector signed int vs0 = vec_splat(vscales_h, 0); - vector signed int vs1 = vec_splat(vscales_h, 1); - vector signed int vs2 = vec_splat(vscales_h, 2); - vector signed int vs3 = vec_splat(vscales_h, 3); - vscales = vec_sld(vscales, vscales, 8); - - vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0); - vsumi1 = vec_add(vec_mul(qv01, vs1), vsumi1); - vsumi2 = vec_add(vec_mul(qv20, vs2), vsumi2); - vsumi3 = vec_add(vec_mul(qv21, vs3), vsumi3); - - vsumi0 = vec_add(vec_mul(qv10, vs0), vsumi0); - vsumi1 = vec_add(vec_mul(qv11, vs1), vsumi1); - vsumi2 = vec_add(vec_mul(qv30, vs2), vsumi2); - vsumi3 = vec_add(vec_mul(qv31, vs3), vsumi3); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined __loongarch_asx - - __m256 acc = (__m256)__lasx_xvldi(0); - __m128 acc_m = (__m128)__lsx_vldi(0); - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const __m128i mins_and_scales128 = lsx_set_w(utmp[3], utmp[2], utmp[1], utmp[0]); - const __m128i mins128 = __lsx_vexth_h_b(mins_and_scales128); - const __m128i scales128 = __lsx_vsllwil_h_b(mins_and_scales128, 0); - - const __m256i q8sums = __lasx_xvld((const __m256i*)y[i].bsums, 0); - const __m128i q8s = lsx_hadd_h(lasx_extracti128(q8sums, 0), lasx_extracti128(q8sums, 1)); - const __m128i prod = lsx_madd_h(mins128, q8s); - acc_m = __lsx_vfmadd_s(__lsx_vreplfr2vr_s(dmin), __lsx_vffint_s_w(prod), acc_m); - - const __m256i scales = lasx_insertf128(scales128, scales128); - - __m256i sumi = __lasx_xvldi(0); - - for (int j = 0; j < QK_K/64; ++j) { - - const __m256i scale_l = lasx_xvrepl128vei_h(scales, 2 * j + 0); - const __m256i scale_h = lasx_xvrepl128vei_h(scales, 2 * j + 1); - - const __m256i q4bits = __lasx_xvld((const __m256i*)q4, 0); q4 += 32; - const __m256i q4l = __lasx_xvandi_b(q4bits, 0xf); - const __m256i q4h = __lasx_xvsrli_b(q4bits, 4); - - const __m256i q8l = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - __m256i p16l = 
lasx_madd_h_b(q4l, q8l); - p16l = lasx_madd_h(scale_l, p16l); - - const __m256i q8h = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - __m256i p16h = lasx_madd_h_b(q4h, q8h); - p16h = lasx_madd_h(scale_h, p16h); - const __m256i sumj = __lasx_xvadd_w(p16l, p16h); - - sumi = __lasx_xvadd_w(sumi, sumj); - } - - __m256 vd = __lasx_xvreplfr2vr_s(d); - acc = __lasx_xvfmadd_s(vd, __lasx_xvffint_s_w(sumi), acc); - - } - - acc_m = __lsx_vfadd_s(acc_m, (__m128)__lsx_vpermi_w((__m128i)acc_m, (__m128i)acc_m, 0xee)); - __m128i tmp1 = __lsx_vinsgr2vr_w(__lsx_vldi(0), __lsx_vpickve2gr_w((__m128i)acc_m, 1), 0); - acc_m = __lsx_vfadd_s(acc_m, (__m128)tmp1); - - - *s = hsum_float_8(acc) + ((v4f32)acc_m)[0]; -#elif defined(__VXE__) || defined(__VXE2__) - const uint8x16_t v_lm = vec_splat_u8(0x0F); - const int32x4_t v_z = vec_splat_s32(0); - - uint8x16_t v_x[2]; - int8x16_t v_xl[2]; - int8x16_t v_y[2]; - - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums); - const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums); - const int16x8_t v_ysums = vec_padd_s16(v_ysumsl, v_ysumsh); - - memcpy(utmp, x[i].scales, 12); - - uint32x4_t v_mins8 = { 0 }; - v_mins8 = vec_insert(utmp[1] & kmask1, v_mins8, 0); - v_mins8 = vec_insert(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), v_mins8, 1); - - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[0] &= kmask1; - - const int16x8_t v_minsh = (int16x8_t)vec_unpackh((uint8x16_t)v_mins8); - - const int32x4_t v_minso = vec_mulo(v_ysums, v_minsh); - const int32x4_t v_minse = vec_mule(v_ysums, v_minsh); - const int32x4_t v_mins = v_minso + v_minse; - sumf -= dmin * (v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3]); - - const uint8_t * scales = (const uint8_t *)utmp; - const uint8_t * GGML_RESTRICT x0 = x[i].qs; - const int8_t * GGML_RESTRICT y0 = y[i].qs; - - int32_t sumi1 = 0; - int32_t sumi2 = 0; - - for (int j = 0; j < QK_K/64; ++j) { - v_x[0] = vec_xl(0 , x0); - v_x[1] = vec_xl(16, x0); - x0 += 32; - - v_y[0] = vec_xl(0 , y0); - v_y[1] = vec_xl(16, y0); - y0 += 32; - - v_xl[0] = (int8x16_t)vec_and(v_x[0], v_lm); - v_xl[1] = (int8x16_t)vec_and(v_x[1], v_lm); - - const int32x4_t p1 = ggml_vec_dot(ggml_vec_dot(v_z, v_xl[0], v_y[0]), v_xl[1], v_y[1]); - sumi1 += (p1[0] + p1[1] + p1[2] + p1[3]) * scales[2*j+0]; - - v_y[0] = vec_xl(0 , y0); - v_y[1] = vec_xl(16, y0); - y0 += 32; - - v_xl[0] = (int8x16_t)vec_sr(v_x[0], 4); - v_xl[1] = (int8x16_t)vec_sr(v_x[1], 4); - - const int32x4_t p2 = ggml_vec_dot(ggml_vec_dot(v_z, v_xl[0], v_y[0]), v_xl[1], v_y[1]); - sumi2 += (p2[0] + p2[1] + p2[2] + p2[3]) * scales[2*j+1]; - } - - sumf += d * (sumi1 + sumi2); - } - - *s = sumf; -#else - - const uint8_t * scales = (const uint8_t*)&utmp[0]; - const uint8_t * mins = (const uint8_t*)&utmp[2]; - - int8_t aux8[QK_K]; - int16_t aux16[8]; - float sums [8]; - int32_t aux32[8]; - memset(sums, 0, 8*sizeof(float)); - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - memset(aux32, 0, 8*sizeof(int32_t)); - int8_t * GGML_RESTRICT a = aux8; - for (int j = 0; j < QK_K/64; ++j) { - for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); - a += 32; - for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); - a += 32; q4 += 32; - } - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & 
kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - int sumi = 0; - for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; - a = aux8; - int is = 0; - for (int j = 0; j < QK_K/32; ++j) { - int32_t scale = scales[is++]; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; a += 8; - } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; - sumf -= dmin * sumi; - } - for (int l = 0; l < 8; ++l) sumf += sums[l]; - *s = sumf; -#endif -} - -void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q5_K * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - - static const uint32_t kmask1 = 0x3f3f3f3f; - static const uint32_t kmask2 = 0x0f0f0f0f; - static const uint32_t kmask3 = 0x03030303; - - uint32_t utmp[4]; - -#ifdef __ARM_NEON - const uint8x16_t m4b = vdupq_n_u8(0xf); - const uint8x16_t mone = vdupq_n_u8(1); - const uint8x16_t mtwo = vdupq_n_u8(2); - const int32x4_t mzero = vdupq_n_s32(0); - - ggml_int8x16x4_t q5bytes; - - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8); - const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8)); - const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), - vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); - int32_t sumi_mins = vaddvq_s32(prod); - - const uint8_t * scales = (const uint8_t *)utmp; - - const uint8_t * GGML_RESTRICT q5 = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); - - ggml_uint8x16x4_t q5h; - - int32_t sumi = 0; - - for (int j = 0; j < QK_K/64; ++j) { - - const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32; - const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; - - q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); - q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); - q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3); - q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3); - qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2); - qhbits.val[1] = 
vshrq_n_u8(qhbits.val[1], 2); - - q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0])); - q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1])); - q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2])); - q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3])); - - sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++; - sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++; - } - - sumf += d * sumi - dmin * sumi_mins; - } - - *s = sumf; - -#elif defined __AVX2__ - - const __m256i m4 = _mm256_set1_epi8(0xF); - const __m128i mzero = _mm_setzero_si128(); - const __m256i mone = _mm256_set1_epi8(1); - - __m256 acc = _mm256_setzero_ps(); - - float summs = 0.f; - - for (int i = 0; i < nb; ++i) { - const uint8_t * GGML_RESTRICT q5 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); - - const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums); - const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); - const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); - const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero); - summs += dmin * _mm_extract_epi32(hsum, 0); - - const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); - const __m256i scales = MM256_SET_M128I(sc128, sc128); - - const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh); - __m256i hmask = mone; - - __m256i sumi = _mm256_setzero_si256(); - - int bit = 0; - - for (int j = 0; j < QK_K/64; ++j) { - - const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0)); - const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1)); - - const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32; - - const __m256i q5l_0 = _mm256_and_si256(q5bits, m4); - const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4); - const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0); - hmask = _mm256_slli_epi16(hmask, 1); - - const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4); - const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4); - const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1); - hmask = _mm256_slli_epi16(hmask, 1); - - const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - - __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0); - __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1); - - p16_0 = _mm256_madd_epi16(scale_0, p16_0); - p16_1 = _mm256_madd_epi16(scale_1, p16_1); - - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); - - } - - __m256 vd = _mm256_set1_ps(d); - acc = _mm256_fmadd_ps(vd, 
_mm256_cvtepi32_ps(sumi), acc); - - } - - *s = hsum_float_8(acc) + summs; - -#elif defined __AVX__ - - const __m128i m4 = _mm_set1_epi8(0xF); - const __m128i mzero = _mm_setzero_si128(); - const __m128i mone = _mm_set1_epi8(1); - const __m128i m2 = _mm_set1_epi8(2); - - __m256 acc = _mm256_setzero_ps(); - - float summs = 0.f; - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const uint8_t * GGML_RESTRICT q5 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]); - const __m128i scales = _mm_cvtepu8_epi16(utmps); - const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps)); - - const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]); - const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]); - const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1); - const __m128i prod = _mm_madd_epi16(mins, q8s); - const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero); - summs += dmin * _mm_extract_epi32(hsum, 0); - - const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]); - const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]); - __m128i hmask = mone; - - __m128i sumi_0 = _mm_setzero_si128(); - __m128i sumi_1 = _mm_setzero_si128(); - - int bit = 0; - - __m128i shuffle = _mm_set1_epi16(0x0100); - for (int j = 0; j < QK_K/64; ++j) { - - const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle); - shuffle = _mm_add_epi16(shuffle, m2); - const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle); - shuffle = _mm_add_epi16(shuffle, m2); - - const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16; - const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16; - - __m128i q5l_0 = _mm_and_si128(q5bits_0, m4); - __m128i q5l_1 = _mm_and_si128(q5bits_1, m4); - __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4); - __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4); - __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0); - __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1); - hmask = _mm_slli_epi16(hmask, 1); - - __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0); - __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1); - p16_0 = _mm_madd_epi16(scale_0, p16_0); - p16_1 = _mm_madd_epi16(scale_0, p16_1); - - q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4); - q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4); - q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4); - q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4); - q5_0 = _mm_add_epi8(q5l_0, q5h_0); - q5_1 = _mm_add_epi8(q5l_1, q5h_1); - hmask = _mm_slli_epi16(hmask, 1); - - q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0); - __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1); - p16_2 = _mm_madd_epi16(scale_1, p16_2); - p16_3 = _mm_madd_epi16(scale_1, p16_3); - - sumi_0 = 
_mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); - sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); - - } - - __m256 vd = _mm256_set1_ps(d); - __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); - acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc); - - } - - *s = hsum_float_8(acc) + summs; - -#elif defined __wasm_simd128__ - //const uint8_t * scales = (const uint8_t*)&utmp[0]; - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); // Fixed sign - - const uint8_t * GGML_RESTRICT q5 = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - // Process scales and mins - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - // Sum mins * q8sums - int32_t sumi_mins = 0; - const int16_t * GGML_RESTRICT q8sums = y[i].bsums; - const uint8_t * m = (const uint8_t *)&utmp[2]; - for (int j = 0; j < 16; j += 2) { - sumi_mins += (q8sums[j] + q8sums[j+1]) * m[j/2]; - } - sumf -= dmin * sumi_mins; // Correct subtraction - - v128_t qh0 = wasm_v128_load(qh); - v128_t qh1 = wasm_v128_load(qh + 16); - const uint8_t * sc = (const uint8_t *)utmp; - - int32_t sumi = 0; - - for (int j = 0; j < QK_K/64; ++j) { - const int shift = j * 2; - v128_t qh_shift0 = wasm_u8x16_shr(qh0, shift); - v128_t qh_shift1 = wasm_u8x16_shr(qh1, shift); - - v128_t qh_low0 = wasm_i8x16_shl(wasm_v128_and(qh_shift0, wasm_i8x16_splat(0x01)), 4); - v128_t qh_high0 = wasm_i8x16_shl(wasm_v128_and(qh_shift0, wasm_i8x16_splat(0x02)), 3); - v128_t qh_low1 = wasm_i8x16_shl(wasm_v128_and(qh_shift1, wasm_i8x16_splat(0x01)), 4); - v128_t qh_high1 = wasm_i8x16_shl(wasm_v128_and(qh_shift1, wasm_i8x16_splat(0x02)), 3); - - v128_t q5_0 = wasm_v128_load(q5); - v128_t q5_1 = wasm_v128_load(q5 + 16); - q5 += 32; - - v128_t q5l_0 = wasm_v128_or(wasm_v128_and(q5_0, wasm_i8x16_splat(0x0F)), qh_low0); - v128_t q5h_0 = wasm_v128_or(wasm_u8x16_shr(q5_0, 4), qh_high0); - v128_t q5l_1 = wasm_v128_or(wasm_v128_and(q5_1, wasm_i8x16_splat(0x0F)), qh_low1); - v128_t q5h_1 = wasm_v128_or(wasm_u8x16_shr(q5_1, 4), qh_high1); - - v128_t q8_0 = wasm_v128_load(q8); - v128_t q8_1 = wasm_v128_load(q8 + 16); - v128_t q8_2 = wasm_v128_load(q8 + 32); - v128_t q8_3 = wasm_v128_load(q8 + 48); - q8 += 64; - - // Process low quants - v128_t pl0 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q5l_0), - wasm_i16x8_extend_low_i8x16(q8_0) - ); - pl0 = wasm_i32x4_add(pl0, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q5l_0), - wasm_i16x8_extend_high_i8x16(q8_0) - )); - v128_t pl1 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q5l_1), - wasm_i16x8_extend_low_i8x16(q8_1) - ); - pl1 = wasm_i32x4_add(pl1, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q5l_1), - wasm_i16x8_extend_high_i8x16(q8_1) - )); - v128_t sum_low = wasm_i32x4_add(pl0, pl1); - - // Process high quants - v128_t ph0 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q5h_0), - wasm_i16x8_extend_low_i8x16(q8_2) - ); - ph0 = wasm_i32x4_add(ph0, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q5h_0), - wasm_i16x8_extend_high_i8x16(q8_2) - )); - v128_t ph1 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q5h_1), - wasm_i16x8_extend_low_i8x16(q8_3) - ); - ph1 = wasm_i32x4_add(ph1, wasm_i32x4_dot_i16x8( - 
wasm_i16x8_extend_high_i8x16(q5h_1), - wasm_i16x8_extend_high_i8x16(q8_3) - )); - v128_t sum_high = wasm_i32x4_add(ph0, ph1); - - // Accumulate with scale factors - int32_t sl = wasm_i32x4_extract_lane(sum_low, 0) + wasm_i32x4_extract_lane(sum_low, 1) + - wasm_i32x4_extract_lane(sum_low, 2) + wasm_i32x4_extract_lane(sum_low, 3); - int32_t sh = wasm_i32x4_extract_lane(sum_high, 0) + wasm_i32x4_extract_lane(sum_high, 1) + - wasm_i32x4_extract_lane(sum_high, 2) + wasm_i32x4_extract_lane(sum_high, 3); - - sumi += sl * sc[2*j] + sh * sc[2*j+1]; - } - - sumf += d * sumi; - } - - *s = sumf; - -#elif defined __riscv_v - - const uint8_t * scales = (const uint8_t*)&utmp[0]; - const uint8_t * mins = (const uint8_t*)&utmp[2]; - - float sumf = 0; - float sums = 0.0; - - size_t vl; - - for (int i = 0; i < nb; ++i) { - - vl = 8; - - const uint8_t * GGML_RESTRICT q5 = x[i].qs; - const uint8_t * GGML_RESTRICT hm = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; - - vint16m1_t q8sums_0 = __riscv_vlse16_v_i16m1(y[i].bsums, 4, vl); - vint16m1_t q8sums_1 = __riscv_vlse16_v_i16m1(y[i].bsums+1, 4, vl); - vint16m1_t q8sums = __riscv_vadd_vv_i16m1(q8sums_0, q8sums_1, vl); - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - vuint8mf2_t mins8 = __riscv_vle8_v_u8mf2(mins, vl); - vint16m1_t v_mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl)); - vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, v_mins, vl); - - vint32m1_t sumi = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); - sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi); - - vl = 32; - int32_t aux32 = 0; - int is = 0; - - uint8_t m = 1; - vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); - vuint8m2_t vqh = __riscv_vle8_v_u8m2(hm, vl); - - for (int j = 0; j < QK_K/64; ++j) { - // load Q5 and Q8 - vuint8m2_t q5_x = __riscv_vle8_v_u8m2(q5, vl); - vint8m2_t q8_y1 = __riscv_vle8_v_i8m2(q8, vl); - vint8m2_t q8_y2 = __riscv_vle8_v_i8m2(q8+32, vl); - - // compute mask for addition - vint8m2_t q5_a = __riscv_vreinterpret_v_u8m2_i8m2(__riscv_vand_vx_u8m2(q5_x, 0x0F, vl)); - vuint8m2_t qh_m1 = __riscv_vand_vx_u8m2(vqh, m, vl); - vbool4_t vmask_1 = __riscv_vmsne_vx_u8m2_b4(qh_m1, 0, vl); - vint8m2_t q5_m1 = __riscv_vadd_vx_i8m2_mu(vmask_1, q5_a, q5_a, 16, vl); - m <<= 1; - - vint8m2_t q5_l = __riscv_vreinterpret_v_u8m2_i8m2(__riscv_vsrl_vx_u8m2(q5_x, 0x04, vl)); - vuint8m2_t qh_m2 = __riscv_vand_vx_u8m2(vqh, m, vl); - vbool4_t vmask_2 = __riscv_vmsne_vx_u8m2_b4(qh_m2, 0, vl); - vint8m2_t q5_m2 = __riscv_vadd_vx_i8m2_mu(vmask_2, q5_l, q5_l, 16, vl); - m <<= 1; - - vint16m4_t v0 = __riscv_vwmul_vv_i16m4(q5_m1, q8_y1, vl); - vint16m4_t v1 = __riscv_vwmul_vv_i16m4(q5_m2, q8_y2, vl); - - vint32m8_t vs1 = __riscv_vwmul_vx_i32m8(v0, scales[is++], vl); - vint32m8_t vs2 = __riscv_vwmul_vx_i32m8(v1, scales[is++], vl); - - vint32m1_t vacc1 = __riscv_vredsum_vs_i32m8_i32m1(vs1, vzero, vl); - vint32m1_t vacc2 = __riscv_vredsum_vs_i32m8_i32m1(vs2, vacc1, vl); - - aux32 += __riscv_vmv_x_s_i32m1_i32(vacc2); - q5 += 32; q8 += 64; - - } - - sums += aux32 * d; - - } - - *s = sumf+sums; - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector signed char lowMask1 = 
vec_splats((int8_t)0x3f); - const vector signed char lowMask2 = vec_splats((int8_t)0x30); - const vector int v0 = vec_splats((int32_t)0); - const vector unsigned char v1 = vec_splats((unsigned char)0x1); - const vector unsigned char v2 = vec_splats((unsigned char)0x2); - const vector unsigned char v3 = vec_splats((unsigned char)0x3); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); - vector float vdmin = vec_mul(vxmin, vyd); - - UNUSED(kmask1); - UNUSED(kmask2); - UNUSED(kmask3); - UNUSED(utmp); - - vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); - vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2); - vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); - vector signed char u3 = vec_sr(u2, v4); - - vector signed char u30 = u1; - vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3); - - u1 = vec_and(u0, lowMask1); - u2 = vec_or(u30, u31); - - vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2); - - vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); - vector signed short q8ysums1 = vec_xl(16, y[i].bsums); - - vector signed short vscales = vec_unpackh(utmps); - - vector signed short q5xmins = vec_unpackl(utmps); - vector signed short q5xmins0 = vec_mergeh(q5xmins, q5xmins); - vector signed short q5xmins1 = vec_mergel(q5xmins, q5xmins); - - vector signed int prod0 = vec_mule(q5xmins0, q8ysums0); - vector signed int prod1 = vec_mule(q5xmins1, q8ysums1); - vector signed int prod2 = vec_mulo(q5xmins0, q8ysums0); - vector signed int prod3 = vec_mulo(q5xmins1, q8ysums1); - - vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); - vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); - vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); - vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); - - vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh); - vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].qh); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - const uint8_t * GGML_RESTRICT q5 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - for (int j = 0; j < QK_K/64; ++j) { - __builtin_prefetch(q5, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed char qxs0 = (vector signed char)vec_xl( 0, q5); - vector signed char qxs1 = (vector signed char)vec_xl(16, q5); - q5 += 32; - - vector signed char qxs00 = vec_and(qxs0, lowMask); - vector signed char qxs01 = vec_sr(qxs0, v4); - vector signed char qxs10 = vec_and(qxs1, lowMask); - vector signed char qxs11 = vec_sr(qxs1, v4); - - vector signed char q5h00 = vec_sl(vec_and((vector signed char)v1, qxhs0), v4); - vector signed char q5h01 = vec_sl(vec_and((vector signed char)v2, qxhs0), v3); - vector signed char q5h10 = vec_sl(vec_and((vector signed char)v1, qxhs1), v4); - vector signed char q5h11 = vec_sl(vec_and((vector signed char)v2, qxhs1), v3); - qxhs0 = vec_sr(qxhs0, v2); - qxhs1 = vec_sr(qxhs1, v2); - - vector unsigned char q5x00 = 
(vector unsigned char)vec_or(q5h00, qxs00); - vector unsigned char q5x01 = (vector unsigned char)vec_or(q5h01, qxs01); - vector unsigned char q5x10 = (vector unsigned char)vec_or(q5h10, qxs10); - vector unsigned char q5x11 = (vector unsigned char)vec_or(q5h11, qxs11); - - vector signed char q8y00 = vec_xl( 0, q8); - vector signed char q8y10 = vec_xl(16, q8); - vector signed char q8y01 = vec_xl(32, q8); - vector signed char q8y11 = vec_xl(48, q8); - q8 += 64; - - vector signed int qv00 = vec_msum(q8y00, q5x00, v0); - vector signed int qv01 = vec_msum(q8y01, q5x01, v0); - vector signed int qv10 = vec_msum(q8y10, q5x10, v0); - vector signed int qv11 = vec_msum(q8y11, q5x11, v0); - - vector signed int vscales_h = vec_unpackh(vscales); - vector signed int vs0 = vec_splat(vscales_h, 0); - vector signed int vs1 = vec_splat(vscales_h, 1); - vscales = vec_sld(vscales, vscales, 12); - - vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0); - vsumi1 = vec_add(vec_mul(qv10, vs0), vsumi1); - vsumi2 = vec_add(vec_mul(qv01, vs1), vsumi2); - vsumi3 = vec_add(vec_mul(qv11, vs1), vsumi3); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined __loongarch_asx - - __m256 acc = (__m256)__lasx_xvldi(0); - __m128 acc_m = (__m128)__lsx_vldi(0); - - for (int i = 0; i < nb; ++i) { - - const uint8_t * GGML_RESTRICT q5 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - const __m128i mins_and_scales128 = lsx_set_w(utmp[3], utmp[2], utmp[1], utmp[0]); - const __m128i mins128 = __lsx_vexth_h_b(mins_and_scales128); - const __m128i scales128 = __lsx_vsllwil_h_b(mins_and_scales128, 0); - - const __m256i q8sums = __lasx_xvld((const __m256i*)y[i].bsums, 0); - const __m128i q8s = lsx_hadd_h(lasx_extracti128(q8sums, 0), lasx_extracti128(q8sums, 1)); - const __m128i prod = lsx_madd_h(mins128, q8s); - acc_m = __lsx_vfmadd_s(__lsx_vreplfr2vr_s(dmin), __lsx_vffint_s_w(prod), acc_m); - - const __m256i scales = lasx_insertf128(scales128, scales128); - - const __m256i hbits = __lasx_xvld((const __m256i*)x[i].qh, 0); - - __m256i sumi = __lasx_xvldi(0); - - for (int j = 0; j < QK_K/64; ++j) { - - const __m256i scale_0 = lasx_xvrepl128vei_h(scales, 2 * j + 0); - const __m256i scale_1 = lasx_xvrepl128vei_h(scales, 2 * j + 1); - - const __m256i q5bits = __lasx_xvld((const __m256i*)q5, 0); q5 += 32; - - const __m256i q5l_0 = __lasx_xvandi_b(q5bits, 0xf); - const __m256i q5l_1 = __lasx_xvsrli_b(q5bits, 4); - const __m256i q5h_0 = __lasx_xvnori_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 2 * j + 0), 0), 0xef); - const __m256i q5h_1 = __lasx_xvnori_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 2 * j + 1), 0), 0xef); - const __m256i q5_0 = __lasx_xvor_v(q5l_0, q5h_0); - const __m256i q5_1 = __lasx_xvor_v(q5l_1, q5h_1); - - const __m256i q8_0 = 
__lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - - __m256i p16_0 = lasx_madd_h_b(q5_0, q8_0); - __m256i p16_1 = lasx_madd_h_b(q5_1, q8_1); - - p16_0 = lasx_madd_h(scale_0, p16_0); - p16_1 = lasx_madd_h(scale_1, p16_1); - - sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_1)); - - } - - __m256 vd = __lasx_xvreplfr2vr_s(d); - acc = __lasx_xvfmadd_s(vd, __lasx_xvffint_s_w(sumi), acc); - - } - - acc_m = __lsx_vfadd_s(acc_m, (__m128)__lsx_vbsrl_v(acc_m, 8)); - acc_m = __lsx_vfadd_s(acc_m, (__m128)__lsx_vbsrl_v(acc_m, 4)); - - *s = hsum_float_8(acc) + ((v4f32)acc_m)[0]; -#elif defined(__VXE__) || defined(__VXE2__) - const uint8x16_t v_lm = vec_splat_u8(0x0F); - const uint8x16_t v_1m = vec_splat_u8(0x01); - const uint8x16_t v_2m = vec_splat_u8(0x02); - - const int32x4_t v_z = vec_splat_s32(0); - - const uchar8x16_t v_minsm = { - 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF - }; - - int8x16_t q5b[4]; - uint8x16_t q5h[4]; - - uint8x16_t v_xl[2]; - uint8x16_t v_xh[2]; - int8x16_t v_y[4]; - - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums); - const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums); - const int16x8_t v_ysums = vec_padd_s16(v_ysumsl, v_ysumsh); - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - const uint8x16_t v_mins16 = vec_xl(0, (const uint8_t *)utmp); - const uint8x16_t v_mins8 = vec_perm(v_mins16, v_mins16, v_minsm); - const int16x8_t v_minsh = (int16x8_t)vec_unpackh(v_mins8); - - const int32x4_t v_minsho = vec_mulo(v_ysums, v_minsh); - const int32x4_t v_minshe = vec_mule(v_ysums, v_minsh); - const int32x4_t v_mins = vec_add(v_minsho, v_minshe); - const int32_t mins = v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3]; - - const uint8_t * scales = (const uint8_t *)utmp; - const uint8_t * GGML_RESTRICT x0l = x[i].qs; - const uint8_t * GGML_RESTRICT x0h = x[i].qh; - const int8_t * GGML_RESTRICT y0 = y[i].qs; - - v_xh[0] = vec_xl(0 , x0h); - v_xh[1] = vec_xl(16, x0h); - - int32_t sumi = 0; - for (int j = 0; j < QK_K/64; ++j) { - v_xl[0] = vec_xl(0 , x0l); - v_xl[1] = vec_xl(16, x0l); - x0l += 32; - - v_y[0] = vec_xl(0 , y0); - v_y[1] = vec_xl(16, y0); - v_y[2] = vec_xl(32, y0); - v_y[3] = vec_xl(48, y0); - y0 += 64; - - q5h[0] = vec_sl(vec_and(v_1m, v_xh[0]), 4); - q5h[1] = vec_sl(vec_and(v_1m, v_xh[1]), 4); - q5h[2] = vec_sl(vec_and(v_2m, v_xh[0]), 3); - q5h[3] = vec_sl(vec_and(v_2m, v_xh[1]), 3); - v_xh[0] = vec_sr(v_xh[0], 2); - v_xh[1] = vec_sr(v_xh[1], 2); - - q5b[0] = (int8x16_t)vec_or(vec_and(v_xl[0], v_lm), q5h[0]); - q5b[1] = (int8x16_t)vec_or(vec_and(v_xl[1], v_lm), q5h[1]); - q5b[2] = (int8x16_t)vec_or(vec_sr(v_xl[0], 4), q5h[2]); - q5b[3] = (int8x16_t)vec_or(vec_sr(v_xl[1], 4), q5h[3]); - - int32x4_t sumi0 = ggml_vec_dot(ggml_vec_dot(v_z, q5b[0], v_y[0]), q5b[1], v_y[1]); - int32x4_t sumi1 = ggml_vec_dot(ggml_vec_dot(v_z, q5b[2], v_y[2]), q5b[3], v_y[3]); - - sumi += (sumi0[0] + sumi0[1] + sumi0[2] + sumi0[3]) * *scales++; - sumi += (sumi1[0] + sumi1[1] + sumi1[2] + sumi1[3]) * *scales++; - } - - sumf += d * sumi - dmin * mins; - } - - *s = sumf; -#else - - const uint8_t * scales = 
(const uint8_t*)&utmp[0]; - const uint8_t * mins = (const uint8_t*)&utmp[2]; - - int8_t aux8[QK_K]; - int16_t aux16[8]; - float sums [8]; - int32_t aux32[8]; - memset(sums, 0, 8*sizeof(float)); - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const uint8_t * GGML_RESTRICT hm = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - memset(aux32, 0, 8*sizeof(int32_t)); - int8_t * GGML_RESTRICT a = aux8; - uint8_t m = 1; - for (int j = 0; j < QK_K/64; ++j) { - for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); - for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); - a += 32; m <<= 1; - for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); - for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); - a += 32; m <<= 1; - q4 += 32; - } - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - int sumi = 0; - for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; - a = aux8; - int is = 0; - for (int j = 0; j < QK_K/32; ++j) { - int32_t scale = scales[is++]; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; a += 8; - } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; - sumf -= dmin * sumi; - } - for (int l = 0; l < 8; ++l) sumf += sums[l]; - *s = sumf; -#endif -} - -void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); -#ifdef __ARM_FEATURE_MATMUL_INT8 - assert((nrc == 2) || (nrc == 1)); -#else - assert(nrc == 1); -#endif - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q6_K * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_FEATURE_MATMUL_INT8) - if (nrc == 2) { - const block_q6_K * GGML_RESTRICT x0 = x; - const block_q6_K * GGML_RESTRICT x1 = (const block_q6_K *) ((const uint8_t *)vx + bx); - const block_q8_K * GGML_RESTRICT y0 = y; - const block_q8_K * GGML_RESTRICT y1 = (const block_q8_K *) ((const uint8_t *)vy + by); - - float32x4_t vfsum = vdupq_n_f32(0.0f); - - for (int i = 0; i < nb; ++i, ++x0, ++x1, ++y0, ++y1) { - const uint8_t * GGML_RESTRICT ql0 = x0->ql; - const uint8_t * GGML_RESTRICT ql1 = x1->ql; - const uint8_t * GGML_RESTRICT qh0 = x0->qh; - const uint8_t * GGML_RESTRICT qh1 = x1->qh; - const int8_t * GGML_RESTRICT qy0 = y0->qs; - const int8_t * GGML_RESTRICT qy1 = y1->qs; - - const uint8x16_t mone = vdupq_n_u8(0x30); - const uint8x16_t m4b = vdupq_n_u8(0x0f); - - int32x4_t visum = vdupq_n_s32(0); - - // process 8 blocks per iteration, totally 16 blocks - for (int j = 0; j < 2; ++j, qh0 += 32, ql0 += 64, qh1 += 32, ql1 += 64) { - int8x16_t vx0[8], vx1[8]; - - // de-quantize vx0[8] - { - 
const uint8x16x2_t qh_bits = vld1q_u8_x2(qh0); - const uint8x16x4_t ql_bits = vld1q_u8_x4(ql0); - - uint8x16_t q6h_0 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 4)); - uint8x16_t q6h_1 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 4)); - uint8x16_t q6h_2 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 2)); - uint8x16_t q6h_3 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 2)); - - vx0[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[0], m4b), q6h_0)); - vx0[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[1], m4b), q6h_1)); - vx0[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[2], m4b), q6h_2)); - vx0[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[3], m4b), q6h_3)); - - q6h_0 = vandq_u8(mone, qh_bits.val[0]); - q6h_1 = vandq_u8(mone, qh_bits.val[1]); - q6h_2 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[0], 2)); - q6h_3 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[1], 2)); - - vx0[4] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[0], 4), q6h_0)); - vx0[5] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[1], 4), q6h_1)); - vx0[6] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[2], 4), q6h_2)); - vx0[7] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[3], 4), q6h_3)); - } - - // de-quantize vx1[8] - { - const uint8x16x2_t qh_bits = vld1q_u8_x2(qh1); - const uint8x16x4_t ql_bits = vld1q_u8_x4(ql1); - - uint8x16_t q6h_0 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 4)); - uint8x16_t q6h_1 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 4)); - uint8x16_t q6h_2 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 2)); - uint8x16_t q6h_3 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 2)); - - vx1[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[0], m4b), q6h_0)); - vx1[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[1], m4b), q6h_1)); - vx1[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[2], m4b), q6h_2)); - vx1[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[3], m4b), q6h_3)); - - q6h_0 = vandq_u8(mone, qh_bits.val[0]); - q6h_1 = vandq_u8(mone, qh_bits.val[1]); - q6h_2 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[0], 2)); - q6h_3 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[1], 2)); - - vx1[4] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[0], 4), q6h_0)); - vx1[5] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[1], 4), q6h_1)); - vx1[6] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[2], 4), q6h_2)); - vx1[7] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[3], 4), q6h_3)); - } - - // process 16 elements (one block with same scale) per iteration - // - vx = concat(ql, qh) - 32 - // - r1,r2,r3,r4 = smmla(vx, vy) - for (int k = 0; k < 8; ++k) { - const int blk = j * 8 + k; - - const int8x16_t vy0 = vld1q_s8(qy0); - const int8x16_t vy1 = vld1q_s8(qy1); - qy0 += 16; - qy1 += 16; - - const int32x4_t block_scale = { - x0->scales[blk], - x0->scales[blk], - x1->scales[blk], - x1->scales[blk], - }; - - // calculate four results at once with outer product - const int8x16_t vx_l = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(vx0[k]), vreinterpretq_s64_s8(vx1[k]))); - const int8x16_t vx_h = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(vx0[k]), vreinterpretq_s64_s8(vx1[k]))); - const int8x16_t vy_l = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(vy0), vreinterpretq_s64_s8(vy1))); - const int8x16_t vy_h = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(vy0), vreinterpretq_s64_s8(vy1))); - int32x4_t vr = vdupq_n_s32(0); - vr = vmmlaq_s32(vr, vx_l, vy_l); - vr = vmmlaq_s32(vr, vx_h, vy_h); - - // apply block scale, will NOT 
overflow - // block_scale * sum_256(int6*int8) <= 2^(8+8+6+8) = 30 bits - visum = vmlaq_s32(visum, vr, block_scale); - } - } - - // adjust bias, apply superblock scale - { - int32_t bias[4]; -#ifdef __ARM_FEATURE_SVE - const svbool_t pg16_8 = svptrue_pat_b16(SV_VL8); - const svbool_t pg8_8 = svptrue_pat_b8(SV_VL8); - const svint16_t y0_q8sums_0 = svld1_s16(pg16_8, y0->bsums); - const svint16_t y0_q8sums_1 = svld1_s16(pg16_8, y0->bsums + 8); - const svint16_t y1_q8sums_0 = svld1_s16(pg16_8, y1->bsums); - const svint16_t y1_q8sums_1 = svld1_s16(pg16_8, y1->bsums + 8); - const svint16_t x0_q6scales_0 = svunpklo_s16(svld1_s8(pg8_8, x0->scales)); - const svint16_t x0_q6scales_1 = svunpklo_s16(svld1_s8(pg8_8, x0->scales + 8)); - const svint16_t x1_q6scales_0 = svunpklo_s16(svld1_s8(pg8_8, x1->scales)); - const svint16_t x1_q6scales_1 = svunpklo_s16(svld1_s8(pg8_8, x1->scales + 8)); - const svint64_t zero = svdup_n_s64(0); - bias[0] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y0_q8sums_0, x0_q6scales_0), - svdot_s64(zero, y0_q8sums_1, x0_q6scales_1))); - bias[1] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y1_q8sums_0, x0_q6scales_0), - svdot_s64(zero, y1_q8sums_1, x0_q6scales_1))); - bias[2] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y0_q8sums_0, x1_q6scales_0), - svdot_s64(zero, y0_q8sums_1, x1_q6scales_1))); - bias[3] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y1_q8sums_0, x1_q6scales_0), - svdot_s64(zero, y1_q8sums_1, x1_q6scales_1))); -#else - // NEON doesn't support int16 dot product, fallback to separated mul and add - const int16x8x2_t q8sums0 = vld1q_s16_x2(y0->bsums); - const int16x8x2_t q8sums1 = vld1q_s16_x2(y1->bsums); - - int8x16_t scales_s8 = vld1q_s8(x0->scales); - const int16x8x2_t q6scales0 = {{vmovl_s8(vget_low_s8(scales_s8)), vmovl_s8(vget_high_s8(scales_s8))}}; - scales_s8 = vld1q_s8(x1->scales); - const int16x8x2_t q6scales1 = {{vmovl_s8(vget_low_s8(scales_s8)), vmovl_s8(vget_high_s8(scales_s8))}}; - - int32x4_t prod; - prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[0]), vget_low_s16 (q6scales0.val[0])), - vmull_s16(vget_high_s16(q8sums0.val[0]), vget_high_s16(q6scales0.val[0]))), - vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[1]), vget_low_s16 (q6scales0.val[1])), - vmull_s16(vget_high_s16(q8sums0.val[1]), vget_high_s16(q6scales0.val[1])))); - bias[0] = vaddvq_s32(prod); - prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[0]), vget_low_s16 (q6scales0.val[0])), - vmull_s16(vget_high_s16(q8sums1.val[0]), vget_high_s16(q6scales0.val[0]))), - vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[1]), vget_low_s16 (q6scales0.val[1])), - vmull_s16(vget_high_s16(q8sums1.val[1]), vget_high_s16(q6scales0.val[1])))); - bias[1] = vaddvq_s32(prod); - prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[0]), vget_low_s16 (q6scales1.val[0])), - vmull_s16(vget_high_s16(q8sums0.val[0]), vget_high_s16(q6scales1.val[0]))), - vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[1]), vget_low_s16 (q6scales1.val[1])), - vmull_s16(vget_high_s16(q8sums0.val[1]), vget_high_s16(q6scales1.val[1])))); - bias[2] = vaddvq_s32(prod); - prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[0]), vget_low_s16 (q6scales1.val[0])), - vmull_s16(vget_high_s16(q8sums1.val[0]), vget_high_s16(q6scales1.val[0]))), - vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[1]), vget_low_s16 (q6scales1.val[1])), - vmull_s16(vget_high_s16(q8sums1.val[1]), 
vget_high_s16(q6scales1.val[1])))); - bias[3] = vaddvq_s32(prod); - -#endif - const int32x4_t vibias = vmulq_n_s32(vld1q_s32(bias), 32); - - const float32x4_t superblock_scale = { - GGML_FP16_TO_FP32(x0->d) * y0->d, - GGML_FP16_TO_FP32(x0->d) * y1->d, - GGML_FP16_TO_FP32(x1->d) * y0->d, - GGML_FP16_TO_FP32(x1->d) * y1->d, - }; - - visum = vsubq_s32(visum, vibias); - vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale); - } - } - - // vfsum = ABCD -> ACBD - // AC -> s, BD -> (s+bs) - vfsum = vzip1q_f32(vfsum, vextq_f32(vfsum, vfsum, 2)); - vst1_f32(s, vget_low_f32 (vfsum)); - vst1_f32(s + bs, vget_high_f32(vfsum)); - - return; - } -#endif - -#ifdef __ARM_FEATURE_SVE - const int vector_length = ggml_cpu_get_sve_cnt()*8; - float sum = 0; - svuint8_t m4b = svdup_n_u8(0xf); - svint32_t vzero = svdup_n_s32(0); - svuint8_t mone = svdup_n_u8(0x30); - svint8_t q6bytes_1, q6bytes_2, q6bytes_3, q6bytes_4; - svuint8_t q6h_1, q6h_2, q6h_3, q6h_4; - - for (int i = 0; i < nb; ++i) { - const float d_all = GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * GGML_RESTRICT q6 = x[i].ql; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const int8_t * GGML_RESTRICT scale = x[i].scales; - - const svbool_t pg16_8 = svptrue_pat_b16(SV_VL8); - const svint16_t q8sums_1 = svld1_s16(pg16_8, y[i].bsums); - const svint16_t q8sums_2 = svld1_s16(pg16_8, y[i].bsums + 8); - const svint16_t q6scales_1 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale)); - const svint16_t q6scales_2 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale + 8)); - const svint64_t prod = svdup_n_s64(0); - int32_t isum_mins = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(prod, q8sums_1, q6scales_1), - svdot_s64(prod, q8sums_2, q6scales_2))); - int32_t isum = 0; - - switch (vector_length) { - case 128: - { - const svbool_t pg32_4 = svptrue_pat_b32(SV_VL4); - const svbool_t pg8_16 = svptrue_pat_b8(SV_VL16); - svint32_t isum_tmp = svdup_n_s32(0); - for (int j = 0; j < QK_K/128; ++j) { - svuint8_t qhbits_1 = svld1_u8(pg8_16, qh); - svuint8_t qhbits_2 = svld1_u8(pg8_16, qh+16); - qh += 32; - svuint8_t q6bits_1 = svld1_u8(pg8_16, q6); - svuint8_t q6bits_2 = svld1_u8(pg8_16, q6+16); - svuint8_t q6bits_3 = svld1_u8(pg8_16, q6+32); - svuint8_t q6bits_4 = svld1_u8(pg8_16, q6+48); - q6 += 64; - svint8_t q8bytes_1 = svld1_s8(pg8_16, q8); - svint8_t q8bytes_2 = svld1_s8(pg8_16, q8+16); - svint8_t q8bytes_3 = svld1_s8(pg8_16, q8+32); - svint8_t q8bytes_4 = svld1_s8(pg8_16, q8+48); - q8 += 64; - - q6h_1 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 4)); - q6h_2 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 4)); - q6h_3 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 2)); - q6h_4 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 2)); - q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_1, m4b), q6h_1)); - q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_2, m4b), q6h_2)); - q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_3, m4b), q6h_3)); - q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_4, m4b), q6h_4)); - isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]); - isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]); - isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]); - isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, 
svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]); - - scale += 4; - q8bytes_1 = svld1_s8(pg8_16, q8); - q8bytes_2 = svld1_s8(pg8_16, q8+16); - q8bytes_3 = svld1_s8(pg8_16, q8+32); - q8bytes_4 = svld1_s8(pg8_16, q8+48); - q8 += 64; - - q6h_1 = svand_u8_x(pg16_8, mone, qhbits_1); - q6h_2 = svand_u8_x(pg16_8, mone, qhbits_2); - q6h_3 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_1, 2)); - q6h_4 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_2, 2)); - q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_1, 4), q6h_1)); - q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_2, 4), q6h_2)); - q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_3, 4), q6h_3)); - q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_4, 4), q6h_4)); - isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]); - isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]); - isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]); - isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]); - scale += 4; - } - isum += svaddv_s32(pg32_4, isum_tmp); - sum += d_all * y[i].d * (isum - 32 * isum_mins); - } - break; - case 256: - case 512: - { - const svbool_t pg8_2 = svptrue_pat_b8(SV_VL2); - const svbool_t pg32_8 = svptrue_pat_b32(SV_VL8); - const svbool_t pg8_32 = svptrue_pat_b8(SV_VL32); - svint32_t isum_tmp = svdup_n_s32(0); - for (int j = 0; j < QK_K/128; j++) { - svuint8_t qhbits_1 = svld1_u8(pg8_32, qh); - qh += 32; - svuint8_t q6bits_1 = svld1_u8(pg8_32, q6); - svuint8_t q6bits_2 = svld1_u8(pg8_32, q6+32); - q6 += 64; - svint8_t q8bytes_1 = svld1_s8(pg8_32, q8); - svint8_t q8bytes_2 = svld1_s8(pg8_32, q8+32); - svint8_t q8bytes_3 = svld1_s8(pg8_32, q8+64); - svint8_t q8bytes_4 = svld1_s8(pg8_32, q8+96); - q8 += 128; - q6h_1 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 4)); - q6h_2 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 2)); - q6h_3 = svand_u8_x(pg8_32, mone, qhbits_1); - q6h_4 = svand_u8_x(pg8_32, mone, svlsr_n_u8_x(pg8_32, qhbits_1, 2)); - q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_1, m4b), q6h_1)); - q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_2, m4b), q6h_2)); - q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_1, 4), q6h_3)); - q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_2, 4), q6h_4)); - - svint8_t scale_lane_1_tmp = svld1_s8(pg8_2, scale); - scale_lane_1_tmp= svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp); - scale_lane_1_tmp= svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp); - svint8_t scale_lane_2_tmp = svld1_s8(pg8_2, scale+2); - scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp); - scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp); - svint8_t scale_lane_3_tmp = svld1_s8(pg8_2, scale+4); - scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp); - scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp); - svint8_t scale_lane_4_tmp = svld1_s8(pg8_2, scale+6); - scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp); - scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp); - svint32_t scale_lane_1 = svunpklo_s32(svunpklo_s16(scale_lane_1_tmp)); - svint32_t scale_lane_2 = svunpklo_s32(svunpklo_s16(scale_lane_2_tmp)); - svint32_t 
scale_lane_3 = svunpklo_s32(svunpklo_s16(scale_lane_3_tmp)); - svint32_t scale_lane_4 = svunpklo_s32(svunpklo_s16(scale_lane_4_tmp)); - - isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale_lane_1); - isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale_lane_2); - isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale_lane_3); - isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale_lane_4); - scale += 8; - } - isum += svaddv_s32(pg32_8, isum_tmp); - sum += d_all * y[i].d * (isum - 32 * isum_mins); - } - break; - default: - assert(false && "Unsupported vector length"); - break; - } - } - - *s = sum; - -#elif __ARM_NEON - float sum = 0; - - const uint8x16_t m4b = vdupq_n_u8(0xF); - const int32x4_t vzero = vdupq_n_s32(0); - //const int8x16_t m32s = vdupq_n_s8(32); - - const uint8x16_t mone = vdupq_n_u8(3); - - ggml_int8x16x4_t q6bytes; - ggml_uint8x16x4_t q6h; - - for (int i = 0; i < nb; ++i) { - - const float d_all = GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * GGML_RESTRICT q6 = x[i].ql; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const int8_t * GGML_RESTRICT scale = x[i].scales; - - const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); - const int8x16_t scales = vld1q_s8(scale); - const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}}; - - const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])), - vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))), - vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])), - vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1])))); - int32_t isum_mins = vaddvq_s32(prod); - - int32_t isum = 0; - - for (int j = 0; j < QK_K/128; ++j) { - - ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32; - ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64; - ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; - - q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); - q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); - uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2); - q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - shifted = vshrq_n_u8(qhbits.val[1], 2); - q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - - //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s); - //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s); - //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s); - //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s); - q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])); - q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])); - q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])); - q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])); - - isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + - vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + - vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + - 
vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; - - scale += 4; - - q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; - - shifted = vshrq_n_u8(qhbits.val[0], 4); - q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - shifted = vshrq_n_u8(qhbits.val[1], 4); - q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - shifted = vshrq_n_u8(qhbits.val[0], 6); - q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - shifted = vshrq_n_u8(qhbits.val[1], 6); - q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); - - //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s); - //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s); - //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s); - //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s); - q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])); - q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])); - q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])); - q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])); - - isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + - vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + - vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + - vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; - scale += 4; - } - //sum += isum * d_all * y[i].d; - sum += d_all * y[i].d * (isum - 32 * isum_mins); - - } - *s = sum; - -#elif defined __AVX2__ - - const __m256i m4 = _mm256_set1_epi8(0xF); - const __m256i m2 = _mm256_set1_epi8(3); - const __m256i m32s = _mm256_set1_epi8(32); - - __m256 acc = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * GGML_RESTRICT q4 = x[i].ql; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales); - - __m256i sumi = _mm256_setzero_si256(); - - int is = 0; - - for (int j = 0; j < QK_K/128; ++j) { - - const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0)); - const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); - const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2)); - const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); - is += 4; - - const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; - const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; - const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32; - - const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4); - const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4); - const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4); - const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4); - - const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0); - const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1); - const __m256i q4_2 = 
_mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2); - const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3); - - const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - - __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0); - __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1); - __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2); - __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3); - - __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0); - __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1); - __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2); - __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3); - - p16_0 = _mm256_sub_epi16(p16_0, q8s_0); - p16_1 = _mm256_sub_epi16(p16_1, q8s_1); - p16_2 = _mm256_sub_epi16(p16_2, q8s_2); - p16_3 = _mm256_sub_epi16(p16_3, q8s_3); - - p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0); - p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1); - p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2); - p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3); - - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3)); - - } - - acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); - } - - *s = hsum_float_8(acc); - -#elif defined __AVX__ - - const __m128i m3 = _mm_set1_epi8(3); - const __m128i m15 = _mm_set1_epi8(15); - - __m256 acc = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * GGML_RESTRICT q4 = x[i].ql; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - // handle the q6_k -32 offset separately using bsums - const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)y[i].bsums); - const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)y[i].bsums + 1); - const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales); - const __m128i scales_16_0 = _mm_cvtepi8_epi16(scales); - const __m128i scales_16_1 = _mm_cvtepi8_epi16(_mm_bsrli_si128(scales, 8)); - const __m128i q8sclsub_0 = _mm_slli_epi32(_mm_madd_epi16(q8sums_0, scales_16_0), 5); - const __m128i q8sclsub_1 = _mm_slli_epi32(_mm_madd_epi16(q8sums_1, scales_16_1), 5); - - __m128i sumi_0 = _mm_setzero_si128(); - __m128i sumi_1 = _mm_setzero_si128(); - - int is = 0; - - for (int j = 0; j < QK_K/128; ++j) { - - const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16; - const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16; - - const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4); - const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4); - const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(12)), 2); - const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(12)), 2); - const __m128i q4h_4 = _mm_and_si128(q4bitsH_0, _mm_set1_epi8(48)); - const __m128i q4h_5 = _mm_and_si128(q4bitsH_1, _mm_set1_epi8(48)); - const __m128i q4h_6 = _mm_srli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(-64)), 2); - const __m128i q4h_7 = _mm_srli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(-64)), 2); - - const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; - 
const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; - const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; - const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; - - const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m15), q4h_0); - const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m15), q4h_1); - const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m15), q4h_2); - const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m15), q4h_3); - const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m15), q4h_4); - const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m15), q4h_5); - const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m15), q4h_6); - const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m15), q4h_7); - - const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - - __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0); - __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1); - __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2); - __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3); - __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4); - __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5); - __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6); - __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7); - - const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0)); - const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); - const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2)); - const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); - is += 4; - - p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0); - p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_0, 8)), p16_1); - p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2); - p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_1, 8)), p16_3); - p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4); - p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_2, 8)), p16_5); - p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6); - p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_3, 8)), p16_7); - - sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); - sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); - sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6)); - sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7)); - - } - - sumi_0 = _mm_sub_epi32(sumi_0, q8sclsub_0); - sumi_1 = _mm_sub_epi32(sumi_1, q8sclsub_1); - const __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); - acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi)), acc); - } - - *s = hsum_float_8(acc); - -#elif defined __wasm_simd128__ - int8_t aux8[QK_K] __attribute__((aligned(16))); - int32_t aux32[8] __attribute__((aligned(16))) = {0}; - float sums[8] __attribute__((aligned(16))) = {0}; - - for (int i = 0; i < nb; ++i) { - // Unpack 6-bit quantized data into aux8 
(unchanged) - const uint8_t * GGML_RESTRICT q4 = x[i].ql; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - int8_t * a = aux8; - for (int j = 0; j < QK_K; j += 128) { - for (int l = 0; l < 32; ++l) { - a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; - a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; - a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; - a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; - } - a += 128; - q4 += 64; - qh += 32; - } - - const int8_t * GGML_RESTRICT a_ptr = aux8; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - v128_t acc0 = wasm_i32x4_splat(0); - v128_t acc1 = wasm_i32x4_splat(0); - - for (int j = 0; j < QK_K/16; ++j) { - const int scale = x[i].scales[j]; - const v128_t vscale = wasm_i32x4_splat(scale); - - // Load 16 elements from a and q8 - const v128_t a_vec = wasm_v128_load(a_ptr); - const v128_t q8_vec = wasm_v128_load(q8); - - // Process low 8 elements - v128_t a_low = wasm_i16x8_extend_low_i8x16(a_vec); - v128_t q8_low = wasm_i16x8_extend_low_i8x16(q8_vec); - v128_t prod_low = wasm_i16x8_mul(a_low, q8_low); - v128_t prod_lo_lo = wasm_i32x4_extend_low_i16x8(prod_low); - v128_t prod_lo_hi = wasm_i32x4_extend_high_i16x8(prod_low); - - // Process high 8 elements - v128_t a_high = wasm_i16x8_extend_high_i8x16(a_vec); - v128_t q8_high = wasm_i16x8_extend_high_i8x16(q8_vec); - v128_t prod_high = wasm_i16x8_mul(a_high, q8_high); - v128_t prod_hi_lo = wasm_i32x4_extend_low_i16x8(prod_high); - v128_t prod_hi_hi = wasm_i32x4_extend_high_i16x8(prod_high); - - // Scale and accumulate - prod_lo_lo = wasm_i32x4_mul(prod_lo_lo, vscale); - prod_lo_hi = wasm_i32x4_mul(prod_lo_hi, vscale); - prod_hi_lo = wasm_i32x4_mul(prod_hi_lo, vscale); - prod_hi_hi = wasm_i32x4_mul(prod_hi_hi, vscale); - - acc0 = wasm_i32x4_add(acc0, wasm_i32x4_add(prod_lo_lo, prod_hi_lo)); - acc1 = wasm_i32x4_add(acc1, wasm_i32x4_add(prod_lo_hi, prod_hi_hi)); - - a_ptr += 16; - q8 += 16; - } - - // Store accumulated results - wasm_v128_store(&aux32[0], acc0); - wasm_v128_store(&aux32[4], acc1); - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - for (int l = 0; l < 8; ++l) { - sums[l] += d * aux32[l]; - } - } - - // Sum final results - float sumf = 0; - for (int l = 0; l < 8; ++l) { - sumf += sums[l]; - } - *s = sumf; - -#elif defined __riscv_xtheadvector - - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - - const uint8_t * restrict q6 = x[i].ql; - const uint8_t * restrict qh = x[i].qh; - const int8_t * restrict q8 = y[i].qs; - - const int8_t * restrict scale = x[i].scales; - - int sum_t = 0; - int t0; - - for (int j = 0; j < QK_K/128; ++j) { - __asm__ __volatile__( - "th.vsetvli zero, %[vl32], e8, m2\n\t" // vl == 32 - "th.vlb.v v4, (%[qh])\n\t" - "th.vsll.vi v0, v4, 4\n\t" - "th.vsll.vi v2, v4, 2\n\t" - "th.vsrl.vi v6, v4, 2\n\t" - "th.vsetvli zero, %[vl64], e8, m4\n\t" // vl == 64 - "th.vlb.v v8, (%[q6])\n\t" - "th.vsrl.vi v12, v8, 4\n\t" - "th.vand.vi v8, v8, 0xF\n\t" - "th.vsetvli zero, %[vl128], e8, m8\n\t" // vl == 128 - "th.vand.vx v0, v0, %[mask]\n\t" - "th.vor.vv v8, v8, v0\n\t" - "th.vlb.v v0, (%[q8])\n\t" - "th.vsub.vx v8, v8, %[vl32]\n\t" - "th.vsetvli zero, %[vl64], e8, m4\n\t" // vl == 64 - "th.vwmul.vv v16, v0, v8\n\t" - "th.vwmul.vv v24, v4, v12\n\t" - "li %[t0], 16\n\t" - "th.vsetvli zero, %[t0], e16, m2\n\t" // vl == 16 - "th.vmv.v.x v0, zero\n\t" - "th.vwredsum.vs v10, v16, v0\n\t" - "th.vwredsum.vs v9, v18, v0\n\t" - 
"th.vwredsum.vs v8, v20, v0\n\t" - "th.vwredsum.vs v7, v22, v0\n\t" - "th.vwredsum.vs v11, v24, v0\n\t" - "th.vwredsum.vs v12, v26, v0\n\t" - "th.vwredsum.vs v13, v28, v0\n\t" - "th.vwredsum.vs v14, v30, v0\n\t" - "li %[t0], 4\n\t" - "th.vsetvli zero, %[t0], e32, m1\n\t" // vl == 4 - "th.vslideup.vi v10, v9, 1\n\t" - "th.vslideup.vi v8, v7, 1\n\t" - "th.vslideup.vi v11, v12, 1\n\t" - "th.vslideup.vi v13, v14, 1\n\t" - "th.vslideup.vi v10, v8, 2\n\t" - "th.vslideup.vi v11, v13, 2\n\t" - "li %[t0], 8\n\t" - "th.vsetvli zero, %[t0], e32, m2\n\t" // vl == 8 - "th.vlb.v v4, (%[scale])\n\t" - "th.vmul.vv v2, v4, v10\n\t" - "th.vredsum.vs v0, v2, v0\n\t" - "th.vmv.x.s %[t0], v0\n\t" - "add %[sumi], %[sumi], %[t0]" - : [sumi] "+&r" (sum_t), [t0] "=&r" (t0) - : [qh] "r" (qh), [q6] "r" (q6), [q8] "r" (q8), [scale] "r" (scale) - , [vl32] "r" (32), [vl64] "r" (64), [vl128] "r" (128) - , [mask] "r" (0x30) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - q6 += 64; qh += 32; q8 += 128; scale += 8; - } - - sumf += d * sum_t; - - } - - *s = sumf; - -#elif defined __riscv_v - - float sumf = 0; - const int vector_length = __riscv_vlenb() * 8; - - switch (vector_length) { - case 256: - for (int i = 0; i < nb; ++i) { - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - - const uint8_t * GGML_RESTRICT q6 = x[i].ql; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const int8_t * GGML_RESTRICT scale = x[i].scales; - - size_t vl; - - vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); - - int sum_t = 0; - int is = 0; - - for (int j = 0; j < QK_K/128; ++j) { - - vl = 32; - - // load qh - vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl); - - // load Q6 - vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl); - vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl); - - vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl); - vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl); - vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl); - vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl); - - vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl); - vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl); - vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl); - vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl); - - vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl); - vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl); - vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl); - vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl); - - vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl); - vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl); - vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl); - vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl); - - // load Q8 and take product - vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl); - vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl); - vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, 
__riscv_vle8_v_i8m1(q8+64, vl), vl); - vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl); - - vl = 16; - - vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl); - vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl); - vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl); - vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl); - vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl); - vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl); - vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl); - vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl); - - vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl); - vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl); - vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl); - vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl); - - sum_t += __riscv_vmv_x_s_i32m1_i32(isum3); - - q6 += 64; qh += 32; q8 += 128; is=8; - - } - - sumf += d * sum_t; - - } - break; - case 128: - for (int i = 0; i < nb; ++i) { - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - - const uint8_t * restrict q6 = x[i].ql; - const uint8_t * restrict qh = x[i].qh; - const int8_t * restrict q8 = y[i].qs; - - const int8_t * restrict scale = x[i].scales; - - int sum_t = 0; - int t0; - - for (int j = 0; j < QK_K/128; ++j) { - __asm__ __volatile__( - "vsetvli zero, %[vl32], e8, m2\n\t" - "vle8.v v4, (%[qh])\n\t" - "vsll.vi v0, v4, 4\n\t" - "vsll.vi v2, v4, 2\n\t" - "vsrl.vi v6, v4, 2\n\t" - "vsetvli zero, %[vl64], e8, m4\n\t" - "vle8.v v8, (%[q6])\n\t" - "vsrl.vi v12, v8, 4\n\t" - "vand.vi v8, v8, 0xF\n\t" - "vsetvli zero, %[vl128], e8, m8\n\t" - "vand.vx v0, v0, %[mask]\n\t" - "vor.vv v8, v8, v0\n\t" - "vle8.v v0, (%[q8])\n\t" - "vsub.vx v8, v8, %[vl32]\n\t" - "vsetvli zero, %[vl64], e8, m4\n\t" - "vwmul.vv v16, v0, v8\n\t" - "vwmul.vv v24, v4, v12\n\t" - "vsetivli zero, 16, e16, m2\n\t" - "vmv.v.x v0, zero\n\t" - "vwredsum.vs v10, v16, v0\n\t" - "vwredsum.vs v9, v18, v0\n\t" - "vwredsum.vs v8, v20, v0\n\t" - "vwredsum.vs v7, v22, v0\n\t" - "vwredsum.vs v11, v24, v0\n\t" - "vwredsum.vs v12, v26, v0\n\t" - "vwredsum.vs v13, v28, v0\n\t" - "vwredsum.vs v14, v30, v0\n\t" - "vsetivli zero, 4, e32, m1\n\t" - "vslideup.vi v10, v9, 1\n\t" - "vslideup.vi v8, v7, 1\n\t" - "vslideup.vi v11, v12, 1\n\t" - "vslideup.vi v13, v14, 1\n\t" - "vslideup.vi v10, v8, 2\n\t" - "vslideup.vi v11, v13, 2\n\t" - "vsetivli zero, 8, e32, m2\n\t" - "vle8.v v2, (%[scale])\n\t" - "vsext.vf4 v4, v2\n\t" - "vmul.vv v2, v4, v10\n\t" - "vredsum.vs v0, v2, v0\n\t" - "vmv.x.s %[t0], v0\n\t" - "add %[sumi], %[sumi], %[t0]" - : [sumi] "+&r" (sum_t), [t0] "=&r" (t0) - : [qh] "r" (qh), [q6] "r" (q6), [q8] "r" (q8), [scale] "r" (scale) - , [vl32] "r" (32), [vl64] "r" (64), [vl128] "r" (128) - , [mask] "r" (0x30) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", 
"v28", "v29", "v30", "v31" - ); - q6 += 64; qh += 32; q8 += 128; scale += 8; - } - - sumf += d * sum_t; - - } - break; - default: - assert(false && "Unsupported vector length"); - break; - } - - *s = sumf; - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector int v0 = vec_splats((int32_t)0); - const vector unsigned char v2 = vec_splats((unsigned char)0x2); - const vector unsigned char v3 = vec_splats((unsigned char)0x3); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - const vector unsigned char v6 = vec_splats((unsigned char)0x6); - const vector signed char off = vec_splats((signed char)0x20); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - vector signed int vsumi4 = v0; - vector signed int vsumi5 = v0; - vector signed int vsumi6 = v0; - vector signed int vsumi7 = v0; - - const uint8_t * GGML_RESTRICT q6 = x[i].ql; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT qs = x[i].scales; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - for (int j = 0; j < QK_K/128; ++j) { - __builtin_prefetch(q6, 0, 0); - __builtin_prefetch(qh, 0, 0); - __builtin_prefetch(q8, 0, 0); - - vector signed char qxs0 = (vector signed char)vec_xl( 0, q6); - vector signed char qxs1 = (vector signed char)vec_xl(16, q6); - vector signed char qxs2 = (vector signed char)vec_xl(32, q6); - vector signed char qxs3 = (vector signed char)vec_xl(48, q6); - q6 += 64; - - vector signed char qxs00 = vec_and(qxs0, lowMask); - vector signed char qxs01 = vec_sr(qxs0, v4); - vector signed char qxs10 = vec_and(qxs1, lowMask); - vector signed char qxs11 = vec_sr(qxs1, v4); - vector signed char qxs20 = vec_and(qxs2, lowMask); - vector signed char qxs21 = vec_sr(qxs2, v4); - vector signed char qxs30 = vec_and(qxs3, lowMask); - vector signed char qxs31 = vec_sr(qxs3, v4); - - vector signed char qxhs0 = (vector signed char)vec_xl( 0, qh); - vector signed char qxhs1 = (vector signed char)vec_xl(16, qh); - qh += 32; - - vector signed char qxh00 = vec_sl(vec_and((vector signed char)v3, qxhs0), v4); - vector signed char qxh01 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v4)), v4); - vector signed char qxh10 = vec_sl(vec_and((vector signed char)v3, qxhs1), v4); - vector signed char qxh11 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v4)), v4); - vector signed char qxh20 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v2)), v4); - vector signed char qxh21 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v6)), v4); - vector signed char qxh30 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v2)), v4); - vector signed char qxh31 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v6)), v4); - - vector signed char q6x00 = vec_sub(vec_or(qxh00, qxs00), off); - vector signed char q6x01 = vec_sub(vec_or(qxh01, qxs01), off); - vector signed char q6x10 = vec_sub(vec_or(qxh10, qxs10), off); - vector signed char q6x11 = vec_sub(vec_or(qxh11, qxs11), off); - vector signed char q6x20 = vec_sub(vec_or(qxh20, qxs20), off); - vector signed char q6x21 = vec_sub(vec_or(qxh21, qxs21), off); - 
vector signed char q6x30 = vec_sub(vec_or(qxh30, qxs30), off); - vector signed char q6x31 = vec_sub(vec_or(qxh31, qxs31), off); - - vector signed char q8y00 = vec_xl( 0, q8); - vector signed char q8y10 = vec_xl( 16, q8); - vector signed char q8y20 = vec_xl( 32, q8); - vector signed char q8y30 = vec_xl( 48, q8); - vector signed char q8y01 = vec_xl( 64, q8); - vector signed char q8y11 = vec_xl( 80, q8); - vector signed char q8y21 = vec_xl( 96, q8); - vector signed char q8y31 = vec_xl(112, q8); - q8 += 128; - - vector signed short qv00 = vec_add(vec_mule(q6x00, q8y00), vec_mulo(q6x00, q8y00)); - vector signed short qv10 = vec_add(vec_mule(q6x10, q8y10), vec_mulo(q6x10, q8y10)); - vector signed short qv20 = vec_add(vec_mule(q6x20, q8y20), vec_mulo(q6x20, q8y20)); - vector signed short qv30 = vec_add(vec_mule(q6x30, q8y30), vec_mulo(q6x30, q8y30)); - vector signed short qv01 = vec_add(vec_mule(q6x01, q8y01), vec_mulo(q6x01, q8y01)); - vector signed short qv11 = vec_add(vec_mule(q6x11, q8y11), vec_mulo(q6x11, q8y11)); - vector signed short qv21 = vec_add(vec_mule(q6x21, q8y21), vec_mulo(q6x21, q8y21)); - vector signed short qv31 = vec_add(vec_mule(q6x31, q8y31), vec_mulo(q6x31, q8y31)); - - vector signed short vscales = vec_unpackh(vec_xl_len(qs, 8)); - qs += 8; - - vector signed short vs0 = vec_splat(vscales, 0); - vector signed short vs1 = vec_splat(vscales, 1); - vector signed short vs2 = vec_splat(vscales, 2); - vector signed short vs3 = vec_splat(vscales, 3); - vector signed short vs4 = vec_splat(vscales, 4); - vector signed short vs5 = vec_splat(vscales, 5); - vector signed short vs6 = vec_splat(vscales, 6); - vector signed short vs7 = vec_splat(vscales, 7); - - vsumi0 = vec_msum(qv00, vs0, vsumi0); - vsumi1 = vec_msum(qv01, vs4, vsumi1); - vsumi2 = vec_msum(qv10, vs1, vsumi2); - vsumi3 = vec_msum(qv11, vs5, vsumi3); - vsumi4 = vec_msum(qv20, vs2, vsumi4); - vsumi5 = vec_msum(qv21, vs6, vsumi5); - vsumi6 = vec_msum(qv30, vs3, vsumi6); - vsumi7 = vec_msum(qv31, vs7, vsumi7); - } - - vsumi0 = vec_add(vsumi0, vsumi4); - vsumi1 = vec_add(vsumi1, vsumi5); - vsumi2 = vec_add(vsumi2, vsumi6); - vsumi3 = vec_add(vsumi3, vsumi7); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined __loongarch_asx - - const __m256i m32s = __lasx_xvreplgr2vr_b(32); - - __m256 acc = (__m256)__lasx_xvldi(0); - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * GGML_RESTRICT q4 = x[i].ql; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const __m128i scales128 = __lsx_vld((const __m128i*)x[i].scales, 0); - const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; - const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); - - __m256i sumi = __lasx_xvldi(0); - - for (int j = 0; j < QK_K/128; ++j) { - - const __m256i q4bits1 = __lasx_xvld((const __m256i*)q4, 0); q4 += 32; - const __m256i q4bits2 = __lasx_xvld((const __m256i*)q4, 0); q4 += 32; - const __m256i q4bitsH = __lasx_xvld((const __m256i*)qh, 0); qh += 32; - 
- const __m256i q4h_0 = __lasx_xvslli_b(__lasx_xvandi_b(q4bitsH, 3), 4); - const __m256i q4h_1 = __lasx_xvslli_b(__lasx_xvandi_b(q4bitsH, 3 << 2), 2); - const __m256i q4h_2 = __lasx_xvandi_b(q4bitsH, 3 << 4); - const __m256i q4h_3 = __lasx_xvsrli_b(__lasx_xvandi_b(q4bitsH, 3 << 6), 2); - - const __m256i q4_0 = __lasx_xvor_v(__lasx_xvandi_b(q4bits1, 0xf), q4h_0); - const __m256i q4_1 = __lasx_xvor_v(__lasx_xvandi_b(q4bits2, 0xf), q4h_1); - const __m256i q4_2 = __lasx_xvor_v(__lasx_xvsrli_b(q4bits1, 4), q4h_2); - const __m256i q4_3 = __lasx_xvor_v(__lasx_xvsrli_b(q4bits2, 4), q4h_3); - - const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_3 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - - __m256i p16_0 = lasx_madd_h_b(__lasx_xvsub_b(q4_0, m32s), q8_0); - __m256i p16_1 = lasx_madd_h_b(__lasx_xvsub_b(q4_1, m32s), q8_1); - __m256i p16_2 = lasx_madd_h_b(__lasx_xvsub_b(q4_2, m32s), q8_2); - __m256i p16_3 = lasx_madd_h_b(__lasx_xvsub_b(q4_3, m32s), q8_3); - - p16_0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p16_0); - p16_1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p16_1); - p16_2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p16_2); - p16_3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p16_3); - - sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_1)); - sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_2, p16_3)); - } - - acc = __lasx_xvfmadd_s((__m256)__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); - } - - *s = hsum_float_8(acc); -#elif defined(__VXE__) || defined(__VXE2__) - float sum = 0; - - // Lower 4-bit and upper 2-bit masks - const uint8x16_t v_lm = vec_splat_u8(0x0F); - const uint8x16_t v_um = vec_splat_u8(0x03); - - const int32x4_t v_z = vec_splat_s32(0); - - int8x16_t q6b[4]; - uint8x16_t q6h[4]; - - uint8x16_t v_xl[4]; - uint8x16_t v_xh[2]; - int8x16_t v_y[4]; - - for (int i = 0; i < nb; ++i) { - const float d_all = GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * GGML_RESTRICT x0l = x[i].ql; - const uint8_t * GGML_RESTRICT x0h = x[i].qh; - const int8_t * GGML_RESTRICT y0 = y[i].qs; - - const int8_t * GGML_RESTRICT scale = x[i].scales; - - const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums); - const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums); - - const int8x16_t v_scale = vec_xl(0, scale); - const int16x8_t v_scalel = vec_unpackh(v_scale); - const int16x8_t v_scaleh = vec_unpackl(v_scale); - - const int32x4_t v_minslo = vec_mulo(v_ysumsl, v_scalel); - const int32x4_t v_minsle = vec_mule(v_ysumsl, v_scalel); - const int32x4_t v_minsho = vec_mulo(v_ysumsh, v_scaleh); - const int32x4_t v_minshe = vec_mule(v_ysumsh, v_scaleh); - const int32x4_t v_mins = v_minslo + v_minsle + v_minsho + v_minshe; - - const int32_t mins = v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3]; - - int32_t isum = 0; - for (int j = 0; j < QK_K/128; ++j) { - // Load model upper 2 bits - v_xh[0] = vec_xl(0 , x0h); - v_xh[1] = vec_xl(16, x0h); - x0h += 32; - - // Load model lower 4 bits - v_xl[0] = vec_xl(0 , x0l); - v_xl[1] = vec_xl(16, x0l); - v_xl[2] = vec_xl(32, x0l); - v_xl[3] = vec_xl(48, x0l); - x0l += 64; - - // Load activation quants - v_y[0] = vec_xl(0 , y0); - v_y[1] = vec_xl(16, y0); - v_y[2] = vec_xl(32, y0); - v_y[3] = vec_xl(48, y0); - y0 += 64; - - q6h[0] = vec_sl(vec_and(v_um, v_xh[0]), 4); - q6h[1] = vec_sl(vec_and(v_um, v_xh[1]), 4); - uint8x16_t 
shifted = vec_sr(v_xh[0], 2); - q6h[2] = vec_sl(vec_and(v_um, shifted), 4); - shifted = vec_sr(v_xh[1], 2); - q6h[3] = vec_sl(vec_and(v_um, shifted), 4); - - q6b[0] = (int8x16_t)(vec_or(vec_and(v_xl[0], v_lm), q6h[0])); - q6b[1] = (int8x16_t)(vec_or(vec_and(v_xl[1], v_lm), q6h[1])); - q6b[2] = (int8x16_t)(vec_or(vec_and(v_xl[2], v_lm), q6h[2])); - q6b[3] = (int8x16_t)(vec_or(vec_and(v_xl[3], v_lm), q6h[3])); - - int32x4_t summs0 = ggml_vec_dot(v_z, q6b[0], v_y[0]); - int32x4_t summs1 = ggml_vec_dot(v_z, q6b[1], v_y[1]); - int32x4_t summs2 = ggml_vec_dot(v_z, q6b[2], v_y[2]); - int32x4_t summs3 = ggml_vec_dot(v_z, q6b[3], v_y[3]); - - isum += (summs0[0] + summs0[1] + summs0[2] + summs0[3]) * scale[0] + - (summs1[0] + summs1[1] + summs1[2] + summs1[3]) * scale[1] + - (summs2[0] + summs2[1] + summs2[2] + summs2[3]) * scale[2] + - (summs3[0] + summs3[1] + summs3[2] + summs3[3]) * scale[3]; - - scale += 4; - - - // Load activation quants - v_y[0] = vec_xl(0 , y0); - v_y[1] = vec_xl(16, y0); - v_y[2] = vec_xl(32, y0); - v_y[3] = vec_xl(48, y0); - y0 += 64; - - shifted = vec_sr(v_xh[0], 4); - q6h[0] = vec_sl(vec_and(v_um, shifted), 4); - shifted = vec_sr(v_xh[1], 4); - q6h[1] = vec_sl(vec_and(v_um, shifted), 4); - shifted = vec_sr(v_xh[0], 6); - q6h[2] = vec_sl(vec_and(v_um, shifted), 4); - shifted = vec_sr(v_xh[1], 6); - q6h[3] = vec_sl(vec_and(v_um, shifted), 4); - - q6b[0] = (int8x16_t)(vec_or(vec_sr(v_xl[0], 4), q6h[0])); - q6b[1] = (int8x16_t)(vec_or(vec_sr(v_xl[1], 4), q6h[1])); - q6b[2] = (int8x16_t)(vec_or(vec_sr(v_xl[2], 4), q6h[2])); - q6b[3] = (int8x16_t)(vec_or(vec_sr(v_xl[3], 4), q6h[3])); - - summs0 = ggml_vec_dot(v_z, q6b[0], v_y[0]); - summs1 = ggml_vec_dot(v_z, q6b[1], v_y[1]); - summs2 = ggml_vec_dot(v_z, q6b[2], v_y[2]); - summs3 = ggml_vec_dot(v_z, q6b[3], v_y[3]); - - isum += (summs0[0] + summs0[1] + summs0[2] + summs0[3]) * scale[0] + - (summs1[0] + summs1[1] + summs1[2] + summs1[3]) * scale[1] + - (summs2[0] + summs2[1] + summs2[2] + summs2[3]) * scale[2] + - (summs3[0] + summs3[1] + summs3[2] + summs3[3]) * scale[3]; - - scale += 4; - } - - sum += d_all * y[i].d * (isum - 32 * mins); - } - - *s = sum; -#else - - int8_t aux8[QK_K]; - int16_t aux16[8]; - float sums [8]; - int32_t aux32[8]; - memset(sums, 0, 8*sizeof(float)); - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t * GGML_RESTRICT q4 = x[i].ql; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - memset(aux32, 0, 8*sizeof(int32_t)); - int8_t * GGML_RESTRICT a = aux8; - for (int j = 0; j < QK_K; j += 128) { - for (int l = 0; l < 32; ++l) { - a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; - a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; - a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; - a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; - } - a += 128; - q4 += 64; - qh += 32; - } - a = aux8; - int is = 0; - for (int j = 0; j < QK_K/16; ++j) { - int scale = x[i].scales[is++]; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; - q8 += 8; a += 8; - } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - } - for (int l = 0; l < 8; ++l) sumf += sums[l]; - *s = sumf; -#endif -} - -#if defined (__AVX__) || defined 
(__AVX2__) || defined (__ARM_NEON) || defined (__POWER9_VECTOR__) || defined(__loongarch_asx) -static const int8_t keven_signs_q2xs[1024] = { - 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, - 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, - 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, - 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, - 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, - 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, - 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, - 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, - 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, - 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, - 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, - 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, - 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, - 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, - 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, - 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, - 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, - 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, - 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, - 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, - 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, - 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, - 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, - 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, - 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, - 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, - 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, - 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, - 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, - 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, - 1, 1, 
1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, - 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -}; -#endif - -void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_iq2_xxs * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_NEON) - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[4]; - const uint8_t * aux8 = (const uint8_t *)aux32; - - ggml_int8x16x4_t q2u; - ggml_int8x16x4_t q2s; - ggml_int8x16x4_t q8b; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - float sumf1 = 0, sumf2 = 0; - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; - q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1]))); - q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3]))); - q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9]))); - q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11]))); - q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127)))); - q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127)))); - q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 7) & 127)))); - q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127)))); - q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]); - q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]); - q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]); - q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]); - const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]); - const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]); - sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28)); - sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28)); - } - sumf += d*(sumf1 + sumf2); - } - *s = 0.25f * sumf; - -#elif defined(__AVX2__) - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[4]; - const uint8_t * aux8 = (const uint8_t *)aux32; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const 
__m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; - const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); - const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); - const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], - signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); - const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127], - signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); - const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); - const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); - const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); - const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); - const uint16_t ls1 = aux32[1] >> 28; - const uint16_t ls2 = aux32[3] >> 28; - const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); - const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); - sumi1 = _mm256_add_epi32(sumi1, p1); - sumi2 = _mm256_add_epi32(sumi2, p2); - } - - accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); - - } - - *s = 0.125f * hsum_float_8(accumf); - -#elif defined(__AVX__) - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[4]; - const uint8_t * aux8 = (const uint8_t *)aux32; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; - const __m128i q2_1_0 = _mm_set_epi64x(iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); - const __m128i q2_1_1 = _mm_set_epi64x(iq2xxs_grid[aux8[3]], iq2xxs_grid[aux8[2]]); - const __m128i q2_2_0 = _mm_set_epi64x(iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); - const __m128i q2_2_1 = _mm_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]]); - const __m128i s2_1_0 = _mm_set_epi64x(signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); - const __m128i s2_1_1 = _mm_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127]); - const __m128i s2_2_0 = _mm_set_epi64x(signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); - const __m128i s2_2_1 = _mm_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127]); - const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, s2_1_0); - const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, s2_1_1); - const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, s2_2_0); - const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, s2_2_1); - const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); - const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); - const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); - const __m128i dot2_1 
= _mm_maddubs_epi16(q2_2_1, q8s_2_1); - const uint16_t ls1 = aux32[1] >> 28; - const uint16_t ls2 = aux32[3] >> 28; - const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); - const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); - const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); - const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); - sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); - sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); - sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); - sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); - } - - accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); - - } - - *s = 0.125f * hsum_float_8(accumf); - -#elif defined(__POWER9_VECTOR__) - const vector int v0 = vec_splats((int32_t)0); - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - for (int j = 0; j < QK_K/32; j += 2) { - __builtin_prefetch(q2, 0, 1); - __builtin_prefetch(q8, 0, 1); - - uint32_t aux32[4]; - const uint8_t * aux8 = (const uint8_t *)aux32; - - memcpy(aux32, q2, 4*sizeof(uint32_t)); - q2 += 8; - - vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1])}; - vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3])}; - vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9])}; - vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11])}; - - vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127))}; - vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127))}; - vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127))}; - vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127))}; - - vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); - vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); - vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); - vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = 
vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); - - const uint16_t ls0 = aux32[1] >> 28; - const uint16_t ls1 = aux32[3] >> 28; - - vector signed short vscales01 = vec_splats((int16_t)(2*ls0+1)); - vector signed short vscales23 = vec_splats((int16_t)(2*ls1+1)); - - vsumi0 = vec_msum(qv0, vscales01, vsumi0); - vsumi1 = vec_msum(qv1, vscales01, vsumi1); - vsumi2 = vec_msum(qv2, vscales23, vsumi2); - vsumi3 = vec_msum(qv3, vscales23, vsumi3); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = 0.125f * vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[4]; - const uint8_t * aux8 = (const uint8_t *)aux32; - - __m256 accumf = (__m256)__lasx_xvldi(0); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - __m256i sumi1 = __lasx_xvldi(0); - __m256i sumi2 = __lasx_xvldi(0); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; - - const __m256i q2_1 = lasx_set_d(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); - const __m256i q2_2 = lasx_set_d(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); - const __m256i s2_1 = lasx_set_d(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], - signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); - const __m256i s2_2 = lasx_set_d(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127], - signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); - const __m256i q8s_1 = __lasx_xvsigncov_b(s2_1, q8_1); - const __m256i q8s_2 = __lasx_xvsigncov_b(s2_2, q8_2); - const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); - const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); - const uint16_t ls1 = aux32[1] >> 28; - const uint16_t ls2 = aux32[3] >> 28; - const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2*ls1+1)); - const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2*ls2+1)); - sumi1 = __lasx_xvadd_w(sumi1, p1); - sumi2 = __lasx_xvadd_w(sumi2, p2); - } - - accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); - } - - *s = 0.125f * hsum_float_8(accumf); -//#elif defined(__VXE__) || defined(__VXE2__) -// const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; -// -// uint32_t aux32[4]; -// const uint8_t * aux8 = (const uint8_t *)aux32; -// -// float sumf = 0; -// -// for (int i = 0; i < nb; ++i) { -// const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; -// const uint16_t * GGML_RESTRICT q2 = x[i].qs; -// const int8_t * GGML_RESTRICT q8 = y[i].qs; 
-// -// float sumf1 = 0, sumf2 = 0; -// -// for (int ib32 = 0; ib32 < QK_K/32; ib += 2) { -// int8x16_t q8b0 = vec_xl( 0, q8); -// int8x16_t qb81 = vec_xl(16, q8); -// int8x16_t q8b2 = vec_xl(32, q8); -// int8x16_t q8b3 = vec_xl(48, q8); -// q8 += 64; -// -// memcpy(aux32, q2, 4 * sizeof(uint32_t)); -// q2 += 8; -// -// int8x16_t q2u0 = { *(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1]) }; -// int8x16_t q2u1 = { *(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3]) }; -// int8x16_t q2u2 = { *(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9]) }; -// int8x16_t q2u3 = { *(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11]) }; -// -// int8x16_t q2s0 = { *(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127)) }; -// int8x16_t q2s1 = { *(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127)) }; -// int8x16_t q2s2 = { *(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127)) }; -// int8x16_t q2s3 = { *(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127)) }; -// -// q2u0 = vec_mul(q2u0, q2s0); -// q2u1 = vec_mul(q2u1, q2s1); -// q2u2 = vec_mul(q2u2, q2s2); -// q2u3 = vec_mul(q2u3, q2s3); -// -// const int32x4_t p1 = ggml_vec_dot(ggml_vec_dot(vec_splat_s32(0), q2u0, q8b0), q2u1, q8b1); -// const int32x4_t p2 = ggml_vec_dot(ggml_vec_dot(vec_splat_s32(0), q2u2, q8b2), q2u3, q8b3); -// -// sumf1 += (p1[0] + p1[1] + p1[2] + p1[3]) * (0.5f + (aux32[1] >> 28)); -// sumf2 += (p2[0] + p2[1] + p2[2] + p2[3]) * (0.5f + (aux32[3] >> 28)); -// } -// -// sumf += d * (sumf1 + sumf2); -// } -// -// *s = 0.25f * sumf; -#else - - uint32_t aux32[2]; - const uint8_t * aux8 = (const uint8_t *)aux32; - - float sumf = 0.f; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - int32_t bsum = 0; - for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { - memcpy(aux32, q2, 2*sizeof(uint32_t)); - q2 += 4; - const uint32_t ls = 2*(aux32[1] >> 28) + 1; - int32_t sumi = 0; - for (int l = 0; l < 4; ++l) { - const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); - const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; - for (int j = 0; j < 8; ++j) { - sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? 
-1 : 1); - } - q8 += 8; - } - bsum += sumi * ls; - } - sumf += d * bsum; - } - *s = 0.125f * sumf; -#endif -} - -void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_iq2_xs * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_NEON) - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - ggml_int8x16x4_t q2u; - ggml_int8x16x4_t q2s; - ggml_int8x16x4_t q8b; - - int32x4x4_t scales32; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - const uint8x8_t scales8 = vld1_u8(x[i].scales); - const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf)); - const uint8x8_t scales_h = vshr_n_u8(scales8, 4); - uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h)); - scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1)); - const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales)); - const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales)); - scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales1))); - scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1))); - scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales2))); - scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2))); - int32x4_t sumi = vdupq_n_s32(0); - for (int ib64 = 0; ib64 < QK_K/64; ++ib64) { - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511)))); - q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511)))); - q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511)))); - q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511)))); - q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9)))); - q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9)))); - q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9)))); - q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9)))); - q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]); - q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]); - q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]); - q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]); - const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]); - const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]); - const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]); - const int32x4_t p4 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]); - const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4)); - sumi = vmlaq_s32(sumi, p, scales32.val[ib64]); - q2 += 8; - } - sumf += d*vaddvq_s32(sumi); - } - *s = 0.125f * sumf; - -#elif defined(__AVX2__) - - const 
__m256i mone = _mm256_set1_epi8(1); - static const char block_sign_shuffle_mask_1[32] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - }; - static const char block_sign_shuffle_mask_2[32] = { - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, - 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, - }; - static const uint8_t bit_selector_mask_bytes[32] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m256i bit_selector_mask = _mm256_loadu_si256((const __m256i*)bit_selector_mask_bytes); - const __m256i block_sign_shuffle_1 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_1); - const __m256i block_sign_shuffle_2 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_2); - - static const uint8_t k_bit_helper[32] = { - 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, - 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, - }; - const __m256i bit_helper = _mm256_loadu_si256((const __m256i*)k_bit_helper); - const __m256i m511 = _mm256_set1_epi16(511); - const __m128i m4 = _mm_set1_epi8(0xf); - const __m128i m1 = _mm_set1_epi8(1); - - uint64_t aux64; - - // somewhat hacky, but gives a significant boost in performance - __m256i aux_gindex; - const uint16_t * gindex = (const uint16_t *)&aux_gindex; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(&aux64, x[i].scales, 8); - __m128i stmp = _mm_set1_epi64x(aux64); - stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4)); - const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1); - - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { - - const __m256i q2_data = _mm256_loadu_si256((const __m256i*)q2); q2 += 16; - aux_gindex = _mm256_and_si256(q2_data, m511); - - const __m256i partial_sign_bits = _mm256_srli_epi16(q2_data, 9); - const __m256i partial_sign_bits_upper = _mm256_srli_epi16(q2_data, 13); - const __m256i partial_sign_bits_for_counting = _mm256_xor_si256(partial_sign_bits, partial_sign_bits_upper); - - const __m256i odd_bits = _mm256_shuffle_epi8(bit_helper, partial_sign_bits_for_counting); - const __m256i full_sign_bits = _mm256_or_si256(partial_sign_bits, odd_bits); - - const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q8_4 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - - const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]], - iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]); - const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]], - iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]); - const __m256i q2_3 = _mm256_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]], - 
iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]); - const __m256i q2_4 = _mm256_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]], - iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); - - const __m128i full_signs_l = _mm256_castsi256_si128(full_sign_bits); - const __m128i full_signs_h = _mm256_extractf128_si256(full_sign_bits, 1); - const __m256i full_signs_1 = MM256_SET_M128I(full_signs_l, full_signs_l); - const __m256i full_signs_2 = MM256_SET_M128I(full_signs_h, full_signs_h); - - __m256i signs; - signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_1); - signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); - const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone)); - - signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_2); - signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); - const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone)); - - signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_1); - signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); - const __m256i q8s_3 = _mm256_sign_epi8(q8_3, _mm256_or_si256(signs, mone)); - - signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_2); - signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); - const __m256i q8s_4 = _mm256_sign_epi8(q8_4, _mm256_or_si256(signs, mone)); - - const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); - const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); - const __m256i dot3 = _mm256_maddubs_epi16(q2_3, q8s_3); - const __m256i dot4 = _mm256_maddubs_epi16(q2_4, q8s_4); - - const __m256i sc1 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0))); - const __m256i sc2 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1))); - const __m256i sc3 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2))); - const __m256i sc4 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3))); - - sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot1, sc1)); - sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot2, sc2)); - sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot3, sc3)); - sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot4, sc4)); - } - - accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); - - } - - *s = 0.125f * hsum_float_8(accumf); - -#elif defined(__AVX__) - const __m128i mone = _mm_set1_epi8(1); - static const char block_sign_shuffle_mask_1[32] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - }; - static const char block_sign_shuffle_mask_2[32] = { - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, - 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, - }; - static const uint8_t bit_selector_mask_bytes[32] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m128i bit_selector_mask_0 = _mm_loadu_si128((const __m128i*)bit_selector_mask_bytes); - const __m128i bit_selector_mask_1 = _mm_loadu_si128((const __m128i*)bit_selector_mask_bytes + 1); - const __m128i 
block_sign_shuffle_1_0 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_1); - const __m128i block_sign_shuffle_1_1 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_1 + 1); - const __m128i block_sign_shuffle_2_0 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_2); - const __m128i block_sign_shuffle_2_1 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_2 + 1); - - static const uint8_t k_bit_helper[32] = { - 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, - 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, - }; - const __m128i bit_helper_0 = _mm_loadu_si128((const __m128i*)k_bit_helper); - const __m128i bit_helper_1 = _mm_loadu_si128((const __m128i*)k_bit_helper + 1); - const __m128i m511 = _mm_set1_epi16(511); - const __m128i m4 = _mm_set1_epi8(0xf); - const __m128i m1 = _mm_set1_epi8(1); - - uint64_t aux64; - - // somewhat hacky, but gives a significant boost in performance - __m256i aux_gindex; - const uint16_t * gindex = (const uint16_t *)&aux_gindex; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(&aux64, x[i].scales, 8); - __m128i stmp = _mm_set1_epi64x(aux64); - stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4)); - const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1); - - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { - - const __m128i q2_data_0 = _mm_loadu_si128((const __m128i*)q2); - const __m128i q2_data_1 = _mm_loadu_si128((const __m128i*)q2 + 1); q2 += 16; - aux_gindex = MM256_SET_M128I(_mm_and_si128(q2_data_1, m511), _mm_and_si128(q2_data_0, m511)); - - const __m128i partial_sign_bits_0 = _mm_srli_epi16(q2_data_0, 9); - const __m128i partial_sign_bits_1 = _mm_srli_epi16(q2_data_1, 9); - const __m128i partial_sign_bits_upper_0 = _mm_srli_epi16(q2_data_0, 13); - const __m128i partial_sign_bits_upper_1 = _mm_srli_epi16(q2_data_1, 13); - const __m128i partial_sign_bits_for_counting_0 = _mm_xor_si128(partial_sign_bits_0, partial_sign_bits_upper_0); - const __m128i partial_sign_bits_for_counting_1 = _mm_xor_si128(partial_sign_bits_1, partial_sign_bits_upper_1); - - const __m128i odd_bits_0 = _mm_shuffle_epi8(bit_helper_0, partial_sign_bits_for_counting_0); - const __m128i odd_bits_1 = _mm_shuffle_epi8(bit_helper_1, partial_sign_bits_for_counting_1); - const __m128i full_sign_bits_0 = _mm_or_si128(partial_sign_bits_0, odd_bits_0); - const __m128i full_sign_bits_1 = _mm_or_si128(partial_sign_bits_1, odd_bits_1); - - const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_3_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_3_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_4_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_4_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - - const __m128i q2_1_0 = _mm_set_epi64x(iq2xs_grid[gindex[1]], 
iq2xs_grid[gindex[0]]); - const __m128i q2_1_1 = _mm_set_epi64x(iq2xs_grid[gindex[3]], iq2xs_grid[gindex[2]]); - const __m128i q2_2_0 = _mm_set_epi64x(iq2xs_grid[gindex[5]], iq2xs_grid[gindex[4]]); - const __m128i q2_2_1 = _mm_set_epi64x(iq2xs_grid[gindex[7]], iq2xs_grid[gindex[6]]); - const __m128i q2_3_0 = _mm_set_epi64x(iq2xs_grid[gindex[9]], iq2xs_grid[gindex[8]]); - const __m128i q2_3_1 = _mm_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]]); - const __m128i q2_4_0 = _mm_set_epi64x(iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); - const __m128i q2_4_1 = _mm_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]]); - - // AVX2 full_signs_1 is full_sign_bits_0 here - // AVX2 full_signs_2 is full_sign_bits_1 here - __m128i signs_0, signs_1; - signs_0 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_1_0); - signs_1 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_1_1); - signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); - signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); - const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, _mm_or_si128(signs_0, mone)); - const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, _mm_or_si128(signs_1, mone)); - - signs_0 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_2_0); - signs_1 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_2_1); - signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); - signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); - const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, _mm_or_si128(signs_0, mone)); - const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, _mm_or_si128(signs_1, mone)); - - signs_0 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_1_0); - signs_1 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_1_1); - signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); - signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); - const __m128i q8s_3_0 = _mm_sign_epi8(q8_3_0, _mm_or_si128(signs_0, mone)); - const __m128i q8s_3_1 = _mm_sign_epi8(q8_3_1, _mm_or_si128(signs_1, mone)); - - signs_0 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_2_0); - signs_1 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_2_1); - signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); - signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); - const __m128i q8s_4_0 = _mm_sign_epi8(q8_4_0, _mm_or_si128(signs_0, mone)); - const __m128i q8s_4_1 = _mm_sign_epi8(q8_4_1, _mm_or_si128(signs_1, mone)); - - const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); - const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); - const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); - const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); - const __m128i dot3_0 = _mm_maddubs_epi16(q2_3_0, q8s_3_0); - const __m128i dot3_1 = _mm_maddubs_epi16(q2_3_1, q8s_3_1); - const __m128i dot4_0 = _mm_maddubs_epi16(q2_4_0, q8s_4_0); - const __m128i dot4_1 = _mm_maddubs_epi16(q2_4_1, q8s_4_1); - - __m128i sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0)); - const __m128i sc1_0 = _mm_cvtepi8_epi16(sc_tmp); - const __m128i sc1_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); - sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1)); - const __m128i sc2_0 = _mm_cvtepi8_epi16(sc_tmp); - const __m128i sc2_1 = 
_mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); - sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2)); - const __m128i sc3_0 = _mm_cvtepi8_epi16(sc_tmp); - const __m128i sc3_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); - sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3)); - const __m128i sc4_0 = _mm_cvtepi8_epi16(sc_tmp); - const __m128i sc4_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); - - sumi1_0 = _mm_add_epi32(sumi1_0, _mm_madd_epi16(dot1_0, sc1_0)); - sumi1_1 = _mm_add_epi32(sumi1_1, _mm_madd_epi16(dot1_1, sc1_1)); - sumi2_0 = _mm_add_epi32(sumi2_0, _mm_madd_epi16(dot2_0, sc2_0)); - sumi2_1 = _mm_add_epi32(sumi2_1, _mm_madd_epi16(dot2_1, sc2_1)); - sumi1_0 = _mm_add_epi32(sumi1_0, _mm_madd_epi16(dot3_0, sc3_0)); - sumi1_1 = _mm_add_epi32(sumi1_1, _mm_madd_epi16(dot3_1, sc3_1)); - sumi2_0 = _mm_add_epi32(sumi2_0, _mm_madd_epi16(dot4_0, sc4_0)); - sumi2_1 = _mm_add_epi32(sumi2_1, _mm_madd_epi16(dot4_1, sc4_1)); - } - - accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); - - } - - *s = 0.125f * hsum_float_8(accumf); - -#elif defined(__loongarch_asx) - - const __m256i mone = __lasx_xvreplgr2vr_b(1); - static const char block_sign_shuffle_mask_1[32] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - }; - static const char block_sign_shuffle_mask_2[32] = { - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, - 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, - }; - static const uint8_t bit_selector_mask_bytes[32] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m256i bit_selector_mask = __lasx_xvld((const __m256i*)bit_selector_mask_bytes, 0); - const __m256i block_sign_shuffle_1 = __lasx_xvld((const __m256i*)block_sign_shuffle_mask_1, 0); - const __m256i block_sign_shuffle_2 = __lasx_xvld((const __m256i*)block_sign_shuffle_mask_2, 0); - - static const uint8_t k_bit_helper[32] = { - 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, - 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, - }; - const __m256i bit_helper = __lasx_xvld((const __m256i*)k_bit_helper, 0); - const __m256i m511 = __lasx_xvreplgr2vr_h(511); - const __m128i m4 = __lsx_vreplgr2vr_b(0xf); - const __m128i m1 = __lsx_vreplgr2vr_b(1); - - uint64_t aux64; - - // somewhat hacky, but gives a significant boost in performance - __m256i aux_gindex; - const uint16_t * gindex = (const uint16_t *)&aux_gindex; - - __m256 accumf = (__m256)__lasx_xvldi(0); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(&aux64, x[i].scales, 8); - __m128i stmp = __lsx_vreplgr2vr_d(aux64); - stmp = __lsx_vilvl_b( __lsx_vand_v(__lsx_vsrli_h(stmp, 4), m4), __lsx_vand_v(stmp, m4)); - const __m128i scales = __lsx_vadd_b(__lsx_vslli_h(stmp, 1), m1); - - __m256i sumi1 = __lasx_xvldi(0); - __m256i sumi2 = __lasx_xvldi(0); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { - - const __m256i 
q2_data = __lasx_xvld((const __m256i*)q2, 0); q2 += 16; - aux_gindex = __lasx_xvand_v(q2_data, m511); - - const __m256i partial_sign_bits = __lasx_xvsrli_h(q2_data, 9); - const __m256i partial_sign_bits_upper = __lasx_xvsrli_h(q2_data, 13); - const __m256i partial_sign_bits_for_counting = __lasx_xvxor_v(partial_sign_bits, partial_sign_bits_upper); - - const __m256i odd_bits = lasx_shuffle_b(bit_helper, partial_sign_bits_for_counting); - const __m256i full_sign_bits = __lasx_xvor_v(partial_sign_bits, odd_bits); - - const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8_3 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8_4 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - - const __m256i q2_1 = lasx_set_d(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]], - iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]); - const __m256i q2_2 = lasx_set_d(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]], - iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]); - const __m256i q2_3 = lasx_set_d(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]], - iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]); - const __m256i q2_4 = lasx_set_d(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]], - iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); - - const __m128i full_signs_l = lasx_extracti128(full_sign_bits, 0); - const __m128i full_signs_h = lasx_extracti128(full_sign_bits, 1); - const __m256i full_signs_1 = lasx_insertf128(full_signs_l, full_signs_l); - const __m256i full_signs_2 = lasx_insertf128(full_signs_h, full_signs_h); - - __m256i signs; - signs = lasx_shuffle_b(full_signs_1, block_sign_shuffle_1); - signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); - const __m256i q8s_1 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_1); - - signs = lasx_shuffle_b(full_signs_1, block_sign_shuffle_2); - signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); - const __m256i q8s_2 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_2); - - signs = lasx_shuffle_b(full_signs_2, block_sign_shuffle_1); - signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); - const __m256i q8s_3 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_3); - - signs = lasx_shuffle_b(full_signs_2, block_sign_shuffle_2); - signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); - const __m256i q8s_4 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_4); - - const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); - const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); - const __m256i dot3 = lasx_maddubs_h(q2_3, q8s_3); - const __m256i dot4 = lasx_maddubs_h(q2_4, q8s_4); - - const __m256i sc1 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+0))); - const __m256i sc2 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+1))); - const __m256i sc3 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+2))); - const __m256i sc4 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+3))); - - sumi1 = __lasx_xvadd_w(sumi1, lasx_madd_h(dot1, sc1)); - sumi2 = __lasx_xvadd_w(sumi2, lasx_madd_h(dot2, sc2)); - sumi1 = __lasx_xvadd_w(sumi1, lasx_madd_h(dot3, sc3)); - sumi2 = __lasx_xvadd_w(sumi2, lasx_madd_h(dot4, sc4)); - } - - accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); - - } - - *s = 0.125f * hsum_float_8(accumf); -#elif defined(__POWER9_VECTOR__) - const vector 
int v0 = vec_splats((int32_t)0); - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const uint8_t * GGML_RESTRICT sc = x[i].scales; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - for (int j = 0; j < QK_K/64; ++j) { - __builtin_prefetch(q2, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xs_grid + (q2[0] & 511)), *(const int64_t *)(iq2xs_grid + (q2[1] & 511))}; - vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xs_grid + (q2[2] & 511)), *(const int64_t *)(iq2xs_grid + (q2[3] & 511))}; - vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xs_grid + (q2[4] & 511)), *(const int64_t *)(iq2xs_grid + (q2[5] & 511))}; - vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xs_grid + (q2[6] & 511)), *(const int64_t *)(iq2xs_grid + (q2[7] & 511))}; - - vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((q2[0] >> 9))), *(const int64_t *)(signs64 + ((q2[1] >> 9)))}; - vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((q2[2] >> 9))), *(const int64_t *)(signs64 + ((q2[3] >> 9)))}; - vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((q2[4] >> 9))), *(const int64_t *)(signs64 + ((q2[5] >> 9)))}; - vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((q2[6] >> 9))), *(const int64_t *)(signs64 + ((q2[7] >> 9)))}; - q2 += 8; - - vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); - vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); - vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); - vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); - - const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); - const uint16_t ls1 = (uint16_t)(sc[0] >> 4); - const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); - const uint16_t ls3 = (uint16_t)(sc[1] >> 4); - sc += 2; - - vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1)); - vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); - vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); - vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); - - vsumi0 = vec_msum(qv0, vscales0, vsumi0); - vsumi1 = vec_msum(qv1, vscales1, vsumi1); - vsumi2 = vec_msum(qv2, vscales2, vsumi2); - vsumi3 = vec_msum(qv3, vscales3, vsumi3); - } - - vsumf0 = 
vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = 0.125f * vec_extract(vsumf0, 0); -#else - - float sumf = 0.f; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const uint8_t * GGML_RESTRICT sc = x[i].scales; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - int32_t bsum = 0; - for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { - const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1; - const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1; - int32_t sumi = 0; - for (int l = 0; l < 2; ++l) { - const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); - const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; - for (int j = 0; j < 8; ++j) { - sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); - } - q8 += 8; - } - bsum += sumi * ls1; - sumi = 0; - for (int l = 2; l < 4; ++l) { - const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); - const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; - for (int j = 0; j < 8; ++j) { - sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); - } - q8 += 8; - } - bsum += sumi * ls2; - q2 += 4; - } - sumf += d * bsum; - } - *s = 0.125f * sumf; -#endif -} - -void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_iq2_s * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_NEON) - - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; - - const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1); - const uint8x16_t mask2 = vld1q_u8(k_mask2); - const uint8x16_t m1 = vdupq_n_u8(1); - const int32x4_t vzero = vdupq_n_s32(0); - - uint8x16x2_t vs; - ggml_int8x16x4_t q2s; - ggml_int8x16x4_t q8b; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - int sumi1 = 0, sumi2 = 0; - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - q2s.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[0] | ((qh[ib32+0] << 8) & 0x300)))), - vld1_s8((const int8_t *)(iq2s_grid + (qs[1] | ((qh[ib32+0] << 6) & 0x300))))); - q2s.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[2] | ((qh[ib32+0] << 4) & 0x300)))), - vld1_s8((const int8_t *)(iq2s_grid + (qs[3] | ((qh[ib32+0] << 2) & 0x300))))); - q2s.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[4] | ((qh[ib32+1] << 8) & 0x300)))), - 
vld1_s8((const int8_t *)(iq2s_grid + (qs[5] | ((qh[ib32+1] << 6) & 0x300))))); - q2s.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[6] | ((qh[ib32+1] << 4) & 0x300)))), - vld1_s8((const int8_t *)(iq2s_grid + (qs[7] | ((qh[ib32+1] << 2) & 0x300))))); - qs += 8; - - vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16))); - vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); - vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); - vs.val[0] = vceqq_u8(vs.val[0], mask2); - vs.val[1] = vceqq_u8(vs.val[1], mask2); - - q2s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[0]); - q2s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[1]); - - vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16))); - vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); - vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); - vs.val[0] = vceqq_u8(vs.val[0], mask2); - vs.val[1] = vceqq_u8(vs.val[1], mask2); - - signs += 4; - - q2s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[2]); - q2s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[3]); - - const int32x4_t p1 = ggml_vdotq_s32(vzero, q2s.val[0], q8b.val[0]); - const int32x4_t p2 = ggml_vdotq_s32(vzero, q2s.val[1], q8b.val[1]); - const int32x4_t p3 = ggml_vdotq_s32(vzero, q2s.val[2], q8b.val[2]); - const int32x4_t p4 = ggml_vdotq_s32(vzero, q2s.val[3], q8b.val[3]); - - sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32+0] & 0xf)); - sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32+0] >> 4)); - sumi1 += vaddvq_s32(p3) * (1 + 2*(x[i].scales[ib32+1] & 0xf)); - sumi2 += vaddvq_s32(p4) * (1 + 2*(x[i].scales[ib32+1] >> 4)); - } - sumf += d*(sumi1 + sumi2); - } - - *s = 0.125f * sumf; - -#elif defined(__AVX2__) - - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m128i m4 = _mm_set1_epi8(0xf); - const __m128i m1 = _mm_set1_epi8(1); - - const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1); - const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2); - - uint64_t aux64; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(&aux64, x[i].scales, 8); - const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1); - const __m256i scales16 = _mm256_cvtepi8_epi16(scales8); // 0 2 4 6 8 10 12 14 1 3 5 7 9 11 13 15 - - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q2_1 = 
_mm256_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], - iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)], - iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], - iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); - const __m256i q2_2 = _mm256_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], - iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)], - iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], - iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); - qs += 8; - - __m256i aux256 = _mm256_set1_epi32(signs[0] | ((uint32_t) signs[1] << 16)); - aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); - const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2); - const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1); - - aux256 = _mm256_set1_epi32(signs[2] | ((uint32_t) signs[3] << 16)); - aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); - const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2); - const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2); - - signs += 4; - - const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); // blocks 2*ib32+0, 2*ib32+1 - const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); // blocks 2*ib32+2, 2*ib32+3 - - const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+0))); - const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+1))); - sumi1 = _mm256_add_epi32(sumi1, p1); - sumi2 = _mm256_add_epi32(sumi2, p2); - } - - accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); - - } - - *s = 0.125f * hsum_float_8(accumf); - -#elif defined(__AVX__) - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m128i m4 = _mm_set1_epi8(0xf); - const __m128i m1 = _mm_set1_epi8(1); - - const __m128i mask1_0 = _mm_loadu_si128((const __m128i*)k_mask1); - const __m128i mask1_1 = _mm_loadu_si128((const __m128i*)k_mask1 + 1); - const __m128i mask2_0 = _mm_loadu_si128((const __m128i*)k_mask2); - const __m128i mask2_1 = _mm_loadu_si128((const __m128i*)k_mask2 + 1); - - uint64_t aux64; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(&aux64, x[i].scales, 8); - const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1); - const __m128i scales16_0 = _mm_cvtepi8_epi16(scales8); - const __m128i scales16_1 = _mm_cvtepi8_epi16(_mm_srli_si128(scales8, 8)); - - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - 
const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q2_1_0 = _mm_set_epi64x(iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], - iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); - const __m128i q2_1_1 = _mm_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], - iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)]); - const __m128i q2_2_0 = _mm_set_epi64x(iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], - iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); - const __m128i q2_2_1 = _mm_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], - iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)]); - qs += 8; - - __m128i aux128_0 = _mm_set1_epi32(signs[0] | ((uint32_t) signs[1] << 16)); - __m128i aux128_1 = aux128_0; - aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); - aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); - const __m128i s2_1_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); - const __m128i s2_1_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); - const __m128i q8s_1_0 = _mm_sub_epi8(_mm_xor_si128(s2_1_0, q8_1_0), s2_1_0); - const __m128i q8s_1_1 = _mm_sub_epi8(_mm_xor_si128(s2_1_1, q8_1_1), s2_1_1); - - aux128_0 = _mm_set1_epi32(signs[2] | ((uint32_t) signs[3] << 16)); - aux128_1 = aux128_0; - aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); - aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); - const __m128i s2_2_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); - const __m128i s2_2_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); - const __m128i q8s_2_0 = _mm_sub_epi8(_mm_xor_si128(s2_2_0, q8_2_0), s2_2_0); - const __m128i q8s_2_1 = _mm_sub_epi8(_mm_xor_si128(s2_2_1, q8_2_1), s2_2_1); - - signs += 4; - - const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); - const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); - const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); - const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); - - const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_shuffle_epi8(scales16_0, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+0), 0))); - const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_shuffle_epi8(scales16_1, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+0), 1))); - const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_shuffle_epi8(scales16_0, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+1), 0))); - const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_shuffle_epi8(scales16_1, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+1), 1))); - sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); - sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); - sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); - sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); - } - - accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); - - } - - *s = 0.125f * hsum_float_8(accumf); - -#elif defined(__POWER9_VECTOR__) - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; - - const vector int v0 = vec_splats((int32_t)0); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - 
vector float vsumf3 = vec_splats(0.0f); - - const vector unsigned char mask0 = vec_xl( 0, k_mask1); - const vector unsigned char mask1 = vec_xl(16, k_mask1); - const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - const uint8_t * GGML_RESTRICT q2 = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); - const uint8_t * GGML_RESTRICT sc = x[i].scales; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - for (int j = 0; j < QK_K/32; j += 2) { - __builtin_prefetch(q2, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed long long aux64x2_0 = {*(const int64_t *)(iq2s_grid + (q2[0] | ((qh[0] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[1] | ((qh[0] << 6) & 0x300)))}; - vector signed long long aux64x2_1 = {*(const int64_t *)(iq2s_grid + (q2[2] | ((qh[0] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[3] | ((qh[0] << 2) & 0x300)))}; - vector signed long long aux64x2_2 = {*(const int64_t *)(iq2s_grid + (q2[4] | ((qh[1] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[5] | ((qh[1] << 6) & 0x300)))}; - vector signed long long aux64x2_3 = {*(const int64_t *)(iq2s_grid + (q2[6] | ((qh[1] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[7] | ((qh[1] << 2) & 0x300)))}; - q2 += 8; - qh += 2; - - vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); - vector signed char vsigns23 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); - signs += 4; - - vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); - vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); - vector signed char vsigns2 = vec_perm(vsigns23, vsigns23, mask0); - vector signed char vsigns3 = vec_perm(vsigns23, vsigns23, mask1); - - vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); - vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); - vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); - vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); - - vector signed char q2x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux64x2_0), vsigns0); - vector signed char q2x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux64x2_1), vsigns1); - vector signed char q2x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux64x2_2), vsigns2); - vector signed char q2x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux64x2_3), vsigns3); - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); - - const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); - const uint16_t ls1 = (uint16_t)(sc[0] >> 4); - const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); - const uint16_t ls3 = (uint16_t)(sc[1] >> 4); - sc += 2; - - vector signed short vscales0 = 
vec_splats((int16_t)(2*ls0+1)); - vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); - vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); - vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); - - vsumi0 = vec_msum(qv0, vscales0, vsumi0); - vsumi1 = vec_msum(qv1, vscales1, vsumi1); - vsumi2 = vec_msum(qv2, vscales2, vsumi2); - vsumi3 = vec_msum(qv3, vscales3, vsumi3); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = 0.125f * vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - - const __m128i m4 = __lsx_vreplgr2vr_b(0xf); - const __m128i m1 = __lsx_vreplgr2vr_b(1); - - const __m256i mask1 = __lasx_xvld((const __m256i*)k_mask1, 0); - const __m256i mask2 = __lasx_xvld((const __m256i*)k_mask2, 0); - uint64_t aux64; - - __m256 accumf = (__m256)__lasx_xvldi(0); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - __m128i tmp1; - memcpy(&aux64, x[i].scales, 8); - tmp1 = __lsx_vinsgr2vr_d(tmp1, aux64, 0); - tmp1 = __lsx_vinsgr2vr_d(tmp1, aux64 >> 4, 1); - const __m128i scales8 = __lsx_vadd_b(__lsx_vslli_h(__lsx_vand_v(tmp1, m4), 1), m1); - const __m256i scales16 = lasx_ext8_16(scales8); // 0 2 4 6 8 10 12 14 1 3 5 7 9 11 13 15 - - __m256i sumi1 = __lasx_xvldi(0); - __m256i sumi2 = __lasx_xvldi(0); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q2_1 = lasx_set_d(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], - iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)], - iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], - iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); - const __m256i q2_2 = lasx_set_d(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], - iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)], - iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], - iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); - qs += 8; - - __m256i aux256 = __lasx_xvreplgr2vr_w(signs[0] | ((uint32_t) signs[1] << 16)); - aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); - const __m256i s2_1 = __lasx_xvseq_b(aux256, mask2); - const __m256i q8s_1 = __lasx_xvsub_b(__lasx_xvxor_v(s2_1, q8_1), s2_1); - - aux256 = __lasx_xvreplgr2vr_w(signs[2] | ((uint32_t) signs[3] << 16)); - aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); - const __m256i s2_2 = __lasx_xvseq_b(aux256, mask2); - const __m256i q8s_2 = 
__lasx_xvsub_b(__lasx_xvxor_v(s2_2, q8_2), s2_2); - - signs += 4; - - const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); // blocks 2*ib32+0, 2*ib32+1 - const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); // blocks 2*ib32+2, 2*ib32+3 - - const __m256i p1 = lasx_madd_h(dot1, lasx_shuffle_b(scales16, get_scale_shuffle_k4(ib32+0))); - const __m256i p2 = lasx_madd_h(dot2, lasx_shuffle_b(scales16, get_scale_shuffle_k4(ib32+1))); - sumi1 = __lasx_xvadd_w(sumi1, p1); - sumi2 = __lasx_xvadd_w(sumi2, p2); - } - - accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); - } - - *s = 0.125f * hsum_float_8(accumf); - -#else - - float sumf = 0; - for (int i = 0; i < nb; i++) { - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint8_t * qh = x[i].qh; - const uint8_t * signs = qs + QK_K/8; - - int bsum = 0; - for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { - int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf); - int ls2 = 1 + 2*(x[i].scales[ib32] >> 4); - int sumi1 = 0, sumi2 = 0; - for (int l = 0; l < 2; ++l) { - const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); - for (int j = 0; j < 8; ++j) { - sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); - } - q8 += 8; - } - for (int l = 2; l < 4; ++l) { - const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); - for (int j = 0; j < 8; ++j) { - sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); - } - q8 += 8; - } - bsum += ls1 * sumi1 + ls2 * sumi2; - qs += 4; - signs += 4; - } - - sumf += d * bsum; - } - - *s = 0.125f * sumf; - -#endif - -} - -void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_iq3_xxs * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_NEON) - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[2]; - - ggml_int8x16x4_t q3s; - ggml_int8x16x4_t q8b; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - float sumf1 = 0, sumf2 = 0; - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t); - const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]); - const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]); - const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]); - const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]); - q3 += 16; - q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 7) & 127)))); - q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 
127)))); - q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127)))); - q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127)))); - q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0)); - q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1)); - q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2)); - q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3)); - const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]); - const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]); - sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28)); - sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[1] >> 28)); - } - sumf += d*(sumf1 + sumf2); - } - *s = 0.5f * sumf; - -#elif defined(__AVX2__) - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[2]; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q2_1 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], - iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); - q3 += 8; - const __m256i q2_2 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], - iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); - q3 += 8; - memcpy(aux32, gas, 8); gas += 8; - const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127], - signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); - const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], - signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); - const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); - const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); - const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); - const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); - const uint16_t ls1 = aux32[0] >> 28; - const uint16_t ls2 = aux32[1] >> 28; - const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); - const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); - sumi1 = _mm256_add_epi32(sumi1, p1); - sumi2 = _mm256_add_epi32(sumi2, p2); - } - - accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); - - } - - *s = 0.25f * hsum_float_8(accumf); - -#elif defined(__AVX__) - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[2]; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - 
__m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q2_1_0 = _mm_set_epi32(iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); - const __m128i q2_1_1 = _mm_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]]); - q3 += 8; - const __m128i q2_2_0 = _mm_set_epi32(iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); - const __m128i q2_2_1 = _mm_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]]); - q3 += 8; - memcpy(aux32, gas, 8); gas += 8; - const __m128i s2_1_0 = _mm_set_epi64x(signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); - const __m128i s2_1_1 = _mm_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127]); - const __m128i s2_2_0 = _mm_set_epi64x(signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); - const __m128i s2_2_1 = _mm_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127]); - const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, s2_1_0); - const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, s2_1_1); - const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, s2_2_0); - const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, s2_2_1); - const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); - const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); - const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); - const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); - const uint16_t ls1 = aux32[0] >> 28; - const uint16_t ls2 = aux32[1] >> 28; - const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); - const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); - const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); - const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); - sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); - sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); - sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); - sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); - } - - accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); - - } - - *s = 0.25f * hsum_float_8(accumf); - -#elif defined(__POWER9_VECTOR__) - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - const vector int v0 = vec_splats((int32_t)0); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint32_t * GGML_RESTRICT signs = (const uint32_t *)(x[i].qs + QK_K/4); - const int8_t * GGML_RESTRICT q8 = y[i].qs; - -#pragma GCC unroll 1 - for (int j = 0; j < QK_K/32; j += 
2) { - __builtin_prefetch(q3, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector unsigned int aux32x4_0 = {iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]}; - vector unsigned int aux32x4_1 = {iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]}; - vector unsigned int aux32x4_2 = {iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]}; - vector unsigned int aux32x4_3 = {iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]}; - q3 += 16; - - vector unsigned long long aux64x2_0 = {(uint64_t)(signs64[(signs[0] >> 0) & 127]), (uint64_t)(signs64[(signs[0] >> 7) & 127])}; - vector unsigned long long aux64x2_1 = {(uint64_t)(signs64[(signs[0] >> 14) & 127]), (uint64_t)(signs64[(signs[0] >> 21) & 127])}; - vector unsigned long long aux64x2_2 = {(uint64_t)(signs64[(signs[1] >> 0) & 127]), (uint64_t)(signs64[(signs[1] >> 7) & 127])}; - vector unsigned long long aux64x2_3 = {(uint64_t)(signs64[(signs[1] >> 14) & 127]), (uint64_t)(signs64[(signs[1] >> 21) & 127])}; - - vector signed char q3x0 = vec_mul((vector signed char)aux64x2_0, (vector signed char)aux32x4_0); - vector signed char q3x1 = vec_mul((vector signed char)aux64x2_1, (vector signed char)aux32x4_1); - vector signed char q3x2 = vec_mul((vector signed char)aux64x2_2, (vector signed char)aux32x4_2); - vector signed char q3x3 = vec_mul((vector signed char)aux64x2_3, (vector signed char)aux32x4_3); - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); - - const uint16_t ls0 = (uint16_t)(signs[0] >> 28); - const uint16_t ls1 = (uint16_t)(signs[1] >> 28); - signs += 2; - - vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); - vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); - - vsumi0 = vec_msum(qv0, vscales01, vsumi0); - vsumi1 = vec_msum(qv1, vscales01, vsumi1); - vsumi2 = vec_msum(qv2, vscales23, vsumi2); - vsumi3 = vec_msum(qv3, vscales23, vsumi3); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = 0.25f * vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[2]; - - __m256 accumf = (__m256)__lasx_xvldi(0); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - __m256i sumi1 = __lasx_xvldi(0); - __m256i sumi2 = __lasx_xvldi(0); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); 
q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q2_1 = lasx_set_w(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], - iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); - q3 += 8; - const __m256i q2_2 = lasx_set_w(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], - iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); - q3 += 8; - memcpy(aux32, gas, 8); gas += 8; - - const __m256i s2_1 = lasx_set_d(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127], - signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); - const __m256i s2_2 = lasx_set_d(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], - signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); - const __m256i q8s_1 = __lasx_xvsigncov_b(s2_1, q8_1); - const __m256i q8s_2 = __lasx_xvsigncov_b(s2_2, q8_2); - const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); - const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); - const uint16_t ls1 = aux32[0] >> 28; - const uint16_t ls2 = aux32[1] >> 28; - - const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2*ls1+1)); - const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2*ls2+1)); - sumi1 = __lasx_xvadd_w(sumi1, p1); - sumi2 = __lasx_xvadd_w(sumi2, p2); - } - - accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); - } - - *s = 0.25f * hsum_float_8(accumf); - -#else - - uint32_t aux32; - - float sumf = 0.f; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - int32_t bsum = 0; - for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { - memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t); - const uint32_t ls = 2*(aux32 >> 28) + 1; - int32_t sumi = 0; - for (int l = 0; l < 4; ++l) { - const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]); - const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]); - const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; - for (int j = 0; j < 4; ++j) { - sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1); - sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? 
-1 : 1); - } - q8 += 8; - } - q3 += 8; - bsum += sumi * ls; - } - sumf += d * bsum; - } - *s = 0.25f * sumf; -#endif -} - -void ggml_vec_dot_iq3_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_iq3_s * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_NEON) - - typedef union { - uint16x8_t vec_index; - uint16_t index[8]; - } vec_index_t; - - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; - - static const int16_t k_shift[8] = {8, 7, 6, 5, 4, 3, 2, 1}; - - const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1); - const uint8x16_t mask2 = vld1q_u8(k_mask2); - - const int16x8_t hshift = vld1q_s16(k_shift); - const uint16x8_t m256 = vdupq_n_u16(256); - const uint8x16_t m1 = vdupq_n_u8(1); - - uint8x16x2_t vs; - ggml_int8x16x4_t q3s; - ggml_int8x16x4_t q8b; - vec_index_t idx; - - uint32_t scales32[2]; - const uint8_t * scales8 = (const uint8_t *)scales32; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(scales32, x[i].scales, 4); - scales32[1] = (((scales32[0] >> 4) & 0x0f0f0f0f) << 1) | 0x01010101; - scales32[0] = ((scales32[0] & 0x0f0f0f0f) << 1) | 0x01010101; - - int sumi1 = 0, sumi2 = 0; - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - - const uint8x16_t idx_l = vld1q_u8(qs); qs += 16; - idx.vec_index = vorrq_u16(vmovl_u8(vget_low_u8 (idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+0]), hshift), m256)); - const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]], - iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]); - const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]], - iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]); - idx.vec_index = vorrq_u16(vmovl_u8(vget_high_u8(idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+1]), hshift), m256)); - const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]], - iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]); - const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]], - iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]); - - - vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16))); - vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); - vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); - vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1); - vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1); - - q3s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_0)); - q3s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_1)); - - vs.val[0] = 
vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16))); - vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); - vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); - vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1); - vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1); - - signs += 4; - - q3s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_2)); - q3s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_3)); - - const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]); - const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]); - - sumi1 += vaddvq_s32(p1) * scales8[ib32/2+0]; - sumi2 += vaddvq_s32(p2) * scales8[ib32/2+4]; - } - sumf += d*(sumi1 + sumi2); - } - *s = sumf; - -#elif defined(__AVX2__) - - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1); - const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2); - - const __m256i idx_shift = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); - const __m256i idx_mask = _mm256_set1_epi32(256); - - typedef union { - __m256i vec[2]; - uint32_t index[16]; - } index_t; - - index_t idx; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i idx_l = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)qs)); qs += 16; - idx.vec[0] = _mm256_set1_epi32(qh[ib32+0]); - idx.vec[1] = _mm256_set1_epi32(qh[ib32+1]); - idx.vec[0] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[0], idx_shift), idx_mask); - idx.vec[1] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[1], idx_shift), idx_mask); - idx.vec[0] = _mm256_or_si256(idx.vec[0], _mm256_cvtepi16_epi32(_mm256_castsi256_si128(idx_l))); - idx.vec[1] = _mm256_or_si256(idx.vec[1], _mm256_cvtepi16_epi32(_mm256_extractf128_si256(idx_l, 1))); - - // At leat on my CPU (Ryzen 7950X), using _mm256_i32gather_epi32 is slower than _mm256_set_epi32. Strange. 
- //const __m256i q2_1 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[0], 4); - //const __m256i q2_2 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[1], 4); - const __m256i q2_1 = _mm256_set_epi32( - iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]], - iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]] - ); - const __m256i q2_2 = _mm256_set_epi32( - iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]], - iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[ 9]], iq3s_grid[idx.index[ 8]] - ); - - __m256i aux256 = _mm256_set1_epi32(signs[0] | (signs[1] << 16)); - aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); - const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2); - const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1); - - aux256 = _mm256_set1_epi32(signs[2] | (signs[3] << 16)); - aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); - const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2); - const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2); - - signs += 4; - - const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); - const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); - const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; - const uint16_t ls2 = x[i].scales[ib32/2] >> 4; - const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); - const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); - sumi1 = _mm256_add_epi32(sumi1, p1); - sumi2 = _mm256_add_epi32(sumi2, p2); - } - - accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); - - } - - *s = hsum_float_8(accumf); - -#elif defined(__AVX__) - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m128i mask1_0 = _mm_loadu_si128((const __m128i*)k_mask1); - const __m128i mask1_1 = _mm_loadu_si128((const __m128i*)k_mask1 + 1); - const __m128i mask2_0 = _mm_loadu_si128((const __m128i*)k_mask2); - const __m128i mask2_1 = _mm_loadu_si128((const __m128i*)k_mask2 + 1); - - const __m128i idx_mul_0 = _mm_set_epi32(32, 64, 128, 256); - const __m128i idx_mul_1 = _mm_set_epi32(2, 4, 8, 16); - const __m128i idx_mask = _mm_set1_epi32(256); - - typedef union { - __m128i vec[4]; - uint32_t index[16]; - } index_t; - - index_t idx; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_1_1 = _mm_loadu_si128((const __m128i 
*)q8); q8 += 16; - const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i qs_tmp = _mm_loadu_si128((const __m128i *)qs); - const __m128i idx_l_0 = _mm_cvtepu8_epi16(qs_tmp); - const __m128i idx_l_1 = _mm_cvtepu8_epi16(_mm_srli_si128(qs_tmp, 8)); qs += 16; - idx.vec[0] = _mm_set1_epi32(qh[ib32+0]); - idx.vec[1] = idx.vec[0]; - idx.vec[2] = _mm_set1_epi32(qh[ib32+1]); - idx.vec[3] = idx.vec[2]; - - idx.vec[0] = _mm_and_si128(_mm_mullo_epi32(idx.vec[0], idx_mul_0), idx_mask); - idx.vec[1] = _mm_and_si128(_mm_mullo_epi32(idx.vec[1], idx_mul_1), idx_mask); - idx.vec[2] = _mm_and_si128(_mm_mullo_epi32(idx.vec[2], idx_mul_0), idx_mask); - idx.vec[3] = _mm_and_si128(_mm_mullo_epi32(idx.vec[3], idx_mul_1), idx_mask); - - idx.vec[0] = _mm_or_si128(idx.vec[0], _mm_cvtepi16_epi32(idx_l_0)); - idx.vec[1] = _mm_or_si128(idx.vec[1], _mm_cvtepi16_epi32(_mm_srli_si128(idx_l_0, 8))); - idx.vec[2] = _mm_or_si128(idx.vec[2], _mm_cvtepi16_epi32(idx_l_1)); - idx.vec[3] = _mm_or_si128(idx.vec[3], _mm_cvtepi16_epi32(_mm_srli_si128(idx_l_1, 8))); - - const __m128i q2_1_0 = _mm_set_epi32(iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]]); - const __m128i q2_1_1 = _mm_set_epi32(iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]]); - const __m128i q2_2_0 = _mm_set_epi32(iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[9]], iq3s_grid[idx.index[8]]); - const __m128i q2_2_1 = _mm_set_epi32(iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]]); - - __m128i aux128_0 = _mm_set1_epi32(signs[0] | (signs[1] << 16)); - __m128i aux128_1 = aux128_0; - aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); - aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); - const __m128i s2_1_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); - const __m128i s2_1_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); - const __m128i q8s_1_0 = _mm_sub_epi8(_mm_xor_si128(s2_1_0, q8_1_0), s2_1_0); - const __m128i q8s_1_1 = _mm_sub_epi8(_mm_xor_si128(s2_1_1, q8_1_1), s2_1_1); - - aux128_0 = _mm_set1_epi32(signs[2] | (signs[3] << 16)); - aux128_1 = aux128_0; - aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); - aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); - const __m128i s2_2_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); - const __m128i s2_2_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); - const __m128i q8s_2_0 = _mm_sub_epi8(_mm_xor_si128(s2_2_0, q8_2_0), s2_2_0); - const __m128i q8s_2_1 = _mm_sub_epi8(_mm_xor_si128(s2_2_1, q8_2_1), s2_2_1); - - signs += 4; - - const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); - const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); - const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); - const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); - const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; - const uint16_t ls2 = x[i].scales[ib32/2] >> 4; - const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); - const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); - const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); - const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); - sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); - sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); - sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); - sumi2_1 = _mm_add_epi32(sumi2_1, 
p2_1); - } - - accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); - - } - - *s = hsum_float_8(accumf); - -#elif defined(__POWER9_VECTOR__) - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; - - const vector int v0 = vec_splats((int32_t)0); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - const vector unsigned char mask0 = vec_xl( 0, k_mask1); - const vector unsigned char mask1 = vec_xl(16, k_mask1); - const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].signs); - const uint8_t * GGML_RESTRICT sc = x[i].scales; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - for (int j = 0; j < QK_K/32; j += 2) { - __builtin_prefetch(q3, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector unsigned int aux32x4_0 = {iq3s_grid[q3[ 0] | ((qh[0] << 8) & 256)], iq3s_grid[q3[ 1] | ((qh[0] << 7) & 256)], - iq3s_grid[q3[ 2] | ((qh[0] << 6) & 256)], iq3s_grid[q3[ 3] | ((qh[0] << 5) & 256)]}; - vector unsigned int aux32x4_1 = {iq3s_grid[q3[ 4] | ((qh[0] << 4) & 256)], iq3s_grid[q3[ 5] | ((qh[0] << 3) & 256)], - iq3s_grid[q3[ 6] | ((qh[0] << 2) & 256)], iq3s_grid[q3[ 7] | ((qh[0] << 1) & 256)]}; - vector unsigned int aux32x4_2 = {iq3s_grid[q3[ 8] | ((qh[1] << 8) & 256)], iq3s_grid[q3[ 9] | ((qh[1] << 7) & 256)], - iq3s_grid[q3[10] | ((qh[1] << 6) & 256)], iq3s_grid[q3[11] | ((qh[1] << 5) & 256)]}; - vector unsigned int aux32x4_3 = {iq3s_grid[q3[12] | ((qh[1] << 4) & 256)], iq3s_grid[q3[13] | ((qh[1] << 3) & 256)], - iq3s_grid[q3[14] | ((qh[1] << 2) & 256)], iq3s_grid[q3[15] | ((qh[1] << 1) & 256)]}; - q3 += 16; - qh += 2; - - vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); - vector signed char vsigns02 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); - signs += 4; - - vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); - vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); - vector signed char vsigns2 = vec_perm(vsigns02, vsigns02, mask0); - vector signed char vsigns3 = vec_perm(vsigns02, vsigns02, mask1); - - vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); - vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); - vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); - vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); - - vector signed char q3x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux32x4_0), vsigns0); - vector signed char q3x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux32x4_1), vsigns1); - vector 
signed char q3x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux32x4_2), vsigns2); - vector signed char q3x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux32x4_3), vsigns3); - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); - - const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); - const uint16_t ls1 = (uint16_t)(sc[0] >> 4); - sc ++; - - vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); - vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); - - vsumi0 = vec_msum(qv0, vscales01, vsumi0); - vsumi1 = vec_msum(qv1, vscales01, vsumi1); - vsumi2 = vec_msum(qv2, vscales23, vsumi2); - vsumi3 = vec_msum(qv3, vscales23, vsumi3); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m256i mask1 = __lasx_xvld((const __m256i*)k_mask1, 0); - const __m256i mask2 = __lasx_xvld((const __m256i*)k_mask2, 0); - - __m256i idx_shift = lasx_set_w(1, 2, 3, 4, 5, 6, 7, 8); - const __m256i idx_mask = __lasx_xvreplgr2vr_w(256); - - typedef union { - __m256i vec[2]; - uint32_t index[16]; - } index_t; - - index_t idx; - - __m256 accumf = (__m256)__lasx_xvldi(0); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - __m256i sumi1 = __lasx_xvldi(0); - __m256i sumi2 = __lasx_xvldi(0); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i idx_l = lasx_extu8_16(__lsx_vld(qs, 0)); qs += 16; - idx.vec[0] = __lasx_xvreplgr2vr_w(qh[ib32+0]); - idx.vec[1] = __lasx_xvreplgr2vr_w(qh[ib32+1]); - idx.vec[0] = __lasx_xvand_v(__lasx_xvsll_w(idx.vec[0], idx_shift), idx_mask); - idx.vec[1] = __lasx_xvand_v(__lasx_xvsll_w(idx.vec[1], idx_shift), idx_mask); - idx.vec[0] = __lasx_xvor_v(idx.vec[0], lasx_ext16_32(lasx_extracti128(idx_l, 0))); - idx.vec[1] = __lasx_xvor_v(idx.vec[1], 
lasx_ext16_32(lasx_extracti128(idx_l, 1))); - - // At leat on my CPU (Ryzen 7950X), using _mm256_i32gather_epi32 is slower than _mm256_set_epi32. Strange. - //const __m256i q2_1 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[0], 4); - //const __m256i q2_2 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[1], 4); - const __m256i q2_1 = lasx_set_w( - iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]], - iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]] - ); - const __m256i q2_2 = lasx_set_w( - iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]], - iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[ 9]], iq3s_grid[idx.index[ 8]] - ); - - __m256i aux256 = __lasx_xvreplgr2vr_w(signs[0] | (signs[1] << 16)); - aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); - const __m256i s2_1 = __lasx_xvseq_b(aux256, mask2); - const __m256i q8s_1 = __lasx_xvsub_b(__lasx_xvxor_v(s2_1, q8_1), s2_1); - - aux256 = __lasx_xvreplgr2vr_w(signs[2] | (signs[3] << 16)); - aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); - const __m256i s2_2 = __lasx_xvseq_b(aux256, mask2); - const __m256i q8s_2 = __lasx_xvsub_b(__lasx_xvxor_v(s2_2, q8_2), s2_2); - - signs += 4; - - const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); - const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); - const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; - const uint16_t ls2 = x[i].scales[ib32/2] >> 4; - const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2*ls1+1)); - const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2*ls2+1)); - sumi1 = __lasx_xvadd_w(sumi1, p1); - sumi2 = __lasx_xvadd_w(sumi2, p2); - } - - accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); - } - - *s = hsum_float_8(accumf); - -#else - - float sumf = 0.f; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint8_t * GGML_RESTRICT signs = x[i].signs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - int32_t bsum = 0; - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1; - const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1; - int32_t sumi = 0; - for (int l = 0; l < 4; ++l) { - const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256))); - const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256))); - for (int j = 0; j < 4; ++j) { - sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); - sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); - } - q8 += 8; - } - qs += 8; - signs += 4; - bsum += sumi * ls1; - sumi = 0; - for (int l = 0; l < 4; ++l) { - const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256))); - const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256))); - for (int j = 0; j < 4; ++j) { - sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); - sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? 
-1 : 1); - } - q8 += 8; - } - qs += 8; - signs += 4; - bsum += sumi * ls2; - } - sumf += d * bsum; - } - *s = sumf; -#endif -} - -#if defined(__AVX2__) -static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) { - const __m256i ax = _mm256_sign_epi8(x, x); - const __m256i sy = _mm256_sign_epi8(y, x); - return _mm256_maddubs_epi16(ax, sy); -} -#elif defined(__loongarch_asx) -static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) { - const __m256i a = __lasx_xvmulwev_h_b(x, y); - const __m256i b = __lasx_xvmulwod_h_b(x, y); - return __lasx_xvadd_h(a, b); -} -#endif - -void ggml_vec_dot_iq1_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_iq1_s * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined __ARM_NEON - - ggml_int8x16x4_t q1b; - ggml_int8x16x4_t q8b; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint16_t * qh = x[i].qh; - - int sumi1 = 0, sumi2 = 0, sumi3 = 0; - - for (int ib = 0; ib < QK_K/32; ib += 2) { - - q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[ib+0] << 8) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[ib+0] << 5) & 0x700))))); - q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[ib+0] << 2) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[ib+0] >> 1) & 0x700))))); - q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[ib+1] << 8) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[ib+1] << 5) & 0x700))))); - q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[ib+1] << 2) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[ib+1] >> 1) & 0x700))))); - qs += 8; - - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - - const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[0], q8b.val[0]), q1b.val[1], q8b.val[1]); - const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[2], q8b.val[2]), q1b.val[3], q8b.val[3]); - - const int ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; - const int ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; - sumi1 += vaddvq_s32(p1) * ls1; - sumi2 += vaddvq_s32(p2) * ls2; - sumi3 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * ls1 * (qh[ib+0] & 0x8000 ? -1 : 1) - + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * ls2 * (qh[ib+1] & 0x8000 ? 
-1 : 1); - - } - - sumf += y[i].d * GGML_FP16_TO_FP32(x[i].d) * (sumi1 + sumi2 + IQ1S_DELTA * sumi3); - } - - *s = sumf; - -#elif defined __AVX2__ - - __m256 accum = _mm256_setzero_ps(); - float accum1 = 0; - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint16_t * qh = x[i].qh; - - __m256i sumi = _mm256_setzero_si256(); - int sumi1 = 0; - for (int ib = 0; ib < QK_K/32; ib += 2) { -#ifdef __BMI2__ - const uint64_t packed_idx1 = _pdep_u64(*(const uint32_t *)qs, 0x00ff00ff00ff00ffULL) | _pdep_u64(qh[ib], 0x700070007000700ULL); - const uint64_t packed_idx2 = _pdep_u64(*(const uint32_t *)(qs + 4), 0x00ff00ff00ff00ffULL) | _pdep_u64(qh[ib + 1], 0x700070007000700ULL); - const uint16_t *idx1 = (const uint16_t *)(&packed_idx1); - const uint16_t *idx2 = (const uint16_t *)(&packed_idx2); - const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[idx1[3]], iq1s_grid[idx1[2]], iq1s_grid[idx1[1]], iq1s_grid[idx1[0]]); - const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[idx2[3]], iq1s_grid[idx2[2]], iq1s_grid[idx2[1]], iq1s_grid[idx2[0]]); -#else - const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)], - iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]); - const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)], - iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]); -#endif - qs += 8; - const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - - const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); - const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); - const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; - const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; - const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(ls1)); - const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(ls2)); - - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p1, p2)); - sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 - + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? 
-1 : 1) * ls2; - } - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - accum = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi), accum); - accum1 += d * sumi1; - - } - - *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; - -#elif defined __AVX__ - __m256 accum = _mm256_setzero_ps(); - float accum1 = 0; - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint16_t * qh = x[i].qh; - - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - int sumi1 = 0; - for (int ib = 0; ib < QK_K/32; ib += 2) { - const __m128i q1b_1_0 = _mm_set_epi64x(iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]); - const __m128i q1b_1_1 = _mm_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)]); - const __m128i q1b_2_0 = _mm_set_epi64x(iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]); - const __m128i q1b_2_1 = _mm_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)]); - qs += 8; - const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - - const __m128i dot1_0 = mul_add_epi8_sse(q1b_1_0, q8b_1_0); - const __m128i dot1_1 = mul_add_epi8_sse(q1b_1_1, q8b_1_1); - const __m128i dot2_0 = mul_add_epi8_sse(q1b_2_0, q8b_2_0); - const __m128i dot2_1 = mul_add_epi8_sse(q1b_2_1, q8b_2_1); - const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; - const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; - const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(ls1)); - const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(ls1)); - const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(ls2)); - const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(ls2)); - - sumi1_0 = _mm_add_epi32(sumi1_0, _mm_add_epi32(p1_0, p2_0)); - sumi1_1 = _mm_add_epi32(sumi1_1, _mm_add_epi32(p1_1, p2_1)); - sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 - + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? 
-1 : 1) * ls2; - } - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi1_1, sumi1_0))), accum); - accum1 += d * sumi1; - - } - - *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; - -#elif defined(__POWER9_VECTOR__) - const vector unsigned char v0 = vec_splats((unsigned char)0x0); - const vector unsigned short vsign = vec_splats((unsigned short)0x8000); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = vec_splats((int32_t)0); - vector signed int vsumi1 = vec_splats((int32_t)0); - vector signed int vsumi2 = vec_splats((int32_t)0); - vector signed int vsumi3 = vec_splats((int32_t)0); - vector signed int vsumi8 = vec_splats((int32_t)0); - - const uint8_t * GGML_RESTRICT q1 = x[i].qs; - const uint16_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - const int16_t * GGML_RESTRICT qs = y[i].bsums; - - for (int j = 0; j < QK_K/32; j += 2) { - __builtin_prefetch(q1, 0, 1); - __builtin_prefetch(qh, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed long long aux64x2_0 = {*(const int64_t *)(iq1s_grid + (q1[0] | ((qh[0] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[1] | ((qh[0] << 5) & 0x700)))}; - vector signed long long aux64x2_1 = {*(const int64_t *)(iq1s_grid + (q1[2] | ((qh[0] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[3] | ((qh[0] >> 1) & 0x700)))}; - vector signed long long aux64x2_2 = {*(const int64_t *)(iq1s_grid + (q1[4] | ((qh[1] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[5] | ((qh[1] << 5) & 0x700)))}; - vector signed long long aux64x2_3 = {*(const int64_t *)(iq1s_grid + (q1[6] | ((qh[1] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[7] | ((qh[1] >> 1) & 0x700)))}; - q1 += 8; - - vector signed char q1x0 = (vector signed char)aux64x2_0; - vector signed char q1x1 = (vector signed char)aux64x2_1; - vector signed char q1x2 = (vector signed char)aux64x2_2; - vector signed char q1x3 = (vector signed char)aux64x2_3; - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q1x0, q8y0), vec_mulo(q1x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q1x1, q8y1), vec_mulo(q1x1, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q1x2, q8y2), vec_mulo(q1x2, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q1x3, q8y3), vec_mulo(q1x3, q8y3)); - - const uint16_t ls0 = (uint16_t)((qh[0] >> 12) & 7); - const uint16_t ls1 = (uint16_t)((qh[1] >> 12) & 7); - - vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); - vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); - vector signed short vscales = vec_sld(vscales23, vscales01, 8); - - vsumi0 = vec_msum(qv0, vscales01, vsumi0); - vsumi1 = vec_msum(qv1, vscales01, vsumi1); - vsumi2 = vec_msum(qv2, vscales23, vsumi2); - vsumi3 = vec_msum(qv3, vscales23, vsumi3); - - vector signed short q8ysums = vec_xl_len(qs, 8); - qs += 4; - q8ysums = vec_mergeh(q8ysums, (vector signed short)v0); - - vector signed short qxh = (vector signed 
short)vec_sld(vec_splats(qh[1]), vec_splats(qh[0]), 8); - qh += 2; - vector __bool short vsel = vec_cmpge(qxh, (vector signed short)v0); - - vector signed short q8ysum = vec_sel((vector signed short)vec_xor((vector unsigned short)q8ysums, vsign), q8ysums, vsel); - - vsumi8 = vec_add(vec_mule(q8ysum, vscales), vsumi8); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - - vsumf0 = vec_madd(vec_ctf(vsumi8, 0), vec_mul(vd, vec_splats(IQ1S_DELTA)), vsumf0); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - - __m256 accum = (__m256)__lasx_xvldi(0); - float accum1 = 0; - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint16_t * qh = x[i].qh; - - __m256i sumi = __lasx_xvldi(0); - int sumi1 = 0; - for (int ib = 0; ib < QK_K/32; ib += 2) { - __m256i q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)], 0); - q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], 1); - q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)], 2); - q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], 3); - - __m256i q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)], 0); - q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], 1); - q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)], 2); - q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], 3); - - qs += 8; - const __m256i q8b_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8b_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - - const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); - const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); - const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; - const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; - - __m256i tmp1, tmp5, tmp6; - tmp1 = __lasx_xvreplgr2vr_h(ls1); - tmp5 = __lasx_xvmulwev_w_h(dot1, tmp1); - tmp6 = __lasx_xvmulwod_w_h(dot1, tmp1); - const __m256i p1 = __lasx_xvadd_w(tmp5, tmp6); - - tmp1 = __lasx_xvreplgr2vr_h(ls2); - tmp5 = __lasx_xvmulwev_w_h(dot2, tmp1); - tmp6 = __lasx_xvmulwod_w_h(dot2, tmp1); - const __m256i p2 = __lasx_xvadd_w(tmp5, tmp6); - - sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p1, p2)); - sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 - + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? -1 : 1) * ls2; - } - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - accum = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), accum); - accum1 += d * sumi1; - } - - *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; - -#else - - float sumf = 0; - for (int i = 0; i < nb; i++) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint16_t * qh = x[i].qh; - - int sumi = 0, sumi1 = 0; - for (int ib = 0; ib < QK_K/32; ++ib) { - const int ls = 2*((qh[ib] >> 12) & 7) + 1; - const int delta = qh[ib] & 0x8000 ? 
-1 : 1; - int lsum = 0; - for (int l = 0; l < 4; ++l) { - const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8))); - for (int j = 0; j < 8; ++j) { - lsum += q8[j] * grid[j]; - } - q8 += 8; - } - sumi += ls * lsum; - sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]); - qs += 4; - } - - sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); - } - - *s = sumf; - -#endif -} - -void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_iq1_m * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - - iq1m_scale_t scale; - -#if defined __ARM_NEON - const int32x4_t mask = vdupq_n_s32(0x7); - const int32x4_t mone = vdupq_n_s32(1); - const int32x4_t mzero = vdupq_n_s32(0); - - ggml_int8x16x4_t deltas; - deltas.val[0] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(+1)); - deltas.val[1] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(+1)); - deltas.val[2] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(-1)); - deltas.val[3] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(-1)); - - ggml_int8x16x4_t q1b; - ggml_int8x16x4_t q8b; - - uint32_t aux32; - const uint8_t * aux8 = (const uint8_t *)&aux32; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint8_t * qh = x[i].qh; - const uint16_t * sc = (const uint16_t *)x[i].scales; - - scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); - - int32x4_t sumi1 = mzero; - int32x4_t sumi2 = mzero; - - for (int ib = 0; ib < QK_K/32; ib += 2) { - - q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[0] << 8) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[0] << 4) & 0x700))))); - q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[1] << 8) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[1] << 4) & 0x700))))); - q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[2] << 8) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[2] << 4) & 0x700))))); - q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[3] << 8) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[3] << 4) & 0x700))))); - - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - - const int32x4_t p1 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[0], q8b.val[0]), ggml_vdotq_s32(mzero, q1b.val[1], q8b.val[1])); - const int32x4_t p2 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[2], q8b.val[2]), ggml_vdotq_s32(mzero, q1b.val[3], q8b.val[3])); - const int32x4_t p12 = vpaddq_s32(p1, p2); - - const uint32_t * qh32 = (const uint32_t *)qh; // we are 4-byte aligned, so we can do that - aux32 = ((qh32[0] >> 3) & 0x01010101) | ((qh32[0] >> 6) & 0x02020202); - - const int32x4_t p3 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[0]], q8b.val[0]), ggml_vdotq_s32(mzero, deltas.val[aux8[1]], q8b.val[1])); - const int32x4_t p4 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[2]], q8b.val[2]), ggml_vdotq_s32(mzero, deltas.val[aux8[3]], q8b.val[3])); - const int32x4_t p34 = vpaddq_s32(p3, p4); - - int32x4_t scales_4 = ggml_vld1q_u32(sc[ib/2] >> 0, sc[ib/2] >> 3, sc[ib/2] >> 6, sc[ib/2] >> 9); - - scales_4 = vaddq_s32(vshlq_n_s32(vandq_s32(scales_4, mask), 1), mone); - - sumi1 
= vmlaq_s32(sumi1, scales_4, p12); - sumi2 = vmlaq_s32(sumi2, scales_4, p34); - - qs += 8; qh += 4; - - } - - sumf += y[i].d * GGML_FP16_TO_FP32(scale.f16) * (vaddvq_s32(sumi1) + IQ1M_DELTA * vaddvq_s32(sumi2)); - } - - *s = sumf; - -#elif defined __AVX2__ - - const __m256i mask = _mm256_set1_epi16(0x7); - const __m256i mone = _mm256_set1_epi16(1); - const __m256i mone8 = _mm256_set1_epi8(1); - const __m256i mtwo8 = _mm256_set1_epi8(2); - // VPSHUFB cannot cross 128-bit lanes so odd shifts go to upper half. - const __m256i scales_shift = _mm256_set_epi64x(9, 3, 6, 0); - - __m256 accum1 = _mm256_setzero_ps(); - __m256 accum2 = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint8_t * qh = x[i].qh; - const uint16_t * sc = (const uint16_t *)x[i].scales; - - scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); - // Extract 3-bit scales (16 values) - __m256i scales = _mm256_set1_epi64x(*(const uint64_t*)sc); - scales = _mm256_srlv_epi64(scales, scales_shift); - scales = _mm256_add_epi16(_mm256_slli_epi16(_mm256_and_si256(scales, mask), 1), mone); - - // Indices to repeat each scale 8 times. - __m256i scales_idx1 = _mm256_set1_epi16(0x0100); - __m256i scales_idx2 = _mm256_add_epi8(scales_idx1, _mm256_set1_epi8(8)); - - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - for (int ib = 0; ib < QK_K/32; ib += 2) { -#ifdef __BMI2__ - const uint64_t packed_idx1 = _pdep_u64(*(const uint32_t *)qs, 0x00ff00ff00ff00ffULL) - | _pdep_u64(*(const uint16_t*)(qh) & 0x7777, 0xf000f000f000f00ULL); - const uint64_t packed_idx2 = _pdep_u64(*(const uint32_t *)(qs + 4), 0x00ff00ff00ff00ffULL) - | _pdep_u64(*(const uint16_t*)(qh + 2) & 0x7777, 0xf000f000f000f00ULL); - const uint16_t *idx1 = (const uint16_t *)(&packed_idx1); - const uint16_t *idx2 = (const uint16_t *)(&packed_idx2); - const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[idx1[3]], iq1s_grid[idx1[2]], iq1s_grid[idx1[1]], iq1s_grid[idx1[0]]); - const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[idx2[3]], iq1s_grid[idx2[2]], iq1s_grid[idx2[1]], iq1s_grid[idx2[0]]); - - // Convert signs to bytes 0x81 (negative) or 0x01 (positive) - const uint64_t delta_sign = _pdep_u64(*(const uint32_t*)(qh) & 0x88888888, 0xf0f0f0f0f0f0f0f0ULL); - const __m256i delta1 = _mm256_or_si256(mone8, _mm256_cvtepi8_epi64(_mm_set1_epi32(delta_sign))); - const __m256i delta2 = _mm256_or_si256(mone8, _mm256_cvtepi8_epi64(_mm_set1_epi32(delta_sign >> 32))); -#else - const __m256i q1b_1 = _mm256_set_epi64x( - iq1s_grid[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)], - iq1s_grid[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)] - ); - const __m256i q1b_2 = _mm256_set_epi64x( - iq1s_grid[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)], - iq1s_grid[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)] - ); - - const __m256i delta1 = _mm256_set_epi64x(qh[1] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[1] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101, - qh[0] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[0] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); - const __m256i delta2 = _mm256_set_epi64x(qh[3] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[3] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101, - qh[2] & 0x80 ? 
0xffffffffffffffff : 0x0101010101010101, - qh[2] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); -#endif - const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - - const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); - const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); - const __m256i dot3 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_1, delta1)); - const __m256i dot4 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_2, delta2)); - - __m256i scale1 = _mm256_shuffle_epi8(scales, scales_idx1); - __m256i scale2 = _mm256_shuffle_epi8(scales, scales_idx2); - - scales_idx1 = _mm256_add_epi8(scales_idx1, mtwo8); - scales_idx2 = _mm256_add_epi8(scales_idx2, mtwo8); - - const __m256i p1 = _mm256_madd_epi16(dot1, scale1); - const __m256i p2 = _mm256_madd_epi16(dot2, scale2); - const __m256i p3 = _mm256_madd_epi16(dot3, scale1); - const __m256i p4 = _mm256_madd_epi16(dot4, scale2); - - sumi1 = _mm256_add_epi32(sumi1, _mm256_add_epi32(p1, p2)); - sumi2 = _mm256_add_epi32(sumi2, _mm256_add_epi32(p3, p4)); - - qs += 8; qh += 4; - } - - const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(scale.f16)); - - accum1 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi1), accum1); - accum2 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi2), accum2); - } - - *s = hsum_float_8(accum1) + IQ1M_DELTA * hsum_float_8(accum2); - -#elif defined __AVX__ - const __m128i mask = _mm_set1_epi16(0x7); - const __m128i mone = _mm_set1_epi16(1); - - __m256 accum1 = _mm256_setzero_ps(); - __m256 accum2 = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint8_t * qh = x[i].qh; - const uint16_t * sc = (const uint16_t *)x[i].scales; - - scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); - - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib = 0; ib < QK_K/32; ib += 2) { - const __m128i q1b_1_0 = _mm_set_epi64x( - iq1s_grid[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)]); - const __m128i q1b_1_1 = _mm_set_epi64x( - iq1s_grid[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)]); - const __m128i q1b_2_0 = _mm_set_epi64x( - iq1s_grid[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)]); - const __m128i q1b_2_1 = _mm_set_epi64x( - iq1s_grid[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)]); - const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - - const __m128i dot1_0 = mul_add_epi8_sse(q1b_1_0, q8b_1_0); - const __m128i dot1_1 = mul_add_epi8_sse(q1b_1_1, q8b_1_1); - const __m128i dot2_0 = mul_add_epi8_sse(q1b_2_0, q8b_2_0); - const __m128i dot2_1 = mul_add_epi8_sse(q1b_2_1, q8b_2_1); - - const __m128i delta1_0 = _mm_set_epi64x(qh[0] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[0] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); - const __m128i delta1_1 = _mm_set_epi64x(qh[1] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[1] & 0x08 ? 
0xffffffffffffffff : 0x0101010101010101); - const __m128i delta2_0 = _mm_set_epi64x(qh[2] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[2] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); - const __m128i delta2_1 = _mm_set_epi64x(qh[3] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[3] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); - - const __m128i dot3_0 = mul_add_epi8_sse(delta1_0, q8b_1_0); - const __m128i dot3_1 = mul_add_epi8_sse(delta1_1, q8b_1_1); - const __m128i dot4_0 = mul_add_epi8_sse(delta2_0, q8b_2_0); - const __m128i dot4_1 = mul_add_epi8_sse(delta2_1, q8b_2_1); - - __m128i scale1_0 = _mm_set1_epi16(sc[ib/2] >> 0); - __m128i scale1_1 = _mm_set1_epi16(sc[ib/2] >> 3); - __m128i scale2_0 = _mm_set1_epi16(sc[ib/2] >> 6); - __m128i scale2_1 = _mm_set1_epi16(sc[ib/2] >> 9); - - scale1_0 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale1_0, mask), 1), mone); - scale1_1 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale1_1, mask), 1), mone); - scale2_0 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale2_0, mask), 1), mone); - scale2_1 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale2_1, mask), 1), mone); - const __m128i p1_0 = _mm_madd_epi16(dot1_0, scale1_0); - const __m128i p1_1 = _mm_madd_epi16(dot1_1, scale1_1); - const __m128i p2_0 = _mm_madd_epi16(dot2_0, scale2_0); - const __m128i p2_1 = _mm_madd_epi16(dot2_1, scale2_1); - const __m128i p3_0 = _mm_madd_epi16(dot3_0, scale1_0); - const __m128i p3_1 = _mm_madd_epi16(dot3_1, scale1_1); - const __m128i p4_0 = _mm_madd_epi16(dot4_0, scale2_0); - const __m128i p4_1 = _mm_madd_epi16(dot4_1, scale2_1); - - sumi1_0 = _mm_add_epi32(sumi1_0, _mm_add_epi32(p1_0, p2_0)); - sumi1_1 = _mm_add_epi32(sumi1_1, _mm_add_epi32(p1_1, p2_1)); - sumi2_0 = _mm_add_epi32(sumi2_0, _mm_add_epi32(p3_0, p4_0)); - sumi2_1 = _mm_add_epi32(sumi2_1, _mm_add_epi32(p3_1, p4_1)); - - qs += 8; qh += 4; - } - - const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(scale.f16)); - - accum1 = _mm256_add_ps(_mm256_mul_ps(d, _mm256_cvtepi32_ps(MM256_SET_M128I(sumi1_1, sumi1_0))), accum1); - accum2 = _mm256_add_ps(_mm256_mul_ps(d, _mm256_cvtepi32_ps(MM256_SET_M128I(sumi2_1, sumi2_0))), accum2); - } - - *s = hsum_float_8(accum1) + IQ1M_DELTA * hsum_float_8(accum2); - -#else - - int sum1[2], sum2[2], delta[4]; - - float sumf = 0; - for (int i = 0; i < nb; i++) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint8_t * qh = x[i].qh; - const uint16_t * sc = (const uint16_t *)x[i].scales; - - scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); - - int sumi1 = 0, sumi2 = 0; - for (int ib = 0; ib < QK_K/32; ++ib) { - delta[0] = qh[0] & 0x08 ? -1 : 1; - delta[1] = qh[0] & 0x80 ? -1 : 1; - delta[2] = qh[1] & 0x08 ? -1 : 1; - delta[3] = qh[1] & 0x80 ? 
-1 : 1; - sum1[0] = sum1[1] = sum2[0] = sum2[1] = 0; - for (int l = 0; l < 4; ++l) { - const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((uint16_t)qh[l/2] << (8 - 4*(l%2))) & 0x700))); - int lsum1 = 0, lsum2 = 0; - for (int j = 0; j < 8; ++j) { - lsum1 += q8[j] * grid[j]; - lsum2 += q8[j]; - } - q8 += 8; - sum1[l/2] += lsum1; - sum2[l/2] += lsum2*delta[l]; - } - - const int ls1 = 2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1; - const int ls2 = 2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1; - - sumi1 += sum1[0] * ls1 + sum1[1] * ls2; - sumi2 += sum2[0] * ls1 + sum2[1] * ls2; - qs += 4; - qh += 2; - } - - sumf += GGML_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); - } - - *s = sumf; - -#endif -} - -void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - assert(n % QK4_NL == 0); - static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); - - const block_iq4_nl * GGML_RESTRICT x = vx; - const block_q8_0 * GGML_RESTRICT y = vy; - - const int nb = n / QK4_NL; - - int ib = 0; - float sumf = 0; - -#if defined __ARM_NEON - const int8x16_t values = vld1q_s8(kvalues_iq4nl); - const uint8x16_t m4b = vdupq_n_u8(0x0f); - uint8x16x2_t q4bits; - int8x16x4_t q4b; - int8x16x4_t q8b; - int32x4_t prod_1, prod_2; - - for (; ib + 1 < nb; ib += 2) { - - q4bits.val[0] = vld1q_u8(x[ib + 0].qs); - q4bits.val[1] = vld1q_u8(x[ib + 1].qs); - q8b.val[0] = vld1q_s8(y[ib + 0].qs); - q8b.val[1] = vld1q_s8(y[ib + 0].qs + 16); - q8b.val[2] = vld1q_s8(y[ib + 1].qs); - q8b.val[3] = vld1q_s8(y[ib + 1].qs + 16); - - q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b)); - q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4)); - q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b)); - q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4)); - - prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]); - prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]); - - sumf += - GGML_FP16_TO_FP32(x[ib+0].d) * GGML_FP16_TO_FP32(y[ib + 0].d) * vaddvq_s32(prod_1) + - GGML_FP16_TO_FP32(x[ib+1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) * vaddvq_s32(prod_2); - } - -#elif defined __AVX2__ - - const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); - const __m128i m4b = _mm_set1_epi8(0x0f); - const __m256i mone = _mm256_set1_epi16(1); - - __m256 accum1 = _mm256_setzero_ps(); - __m256 accum2 = _mm256_setzero_ps(); - for (; ib + 1 < nb; ib += 2) { - const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)x[ib + 0].qs); - const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)x[ib + 1].qs); - const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)y[ib + 0].qs); - const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)y[ib + 1].qs); - const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), - _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); - const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)), - _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); - const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); - const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); - const __m256i p_1 = _mm256_madd_epi16(p16_1, mone); - const __m256i p_2 = 
_mm256_madd_epi16(p16_2, mone); - accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[ib + 0].d)*GGML_FP16_TO_FP32(x[ib + 0].d)), - _mm256_cvtepi32_ps(p_1), accum1); - accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[ib + 1].d)*GGML_FP16_TO_FP32(x[ib + 1].d)), - _mm256_cvtepi32_ps(p_2), accum2); - } - - sumf = hsum_float_8(_mm256_add_ps(accum1, accum2)); - -#elif defined __AVX__ - const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); - const __m128i m4b = _mm_set1_epi8(0x0f); - - __m256 accum = _mm256_setzero_ps(); - for (; ib + 1 < nb; ib += 2) { - const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)x[ib + 0].qs); - const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); - const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs); - const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs + 1); - const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); - const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); - - const __m128i q4b_1_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)); - const __m128i q4b_1_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)); - const __m128i q4b_2_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)); - const __m128i q4b_2_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)); - - const __m256 p = mul_sum_i8_quad_float(q4b_1_0, q4b_1_1, q4b_2_0, q4b_2_1, q8b_1_0, q8b_1_1, q8b_2_0, q8b_2_1); - const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); - accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); - } - - sumf = hsum_float_8(accum); - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector signed int v0 = vec_splats((int32_t)0); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - - const vector signed char values = vec_xl( 0, kvalues_iq4nl); - -#pragma GCC unroll 4 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); - vector float vd = vec_mul(vxd, vyd); - - vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); - vector signed char q4x0 = vec_and(qxs, lowMask); - vector signed char q4x1 = vec_sr(qxs, v4); - - q4x0 = vec_perm(values, values, (vector unsigned char)q4x0); - q4x1 = vec_perm(values, values, (vector unsigned char)q4x1); - - vector signed char q8y0 = vec_xl( 0, y[ib].qs); - vector signed char q8y1 = vec_xl(16, y[ib].qs); - - vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - - vsumi0 = vec_sum4s(qv0, vsumi0); - vsumi1 = vec_sum4s(qv1, vsumi1); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - } - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - sumf = vec_extract(vsumf0, 0); - -#elif defined (__loongarch_asx) - - const __m128i values128 = __lsx_vld((const __m128i*)kvalues_iq4nl, 0); - const __m128i m4b = __lsx_vreplgr2vr_b(0x0f); - 
const __m256i mone = __lasx_xvreplgr2vr_h(1); - - __m256 accum1 = (__m256)__lasx_xvldi(0); - __m256 accum2 = (__m256)__lasx_xvldi(0); - for (; ib + 1 < nb; ib += 2) { - const __m128i q4bits_1 = __lsx_vld((const __m128i*)x[ib + 0].qs, 0); - const __m128i q4bits_2 = __lsx_vld((const __m128i*)x[ib + 1].qs, 0); - const __m256i q8b_1 = __lasx_xvld((const __m256i *)y[ib + 0].qs, 0); - const __m256i q8b_2 = __lasx_xvld((const __m256i *)y[ib + 1].qs, 0); - const __m256i q4b_1 = lasx_insertf128(lsx_shuffle_b(values128, __lsx_vand_v(__lsx_vsrli_h(q4bits_1, 4), m4b)), - lsx_shuffle_b(values128, __lsx_vand_v(q4bits_1, m4b))); - const __m256i q4b_2 = lasx_insertf128(lsx_shuffle_b(values128, __lsx_vand_v(__lsx_vsrli_h(q4bits_2, 4), m4b)), - lsx_shuffle_b(values128, __lsx_vand_v(q4bits_2, m4b))); - const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); - const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); - const __m256i p_1 = lasx_madd_h(p16_1, mone); - const __m256i p_2 = lasx_madd_h(p16_2, mone); - accum1 = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(y[ib + 0].d)*GGML_FP16_TO_FP32(x[ib + 0].d)), - __lasx_xvffint_s_w(p_1), accum1); - accum2 = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(y[ib + 1].d)*GGML_FP16_TO_FP32(x[ib + 1].d)), - __lasx_xvffint_s_w(p_2), accum2); - } - - sumf = hsum_float_8(__lasx_xvfadd_s(accum1, accum2)); - -#elif defined(__VXE__) || defined(__VXE2__) - const int8x16_t v_k = vec_xl(0, kvalues_iq4nl); - const uint8x16_t v_m = vec_splat_u8(0x0F); - - for (; ib < nb; ++ib) { - const block_iq4_nl * GGML_RESTRICT x0 = &x[ib]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; - - const uint8x16_t v_x = vec_xl(0, x0->qs); - int8x16_t v_xl = (int8x16_t)vec_and(v_x, v_m); - int8x16_t v_xh = (int8x16_t)vec_sr(v_x, 4); - - v_xl = vec_perm(v_k, v_k, (uchar8x16_t)v_xl); - v_xh = vec_perm(v_k, v_k, (uchar8x16_t)v_xh); - - const int8x16_t v_yl = vec_xl(0 , y0->qs); - const int8x16_t v_yh = vec_xl(QK8_0/2, y0->qs); - const int32x4_t v_xy = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); - - sumf += GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d) * (v_xy[0] + v_xy[1] + v_xy[2] + v_xy[3]); - } -#endif - for (; ib < nb; ++ib) { - const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); - int sumi1 = 0, sumi2 = 0; - for (int j = 0; j < QK4_NL/2; ++j) { - sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; - sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; - } - sumf += d * (sumi1 + sumi2); - } - *s = sumf; -} - -void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - assert(n % QK_K == 0); - - const block_iq4_xs * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined __ARM_NEON - const int8x16_t values = vld1q_s8(kvalues_iq4nl); - const uint8x16_t m4b = vdupq_n_u8(0x0f); - ggml_uint8x16x2_t q4bits; - ggml_int8x16x4_t q4b; - ggml_int8x16x4_t q8b; - int32x4_t prod_1, prod_2; - - float sumf = 0; - - for (int ibl = 0; ibl < nb; ++ibl) { - - const int8_t * q8 = y[ibl].qs; - const uint8_t * q4 = x[ibl].qs; - uint16_t h = x[ibl].scales_h; - - int sumi1 = 0, sumi2 = 0; - for (int ib = 0; ib < QK_K/64; ++ib) { - - q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - - q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b)); - q4b.val[1] = 
ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4)); - q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b)); - q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4)); - - prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]); - prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]); - - int ls1 = ((x[ibl].scales_l[ib] & 0xf) | ((h << 4) & 0x30)) - 32; - int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32; - h >>= 4; - sumi1 += vaddvq_s32(prod_1) * ls1; - sumi2 += vaddvq_s32(prod_2) * ls2; - - } - - sumf += GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2); - } - - *s = sumf; - -#elif defined __AVX2__ - - const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); - const __m128i m4b = _mm_set1_epi8(0x0f); - - __m256 accum = _mm256_setzero_ps(); - for (int ibl = 0; ibl < nb; ++ibl) { - const uint8_t * qs = x[ibl].qs; - const int8_t * q8 = y[ibl].qs; - uint16_t sh = x[ibl].scales_h; - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - for (int ib = 0; ib < QK_K/32; ib += 2) { - const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)qs); qs += 16; - const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)qs); qs += 16; - const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), - _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); - const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)), - _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); - const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); - const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); - const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; - const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; - sh >>= 4; - const __m256i p_1 = _mm256_madd_epi16(p16_1, _mm256_set1_epi16(ls1)); - const __m256i p_2 = _mm256_madd_epi16(p16_2, _mm256_set1_epi16(ls2)); - sumi1 = _mm256_add_epi32(p_1, sumi1); - sumi2 = _mm256_add_epi32(p_2, sumi2); - } - accum = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d), - _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accum); - } - - *s = hsum_float_8(accum); - -#elif defined __AVX__ - const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); - const __m128i m4b = _mm_set1_epi8(0x0f); - - __m256 accum = _mm256_setzero_ps(); - for (int ibl = 0; ibl < nb; ++ibl) { - const uint8_t * qs = x[ibl].qs; - const int8_t * q8 = y[ibl].qs; - uint16_t sh = x[ibl].scales_h; - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib = 0; ib < QK_K/32; ib += 2) { - const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)qs); qs += 16; - const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)qs); qs += 16; - const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q4b_1_0 = _mm_shuffle_epi8(values128, 
_mm_and_si128(q4bits_1, m4b)); - const __m128i q4b_1_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)); - const __m128i q4b_2_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)); - const __m128i q4b_2_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)); - const __m128i p16_1_0 = mul_add_epi8_sse(q4b_1_0, q8b_1_0); - const __m128i p16_1_1 = mul_add_epi8_sse(q4b_1_1, q8b_1_1); - const __m128i p16_2_0 = mul_add_epi8_sse(q4b_2_0, q8b_2_0); - const __m128i p16_2_1 = mul_add_epi8_sse(q4b_2_1, q8b_2_1); - const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; - const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; - sh >>= 4; - const __m128i p_1_0 = _mm_madd_epi16(p16_1_0, _mm_set1_epi16(ls1)); - const __m128i p_1_1 = _mm_madd_epi16(p16_1_1, _mm_set1_epi16(ls1)); - const __m128i p_2_0 = _mm_madd_epi16(p16_2_0, _mm_set1_epi16(ls2)); - const __m128i p_2_1 = _mm_madd_epi16(p16_2_1, _mm_set1_epi16(ls2)); - sumi1_0 = _mm_add_epi32(p_1_0, sumi1_0); - sumi1_1 = _mm_add_epi32(p_1_1, sumi1_1); - sumi2_0 = _mm_add_epi32(p_2_0, sumi2_0); - sumi2_1 = _mm_add_epi32(p_2_1, sumi2_1); - } - __m128i sumi12_0 = _mm_add_epi32(sumi1_0, sumi2_0); - __m128i sumi12_1 = _mm_add_epi32(sumi1_1, sumi2_1); - accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d), - _mm256_cvtepi32_ps(MM256_SET_M128I(sumi12_1, sumi12_0))), accum); - } - - *s = hsum_float_8(accum); - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector int v0 = vec_splats((int32_t)0); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - const vector signed char values = vec_xl( 0, kvalues_iq4nl); - - for (int ibl = 0; ibl < nb; ++ibl) { - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ibl].d)); - vector float vyd = vec_splats(y[ibl].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - uint16_t h = x[ibl].scales_h; - - const uint8_t * GGML_RESTRICT q4 = x[ibl].qs; - const uint8_t * GGML_RESTRICT sc = x[ibl].scales_l; - const int8_t * GGML_RESTRICT q8 = y[ibl].qs; - - for (int ib = 0; ib < QK_K/64; ib ++ ) { - __builtin_prefetch(q4, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed char qxs0 = (vector signed char)vec_xl( 0, q4); - vector signed char qxs1 = (vector signed char)vec_xl(16, q4); - q4 += 32; - - vector signed char q4x00 = (vector signed char)vec_and(qxs0, lowMask); - vector signed char q4x01 = (vector signed char)vec_sr(qxs0, v4); - vector signed char q4x10 = (vector signed char)vec_and(qxs1, lowMask); - vector signed char q4x11 = (vector signed char)vec_sr(qxs1, v4); - - q4x00 = vec_perm(values, values, (vector unsigned char)q4x00); - q4x01 = vec_perm(values, values, (vector unsigned char)q4x01); - q4x10 = vec_perm(values, values, (vector unsigned char)q4x10); - q4x11 = vec_perm(values, values, (vector unsigned char)q4x11); - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q4x00, q8y0), vec_mulo(q4x00, q8y0)); - vector signed short 
qv1 = vec_add(vec_mule(q4x01, q8y1), vec_mulo(q4x01, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q4x10, q8y2), vec_mulo(q4x10, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q4x11, q8y3), vec_mulo(q4x11, q8y3)); - - const uint16_t ls0 = (uint16_t)(((sc[0] & 0xf) | ((h << 4) & 0x30)) - 32); - const uint16_t ls1 = (uint16_t)(((sc[0] >> 4) | ((h << 2) & 0x30)) - 32); - h >>= 4; - sc ++; - - vector signed short vscales01 = vec_splats((int16_t)ls0); - vector signed short vscales23 = vec_splats((int16_t)ls1); - - vsumi0 = vec_msum(qv0, vscales01, vsumi0); - vsumi1 = vec_msum(qv1, vscales01, vsumi1); - vsumi2 = vec_msum(qv2, vscales23, vsumi2); - vsumi3 = vec_msum(qv3, vscales23, vsumi3); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - - const __m128i values128 = __lsx_vld((const __m128i*)kvalues_iq4nl, 0); - - __m256 accum = (__m256)__lasx_xvldi(0); - - for (int ibl = 0; ibl < nb; ++ibl) { - const uint8_t * qs = x[ibl].qs; - const int8_t * q8 = y[ibl].qs; - uint16_t sh = x[ibl].scales_h; - __m256i sumi1 = __lasx_xvldi(0); - __m256i sumi2 = __lasx_xvldi(0); - for (int ib = 0; ib < QK_K/32; ib += 2) { - const __m128i q4bits_1 = __lsx_vld((const __m128i*)qs, 0); qs += 16; - const __m128i q4bits_2 = __lsx_vld((const __m128i*)qs, 0); qs += 16; - const __m256i q8b_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8b_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q4b_1 = lasx_insertf128(__lsx_vshuf_b(values128, values128, __lsx_vsrli_b(q4bits_1, 4)), - __lsx_vshuf_b(values128, values128, __lsx_vandi_b(q4bits_1, 0xf))); - const __m256i q4b_2 = lasx_insertf128(__lsx_vshuf_b(values128, values128, __lsx_vsrli_b(q4bits_2, 4)), - __lsx_vshuf_b(values128, values128, __lsx_vandi_b(q4bits_2, 0xf))); - const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); - const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); - const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; - const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; - sh >>= 4; - const __m256i p_1 = lasx_madd_h(p16_1, __lasx_xvreplgr2vr_h(ls1)); - const __m256i p_2 = lasx_madd_h(p16_2, __lasx_xvreplgr2vr_h(ls2)); - sumi1 = __lasx_xvadd_w(p_1, sumi1); - sumi2 = __lasx_xvadd_w(p_2, sumi2); - } - accum = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d), - __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accum); - } - - *s = hsum_float_8(accum); -#elif defined(__VXE__) || defined(__VXE2__) - const int8x16_t v_k = vec_xl(0, kvalues_iq4nl); - const uint8x16_t v_m = vec_splat_u8(0x0F); - - float sumf = 0; - - for (int ibl = 0; ibl < nb; ++ibl) { - const uint8_t * GGML_RESTRICT q4 = x[ibl].qs; - const int8_t * GGML_RESTRICT q8 = y[ibl].qs; - - uint16_t h = x[ibl].scales_h; - - int sumi1 = 0, sumi2 = 0; - for (int ib = 0; ib < QK_K/64; ++ib) { - const uint8x16_t v_x0 = vec_xl(0 , q4); - const uint8x16_t v_x1 = vec_xl(QK4_NL/2, q4); - q4 += 32; - - int8x16_t v_x0l = (int8x16_t)vec_and(v_x0, v_m); - int8x16_t v_x0h = (int8x16_t)vec_sr(v_x0, 4); - int8x16_t v_x1l = (int8x16_t)vec_and(v_x1, 
v_m); - int8x16_t v_x1h = (int8x16_t)vec_sr(v_x1, 4); - - v_x0l = vec_perm(v_k, v_k, (uchar8x16_t)v_x0l); - v_x0h = vec_perm(v_k, v_k, (uchar8x16_t)v_x0h); - v_x1l = vec_perm(v_k, v_k, (uchar8x16_t)v_x1l); - v_x1h = vec_perm(v_k, v_k, (uchar8x16_t)v_x1h); - - const int8x16_t v_y0 = vec_xl( 0, q8); - const int8x16_t v_y1 = vec_xl(16, q8); - const int8x16_t v_y2 = vec_xl(32, q8); - const int8x16_t v_y3 = vec_xl(48, q8); - q8 += 64; - - int32x4_t vsumi0 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x0l, v_y0), v_x0h, v_y1); - int32x4_t vsumi1 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x1l, v_y2), v_x1h, v_y3); - - int ls1 = ((x[ibl].scales_l[ib] & 0xF) | ((h << 4) & 0x30)) - 32; - int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32; - - h >>= 4; - - sumi1 += (vsumi0[0] + vsumi0[1] + vsumi0[2] + vsumi0[3]) * ls1; - sumi2 += (vsumi1[0] + vsumi1[1] + vsumi1[2] + vsumi1[3]) * ls2; - } - - sumf += GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2); - } - - *s = sumf; - -#else - float sumf = 0; - for (int ibl = 0; ibl < nb; ++ibl) { - const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; - uint16_t h = x[ibl].scales_h; - const uint8_t * qs = x[ibl].qs; - const int8_t * q8 = y[ibl].qs; - for (int ib = 0; ib < QK_K/32; ib += 2) { - const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30); - const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30); - h >>= 4; - const float d1 = d4d8*(ls1 - 32); - const float d2 = d4d8*(ls2 - 32); - int sumi1 = 0, sumi2 = 0; - for (int j = 0; j < 16; ++j) { - sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; - sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; - } - sumf += d1 * (sumi1 + sumi2); - qs += 16; - q8 += 32; - sumi1 = sumi2 = 0; - for (int j = 0; j < 16; ++j) { - sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; - sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; - } - sumf += d2 * (sumi1 + sumi2); - qs += 16; - q8 += 32; - } - } - *s = sumf; -#endif -} - -// ============================ 4-bit non-linear quants - -void quantize_row_iq4_nl(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { - assert(k % QK4_NL == 0); - quantize_row_iq4_nl_ref(x, y, k); -} - -void quantize_row_iq4_xs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { - assert(k % QK_K == 0); - quantize_iq4_xs(x, y, 1, k, NULL); -} diff --git a/ggml/src/ggml-cpu/ggml-cpu-quants.h b/ggml/src/ggml-cpu/ggml-cpu-quants.h deleted file mode 100644 index e33d9d473ea66..0000000000000 --- a/ggml/src/ggml-cpu/ggml-cpu-quants.h +++ /dev/null @@ -1,63 +0,0 @@ -#pragma once - -#define GGML_COMMON_DECL_C -#include "ggml-common.h" - -#include "ggml.h" - -// GGML CPU internal header - -#ifdef __cplusplus -extern "C" { -#endif - -// Quantization -void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); - -void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, 
int64_t k); -void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); - -void quantize_row_tq1_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_tq2_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); - -void quantize_row_iq4_nl (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); -void quantize_row_iq4_xs (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); - -// Dot product -void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); - -void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); - -void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); - -void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq2_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq2_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq1_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq1_m_q8_K 
(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq4_nl_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq4_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_iq3_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); - -#ifdef __cplusplus -} -#endif diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index c7426df2b851b..ff28bf98bc7df 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -3,11 +3,11 @@ #include "ggml-backend-impl.h" #include "ggml-backend.h" -#include "ggml-cpu-traits.h" +#include "traits.h" #include "ggml-cpu-impl.h" #include "ggml-cpu.h" #include "ggml-impl.h" -#include "ggml-cpu-quants.h" +#include "quants.h" #include "ggml-threading.h" #include "unary-ops.h" #include "binary-ops.h" diff --git a/ggml/src/ggml-cpu/ggml-cpu.cpp b/ggml/src/ggml-cpu/ggml-cpu.cpp index e013e8b416222..735ef3f015c13 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.cpp +++ b/ggml/src/ggml-cpu/ggml-cpu.cpp @@ -1,8 +1,8 @@ #include "ggml-backend.h" #include "ggml-backend-impl.h" #include "ggml-cpu.h" -#include "ggml-cpu-aarch64.h" -#include "ggml-cpu-traits.h" +#include "repack.h" +#include "traits.h" #include "ggml-impl.h" #include "amx/amx.h" @@ -11,7 +11,7 @@ #include #ifdef GGML_USE_CPU_HBM -# include "ggml-cpu-hbm.h" +# include "hbm.h" #endif #ifdef GGML_USE_CPU_KLEIDIAI @@ -51,9 +51,9 @@ std::vector& ggml_backend_cpu_get_extra_buffers_type } #endif -#ifdef GGML_USE_CPU_AARCH64 - if (ggml_backend_cpu_aarch64_buffer_type()) { - bufts.push_back(ggml_backend_cpu_aarch64_buffer_type()); +#ifdef GGML_USE_CPU_REPACK + if (ggml_backend_cpu_repack_buffer_type()) { + bufts.push_back(ggml_backend_cpu_repack_buffer_type()); } #endif @@ -596,8 +596,8 @@ static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t r #ifdef GGML_USE_CPU_KLEIDIAI features.push_back({ "KLEIDIAI", "1" }); #endif - #ifdef GGML_USE_CPU_AARCH64 - features.push_back({ "AARCH64_REPACK", "1" }); + #ifdef GGML_USE_CPU_REPACK + features.push_back({ "REPACK", "1" }); #endif features.push_back({ nullptr, nullptr }); diff --git a/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp b/ggml/src/ggml-cpu/hbm.cpp similarity index 98% rename from ggml/src/ggml-cpu/ggml-cpu-hbm.cpp rename to ggml/src/ggml-cpu/hbm.cpp index fa8dea2af9c72..a4073c15e6c90 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp +++ b/ggml/src/ggml-cpu/hbm.cpp @@ -5,7 +5,7 @@ #include "ggml-cpu.h" #include "ggml-impl.h" -#include "ggml-cpu-hbm.h" +#include "hbm.h" // buffer type HBM diff --git a/ggml/src/ggml-cpu/ggml-cpu-hbm.h b/ggml/src/ggml-cpu/hbm.h similarity index 100% rename from ggml/src/ggml-cpu/ggml-cpu-hbm.h rename to ggml/src/ggml-cpu/hbm.h diff --git a/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp b/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp index 15f0cd1540686..fafe45e6c5c51 100644 --- a/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +++ b/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp @@ -26,7 +26,7 @@ #include "ggml-impl.h" #include "ggml-backend-impl.h" #include "ggml-threading.h" -#include "ggml-cpu-traits.h" +#include "traits.h" #include "kernels.h" diff --git a/ggml/src/ggml-cpu/quants.c b/ggml/src/ggml-cpu/quants.c 
new file mode 100644 index 0000000000000..1ca9c50e724a3 --- /dev/null +++ b/ggml/src/ggml-cpu/quants.c @@ -0,0 +1,1179 @@ +#define GGML_COMMON_IMPL_C +#include "ggml-common.h" + +#include "ggml-cpu-impl.h" +#include "ggml-quants.h" +#include "quants.h" + +#include +#include +#include +#include // for qsort +#include // for GGML_ASSERT + +#define GROUP_MAX_EPS 1e-15f +#define GROUP_MAX_EPS_IQ3_XXS 1e-8f +#define GROUP_MAX_EPS_IQ2_S 1e-8f +#define GROUP_MAX_EPS_IQ1_M 1e-7f +#define GROUP_MAX_EPS_IQ1_S 1e-12f + +#define UNUSED GGML_UNUSED + +void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q4_0_ref(x, y, k); +} + +void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q4_1_ref(x, y, k); +} + +void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q5_0_ref(x, y, k); +} + +void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q5_1_ref(x, y, k); +} + +void quantize_row_q8_0_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q8_0_ref(x, y, k); +} +GGML_CPU_NATIVE_IMPL(quantize_row_q8_0) + +void quantize_row_q8_1_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q8_1_ref(x, y, k); +} +GGML_CPU_NATIVE_IMPL(quantize_row_q8_1) + +// +// 2-6 bit quantization in super-blocks +// + +//========================- 2-bit (de)-quantization + +void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + quantize_row_q2_K_ref(x, vy, k); +} + +//========================= 3-bit (de)-quantization + +void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + quantize_row_q3_K_ref(x, vy, k); +} + +// ====================== 4-bit (de)-quantization + +void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK_K == 0); + block_q4_K * GGML_RESTRICT y = vy; + quantize_row_q4_K_ref(x, y, k); +} + +// ====================== 5-bit (de)-quantization + +void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK_K == 0); + block_q5_K * GGML_RESTRICT y = vy; + quantize_row_q5_K_ref(x, y, k); +} + +// ====================== 6-bit (de)-quantization + +void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK_K == 0); + block_q6_K * GGML_RESTRICT y = vy; + quantize_row_q6_K_ref(x, y, k); +} + +// ====================== Ternary (de)-quantization (BitNet b1.58 and TriLMs) + +void quantize_row_tq1_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK_K == 0); + block_tq1_0 * GGML_RESTRICT y = vy; + quantize_row_tq1_0_ref(x, y, k); +} + +void quantize_row_tq2_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK_K == 0); + block_tq2_0 * GGML_RESTRICT y = vy; + quantize_row_tq2_0_ref(x, y, k); +} + +//===================================== Q8_K ============================================== + +void quantize_row_q8_K_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q8_K_ref(x, y, k); +} +GGML_CPU_NATIVE_IMPL(quantize_row_q8_K) + +//===================================== Dot products ================================= + +void ggml_vec_dot_q4_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t 
bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F) - 8; + const int v1 = (x[ib].qs[j] >> 4) - 8; + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + } + + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q4_0_q8_0) + +// TODO: add WASM SIMD +void ggml_vec_dot_q4_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F); + const int v1 = (x[ib].qs[j] >> 4); + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q4_1_q8_1) + +void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; + const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); + + const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); + const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + } + + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q5_0_q8_0) + +void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_1); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; + const uint8_t xh_1 = ((qh >> 
(j + 12)) ) & 0x10; + + const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; + const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q5_1_q8_1) + +void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q8_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + + for (; ib < nb; ++ib) { + int sumi = 0; + + for (int j = 0; j < qk; j++) { + sumi += x[ib].qs[j]*y[ib].qs[j]; + } + + sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + } + + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q8_0_q8_0) + +void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_tq1_0 * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243}; + + float sumf = 0.0f; + + for (int i = 0; i < nb; ++i) { + int sum = 0; + + for (size_t j = 0; j < sizeof(x->qs) - sizeof(x->qs) % 32; j += 32) { + for (size_t l = 0; l < 5; ++l) { + for (size_t m = 0; m < 32; ++m) { + uint8_t q = x[i].qs[j + m] * pow3[l]; + uint16_t xi = ((uint16_t) q * 3) >> 8; + sum += (xi - 1) * y[i].qs[j*5 + l*32 + m]; + } + } + } + for (size_t j = sizeof(x->qs) - sizeof(x->qs) % 32; j < sizeof(x->qs); j += 16) { + for (size_t l = 0; l < 5; ++l) { + for (size_t m = 0; m < 16; ++m) { + uint8_t q = x[i].qs[j + m] * pow3[l]; + uint16_t xi = ((uint16_t) q * 3) >> 8; + sum += (xi - 1) * y[i].qs[j*5 + l*16 + m]; + } + } + } + + for (size_t l = 0; l < 4; ++l) { + for (size_t j = 0; j < sizeof(x->qh); ++j) { + uint8_t q = x[i].qh[j] * pow3[l]; + uint16_t xi = ((uint16_t) q * 3) >> 8; + sum += (xi - 1) * y[i].qs[sizeof(x->qs)*5 + l*sizeof(x->qh) + j]; + } + } + + sumf += (float) sum * (GGML_FP16_TO_FP32(x[i].d) * y[i].d); + } + + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_tq1_0_q8_K) + +void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_tq2_0 * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + float sumf = 0.0f; + + for (int i = 0; i < nb; ++i) { + int32_t sumi = 0; + + for (size_t j = 0; j < sizeof(x->qs); j += 32) { + for (size_t l = 0; l < 4; ++l) { + for (size_t k = 0; k < 32; ++k) { + sumi += y[i].qs[j*4 + l*32 + k] * (((x[i].qs[j + k] >> (l*2)) & 3) - 1); + } + } + } + + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + + sumf += (float) sumi * d; + } + + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_tq2_0_q8_K) + +void ggml_vec_dot_q2_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, 
int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q2_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const uint8_t * q2 = x[i].qs; + const int8_t * q8 = y[i].qs; + const uint8_t * sc = x[i].scales; + + int summs = 0; + for (int j = 0; j < 16; ++j) { + summs += y[i].bsums[j] * (sc[j] >> 4); + } + + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + + int isum = 0; + int is = 0; + int d; + for (int k = 0; k < QK_K/128; ++k) { + int shift = 0; + for (int j = 0; j < 4; ++j) { + d = sc[is++] & 0xF; + int isuml = 0; + for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + d = sc[is++] & 0xF; + isuml = 0; + for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + shift += 2; + q8 += 32; + } + q2 += 32; + } + sumf += dall * isum - dmin * summs; + } + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q2_K_q8_K) + +void ggml_vec_dot_q3_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const uint32_t kmask1 = 0x03030303; + const uint32_t kmask2 = 0x0f0f0f0f; + + const block_q3_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + // scalar version + // This function is written like this so the compiler can manage to vectorize most of it + // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the + // manually vectorized version above. Every other version I tried would run at least 4 times slower. + // The ideal situation would be if we could just write the code once, and the compiler would + // automatically produce the best possible set of machine instructions, instead of us having to manually + // write vectorized versions for AVX, ARM_NEON, etc. + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + uint32_t auxs[4]; + const int8_t * scales = (const int8_t*)auxs; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].hmask; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 
0 : 4); + a += 32; m <<= 1; + q3 += 32; + } + a = aux8; + + memcpy(auxs, x[i].scales, 12); + uint32_t tmp = auxs[2]; + auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + for (int j = 0; j < QK_K/16; ++j) { + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q3_K_q8_K) + +void ggml_vec_dot_q4_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + a += 32; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + a += 32; q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q4_K_q8_K) + +void ggml_vec_dot_q5_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * 
GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q5_K_q8_K) + +void ggml_vec_dot_q6_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q6_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + a[l + 
64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; + } + a += 128; + q4 += 64; + qh += 32; + } + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/16; ++j) { + int scale = x[i].scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q6_K_q8_K) + +void ggml_vec_dot_iq2_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + uint32_t aux32[2]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + memcpy(aux32, q2, 2*sizeof(uint32_t)); + q2 += 4; + const uint32_t ls = 2*(aux32[1] >> 28) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); + const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq2_xxs_q8_K) + +void ggml_vec_dot_iq2_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const uint8_t * GGML_RESTRICT sc = x[i].scales; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1; + const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1; + int32_t sumi = 0; + for (int l = 0; l < 2; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); + const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += sumi * ls1; + sumi = 0; + for (int l = 2; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); + const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? 
-1 : 1); + } + q8 += 8; + } + bsum += sumi * ls2; + q2 += 4; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq2_xs_q8_K) + +void ggml_vec_dot_iq2_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint8_t * signs = qs + QK_K/8; + + int bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf); + int ls2 = 1 + 2*(x[i].scales[ib32] >> 4); + int sumi1 = 0, sumi2 = 0; + for (int l = 0; l < 2; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); + for (int j = 0; j < 8; ++j) { + sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + for (int l = 2; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); + for (int j = 0; j < 8; ++j) { + sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += ls1 * sumi1 + ls2 * sumi2; + qs += 4; + signs += 4; + } + + sumf += d * bsum; + } + + *s = 0.125f * sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq2_s_q8_K) + +void ggml_vec_dot_iq3_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + uint32_t aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t); + const uint32_t ls = 2*(aux32 >> 28) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]); + const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]); + const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? 
-1 : 1); + } + q8 += 8; + } + q3 += 8; + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.25f * sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq3_xxs_q8_K) + +void ggml_vec_dot_iq3_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint8_t * GGML_RESTRICT signs = x[i].signs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1; + const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256))); + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + qs += 8; + signs += 4; + bsum += sumi * ls1; + sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256))); + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + qs += 8; + signs += 4; + bsum += sumi * ls2; + } + sumf += d * bsum; + } + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq3_s_q8_K) + +void ggml_vec_dot_iq1_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + int sumi = 0, sumi1 = 0; + for (int ib = 0; ib < QK_K/32; ++ib) { + const int ls = 2*((qh[ib] >> 12) & 7) + 1; + const int delta = qh[ib] & 0x8000 ? 
-1 : 1; + int lsum = 0; + for (int l = 0; l < 4; ++l) { + const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8))); + for (int j = 0; j < 8; ++j) { + lsum += q8[j] * grid[j]; + } + q8 += 8; + } + sumi += ls * lsum; + sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]); + qs += 4; + } + + sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); + } + + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq1_s_q8_K) + +void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_m * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + iq1m_scale_t scale; + + int sum1[2], sum2[2], delta[4]; + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint16_t * sc = (const uint16_t *)x[i].scales; + + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + + int sumi1 = 0, sumi2 = 0; + for (int ib = 0; ib < QK_K/32; ++ib) { + delta[0] = qh[0] & 0x08 ? -1 : 1; + delta[1] = qh[0] & 0x80 ? -1 : 1; + delta[2] = qh[1] & 0x08 ? -1 : 1; + delta[3] = qh[1] & 0x80 ? -1 : 1; + sum1[0] = sum1[1] = sum2[0] = sum2[1] = 0; + for (int l = 0; l < 4; ++l) { + const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((uint16_t)qh[l/2] << (8 - 4*(l%2))) & 0x700))); + int lsum1 = 0, lsum2 = 0; + for (int j = 0; j < 8; ++j) { + lsum1 += q8[j] * grid[j]; + lsum2 += q8[j]; + } + q8 += 8; + sum1[l/2] += lsum1; + sum2[l/2] += lsum2*delta[l]; + } + + const int ls1 = 2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1; + const int ls2 = 2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1; + + sumi1 += sum1[0] * ls1 + sum1[1] * ls2; + sumi2 += sum2[0] * ls1 + sum2[1] * ls2; + qs += 4; + qh += 2; + } + + sumf += GGML_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); + } + + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq1_m_q8_K) + +void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK4_NL == 0); + static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); + + const block_iq4_nl * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + const int nb = n / QK4_NL; + + int ib = 0; + float sumf = 0; + + for (; ib < nb; ++ib) { + const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; + sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; + } + sumf += d * (sumi1 + sumi2); + } + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq4_nl_q8_0) + +void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_K == 0); + + const block_iq4_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + float sumf = 0; + for (int ibl 
= 0; ibl < nb; ++ibl) { + const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + uint16_t h = x[ibl].scales_h; + const uint8_t * qs = x[ibl].qs; + const int8_t * q8 = y[ibl].qs; + for (int ib = 0; ib < QK_K/32; ib += 2) { + const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30); + const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30); + h >>= 4; + const float d1 = d4d8*(ls1 - 32); + const float d2 = d4d8*(ls2 - 32); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d1 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + sumi1 = sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d2 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + } + } + *s = sumf; +} +GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq4_xs_q8_K) + +// ============================ 4-bit non-linear quants + +void quantize_row_iq4_nl(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + assert(k % QK4_NL == 0); + quantize_row_iq4_nl_ref(x, y, k); +} + +void quantize_row_iq4_xs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + assert(k % QK_K == 0); + quantize_iq4_xs(x, y, 1, k, NULL); +} diff --git a/ggml/src/ggml-cpu/quants.h b/ggml/src/ggml-cpu/quants.h new file mode 100644 index 0000000000000..d729e07d633f5 --- /dev/null +++ b/ggml/src/ggml-cpu/quants.h @@ -0,0 +1,116 @@ +#pragma once + +#define GGML_COMMON_DECL_C +#include "ggml-common.h" + +#include "ggml.h" + +// GGML CPU internal header + +#ifdef __cplusplus +extern "C" { +#endif + +// Quantization +void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); + +void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); + +void quantize_row_tq1_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_tq2_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); + +void quantize_row_iq4_nl (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_iq4_xs (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); + +// Dot product +void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void 
ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); + +void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); + +void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); + +void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq2_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq2_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq1_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq4_nl_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq4_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq3_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); + +// Generic implementation +void quantize_row_q8_0_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); +void quantize_row_q8_1_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); +void quantize_row_q8_K_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void ggml_vec_dot_q4_0_q8_0_generic(int n, 
float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q4_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q2_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q3_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q4_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q5_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q6_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq2_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq2_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq2_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq3_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq3_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq1_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * 
GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); + +#if defined(GGML_CPU_GENERIC) +#define quantize_row_q8_0_generic quantize_row_q8_0 +#define quantize_row_q8_1_generic quantize_row_q8_1 +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_q4_0_q8_0_generic ggml_vec_dot_q4_0_q8_0 +#define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 +#define ggml_vec_dot_q5_0_q8_0_generic ggml_vec_dot_q5_0_q8_0 +#define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 +#define ggml_vec_dot_q8_0_q8_0_generic ggml_vec_dot_q8_0_q8_0 +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K +#define ggml_vec_dot_q3_K_q8_K_generic ggml_vec_dot_q3_K_q8_K +#define ggml_vec_dot_q4_K_q8_K_generic ggml_vec_dot_q4_K_q8_K +#define ggml_vec_dot_q5_K_q8_K_generic ggml_vec_dot_q5_K_q8_K +#define ggml_vec_dot_q6_K_q8_K_generic ggml_vec_dot_q6_K_q8_K +#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K +#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K +#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K +#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K +#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K +#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 +#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K +#endif + +#ifdef __cplusplus +} +#endif diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp new file mode 100644 index 0000000000000..628142d5f630a --- /dev/null +++ b/ggml/src/ggml-cpu/repack.cpp @@ -0,0 +1,1566 @@ +#define GGML_COMMON_IMPL_CPP +#define GGML_COMMON_DECL_CPP +#include "ggml-common.h" +#include "ggml-backend-impl.h" + +#include "ggml-impl.h" +#include "ggml-cpu.h" +#include "ggml-cpu-impl.h" +#include "traits.h" + +#include +#include +#include +#include // for qsort +#include // for GGML_ASSERT + +#include "repack.h" + +#if defined(__GNUC__) +#pragma GCC diagnostic ignored "-Woverlength-strings" +#endif + +#define UNUSED GGML_UNUSED + +static inline int nearest_int(float fval) { + assert(fabsf(fval) <= 4194303.f); + float val = fval + 12582912.f; + int i; memcpy(&i, &val, sizeof(int)); + return (i & 0x007fffff) - 0x00400000; +} + +// Functions to create the interleaved data layout formats + +// interleave 4 block_q4_0s in blocks of blck_size_interleave +// returns an interleaved block_q4_0x4 +// in the interleaved block_q4_0x4, place deltas for 4 block_q4_0 blocks +// first, then interleave quants from 4 block_q4_0s in blocks of blck_size_interleave +// +// - in : an array of block_q4_0 pointers +// - blck_size_interleave : the block_q4_0 quants bytes are interleaved in blocks of +// blck_size_interleave bytes +// - xor_mask : the mask to convert the nibbles in block_q4_0 quants bytes +// from bias offset form to pure sign form (this saves subtract +// operations durin unpacking) +// + +extern "C" { + +void ggml_quantize_mat_q8_0_4x4_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; + + // scalar + const int blck_size_interleave = 4; + float srcv[4][QK8_0]; + float id[4]; + + for (int i = 0; i 
< nb; i++) { + for (int row_iter = 0; row_iter < 4; row_iter++) { + float amax = 0.0f; // absolute max + + for (int j = 0; j < QK8_0; j++) { + srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j]; + amax = MAX(amax, fabsf(srcv[row_iter][j])); + } + + const float d = amax / ((1 << 7) - 1); + id[row_iter] = d ? 1.0f / d : 0.0f; + + y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + } + + for (int j = 0; j < QK8_0 * 4; j++) { + int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; + int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; + src_offset += (j % blck_size_interleave); + + float x0 = srcv[src_id][src_offset] * id[src_id]; + y[i].qs[j] = roundf(x0); + } + } +} +GGML_CPU_NATIVE_IMPL(ggml_quantize_mat_q8_0_4x4) + +void ggml_quantize_mat_q8_0_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; + + // scalar + const int blck_size_interleave = 8; + float srcv[4][QK8_0]; + float id[4]; + + for (int i = 0; i < nb; i++) { + for (int row_iter = 0; row_iter < 4; row_iter++) { + float amax = 0.0f; // absolute max + + for (int j = 0; j < QK8_0; j++) { + srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j]; + amax = MAX(amax, fabsf(srcv[row_iter][j])); + } + + const float d = amax / ((1 << 7) - 1); + id[row_iter] = d ? 1.0f / d : 0.0f; + + y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + } + + for (int j = 0; j < QK8_0 * 4; j++) { + int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; + int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; + src_offset += (j % blck_size_interleave); + + float x0 = srcv[src_id][src_offset] * id[src_id]; + y[i].qs[j] = roundf(x0); + } + } +} +GGML_CPU_NATIVE_IMPL(ggml_quantize_mat_q8_0_4x8) + +void ggml_quantize_mat_q8_K_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK_K == 256); + assert(k % QK_K == 0); + const int nb = k / QK_K; + + block_q8_Kx4 * GGML_RESTRICT y = (block_q8_Kx4 *) vy; + + // scalar + const int blck_size_interleave = 8; + float srcv[4][QK_K]; + float iscale[4]; + + for (int i = 0; i < nb; i++) { + for (int row_iter = 0; row_iter < 4; row_iter++) { + float amax = 0.0f; // absolute max + float max = 0; + + for (int j = 0; j < QK_K; j++) { + srcv[row_iter][j] = x[row_iter * k + i * QK_K + j]; + // Update the maximum value of the corresponding super block + if(amax < fabsf(srcv[row_iter][j])) { + amax = fabsf(srcv[row_iter][j]); + max = srcv[row_iter][j]; + } + } + + iscale[row_iter] = amax ? -127.f/max : 0; + + y[i].d[row_iter] = amax ? 
1/iscale[row_iter] : 0; + } + + for (int j = 0; j < QK_K / 4; j++) { + y[i].bsums[j] = 0; + } + + // Quants values are interleaved in sequence of eight bytes from corresponding super blocks + // Bsums values are interleaved in sequence of four bsums from each super block taken for interleaving + // i.e first four bsums from the first super block, followed by first four bsums from second super block and so on + for (int j = 0; j < QK_K * 4; j++) { + int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; + int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; + src_offset += (j % blck_size_interleave); + int index = (((j & 31) >> 3) << 2) + ((j >> 8) << 4) + ((j >> 6) & 3); + + float x0 = srcv[src_id][src_offset] * iscale[src_id]; + y[i].qs[j] = nearest_int(x0); + y[i].bsums[index] += y[i].qs[j]; + } + } +} +GGML_CPU_NATIVE_IMPL(ggml_quantize_mat_q8_K_4x8) + +} // extern "C" + +template +void ggml_quantize_mat_t(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row); + +template <> void ggml_quantize_mat_t<4, GGML_TYPE_Q8_0>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { + assert(nrow == 4); + UNUSED(nrow); + ggml_quantize_mat_q8_0_4x4(x, vy, n_per_row); +} + +template <> void ggml_quantize_mat_t<8, GGML_TYPE_Q8_0>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { + assert(nrow == 4); + UNUSED(nrow); + ggml_quantize_mat_q8_0_4x8(x, vy, n_per_row); +} + +template <> void ggml_quantize_mat_t<8, GGML_TYPE_Q8_K>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { + assert(nrow == 4); + UNUSED(nrow); + ggml_quantize_mat_q8_K_4x8(x, vy, n_per_row); +} + +extern "C" { + +void ggml_gemv_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[4]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } +} +GGML_CPU_NATIVE_IMPL(ggml_gemv_q4_0_4x4_q8_0) + +void ggml_gemv_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 8; + + assert (n 
% qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[4]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } +} +GGML_CPU_NATIVE_IMPL(ggml_gemv_q4_0_4x8_q8_0) + +void ggml_gemv_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + { + float sumf[8]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } + } +} +GGML_CPU_NATIVE_IMPL(ggml_gemv_q4_0_8x8_q8_0) + +void ggml_gemv_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK_K; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[8]; + float sum_minf[8]; + uint32_t utmp[32]; + int sumi1; + int sumi2; + int sumi; + + const block_q8_K * a_ptr = (const block_q8_K *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_Kx8 * b_ptr 
= (const block_q4_Kx8 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) { + sumf[j] = 0.0; + sum_minf[j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int sb = 0; sb < 8; sb++) { + memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12); + utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4); + const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1; + utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4); + utmp[sb * 4 + 2] = uaux_0; + utmp[sb * 4 + 0] &= kmask1; + } + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + uint8_t *scales_0 = (uint8_t*) utmp + (k / 4) * 32; + uint8_t *scales_1 = (uint8_t*) utmp + (k / 4) * 32 + 16; + for (int j = 0; j < ncols_interleaved; j++) { + sumi1 = 0; + sumi2 = 0; + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4); + sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 64 + (k % 4) * blocklen + i]); + sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 64 + (k % 4) * blocklen + i + 32]); + sumi1 = sumi1 * scales_0[j]; + sumi2 = sumi2 * scales_1[j]; + sumi += sumi1 + sumi2; + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d; + } + } + for (int sb = 0; sb < 8; sb++) { + uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16; + for (int j = 0; j < ncols_interleaved; j++) { + sum_minf[j] += mins[j] * (a_ptr[l].bsums[sb * 2] + a_ptr[l].bsums[sb * 2 + 1]) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d; + } + } + } + for (int j = 0; j < ncols_interleaved; j++) { + s[x * ncols_interleaved + j] = sumf[j] - sum_minf[j]; + } + } +} +GGML_CPU_NATIVE_IMPL(ggml_gemv_q4_K_8x8_q8_K) + +void ggml_gemv_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + { + float sumf[4]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; + const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])); + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } + } +} +GGML_CPU_NATIVE_IMPL(ggml_gemv_iq4_nl_4x4_q8_0) + +void ggml_gemm_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved 
= 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + { + float sumf[4][4]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } + } +} +GGML_CPU_NATIVE_IMPL(ggml_gemm_q4_0_4x4_q8_0) + +void ggml_gemm_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[4][4]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } +} +GGML_CPU_NATIVE_IMPL(ggml_gemm_q4_0_4x8_q8_0) + +void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + 
assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[4][8]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } +} +GGML_CPU_NATIVE_IMPL(ggml_gemm_q4_0_8x8_q8_0) + +void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK_K; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[4][8]; + float sum_minf[4][8]; + uint32_t utmp[32]; + int sumi1; + int sumi2; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_Kx4 * a_ptr = (const block_q8_Kx4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumf[m][j] = 0.0; + sum_minf[m][j] = 0.0; + } + } + for (int l = 0; l < nb; l++) { + for (int sb = 0; sb < 8; sb++) { + memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12); + utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4); + const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1; + utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4); + utmp[sb * 4 + 2] = uaux_0; + utmp[sb * 4 + 0] &= kmask1; + } + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + uint8_t *scales_0 = (uint8_t*) utmp + (k / 4) * 32; + uint8_t *scales_1 = (uint8_t*) utmp + (k / 4) * 32 + 16; + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi1 = 0; + sumi2 = 0; + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4); + sumi1 = 
(v0 * a_ptr[l].qs[(k >> 2) * 256 + (k % 4) * 4 * blocklen + m * blocklen + i]); + sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 256 + (k % 4) * 4 * blocklen + m * blocklen + i + 128]); + sumi1 = sumi1 * scales_0[j]; + sumi2 = sumi2 * scales_1[j]; + sumi += sumi1 + sumi2; + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m]; + } + } + } + for (int sb = 0; sb < 8; sb++) { + uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16; + for(int m = 0; m < 4; m++) { + const int16_t *bsums = a_ptr[l].bsums + (sb * 8) + (m * 4) - ((sb % 2) * 6); + for(int j = 0; j < ncols_interleaved; j++) { + sum_minf[m][j] += mins[j] * (bsums[0] + bsums[1]) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m]; + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j] - sum_minf[m][j]; + } + } + } + } +} +GGML_CPU_NATIVE_IMPL(ggml_gemm_q4_K_8x8_q8_K) + +void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + { + float sumf[4][4]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; + const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])); + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } + } +} +GGML_CPU_NATIVE_IMPL(ggml_gemm_iq4_nl_4x4_q8_0) + +} // extern "C" + +static block_q4_0x4 make_block_q4_0x4(block_q4_0 * in, unsigned int blck_size_interleave) { + block_q4_0x4 out; + + for (int i = 0; i < 4; i++) { + out.d[i] = in[i].d; + } + + const int end = QK4_0 * 2 / blck_size_interleave; + + if (blck_size_interleave == 8) { + const uint64_t xor_mask = 0x8888888888888888ULL; + for (int i = 0; i < end; ++i) { + int src_id = i % 4; + int src_offset = (i / 4) * blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + uint64_t elems; + // Using memcpy to avoid unaligned memory accesses + memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); + elems ^= xor_mask; + memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); + } + } else if (blck_size_interleave == 4) { + const uint32_t xor_mask = 0x88888888; + for (int i = 0; i < end; ++i) { + int src_id = 
i % 4; + int src_offset = (i / 4) * blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + uint32_t elems; + memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint32_t)); + elems ^= xor_mask; + memcpy(&out.qs[dst_offset], &elems, sizeof(uint32_t)); + } + } else { + GGML_ASSERT(false); + } + + return out; +} + +// interleave 8 block_q4_0s in blocks of blck_size_interleave +// returns an interleaved block_q4_0x8 +// in the interleaved block_q4_0x8, place deltas for 8 block_q4_0 blocks +// first, then interleave quants from 8 block_q4_0s in blocks of blck_size_interleave +static block_q4_0x8 make_block_q4_0x8(block_q4_0 * in, unsigned int blck_size_interleave) { + block_q4_0x8 out; + + for (int i = 0; i < 8; i++) { + out.d[i] = in[i].d; + } + + const int end = QK4_0 * 4 / blck_size_interleave; + const uint64_t xor_mask = 0x8888888888888888ULL; + + for (int i = 0; i < end; ++i) { + int src_id = i % 8; + int src_offset = (i / 8) * blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + uint64_t elems; + memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); + elems ^= xor_mask; + memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); + } + + return out; +} + +static block_q4_Kx8 make_block_q4_Kx8(block_q4_K * in, unsigned int blck_size_interleave) { + block_q4_Kx8 out; + //Delta(scale) and dmin values of the eight Q4_K structures are copied onto the output interleaved structure + for (int i = 0; i < 8; i++) { + out.d[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d; + } + + for (int i = 0; i < 8; i++) { + out.dmin[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.dmin; + } + + const int end = QK_K * 4 / blck_size_interleave; + + // Interleave Q4_K quants by taking 8 bytes at a time + for (int i = 0; i < end; ++i) { + int src_id = i % 8; + int src_offset = (i / 8) * blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + uint64_t elems; + memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); + memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); + } + + // The below logic is designed so as to unpack and rearrange scales and mins values in Q4_K + // Currently the Q4_K structure has 8 scales and 8 mins packed in 12 bytes ( 6 bits for each value) + // The output Q4_Kx8 structure has 96 bytes + // Every 12 byte is packed such that it contains scales and mins for corresponding sub blocks from Q4_K structure + // For eg - First 12 bytes contains 8 scales and 8 mins - each of first sub block from different Q4_K structures + uint8_t s[8], m[8]; + + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 8; j++) { + s[j] = in[j].scales[i] & 63; + m[j] = in[j].scales[i + 4] & 63; + } + + out.scales[i * 12] = (s[0] & 63) + ((s[4] & 48) << 2); + out.scales[i * 12 + 1] = (s[1] & 63) + ((s[5] & 48) << 2); + out.scales[i * 12 + 2] = (s[2] & 63) + ((s[6] & 48) << 2); + out.scales[i * 12 + 3] = (s[3] & 63) + ((s[7] & 48) << 2); + out.scales[i * 12 + 4] = (m[0] & 63) + ((m[4] & 48) << 2); + out.scales[i * 12 + 5] = (m[1] & 63) + ((m[5] & 48) << 2); + out.scales[i * 12 + 6] = (m[2] & 63) + ((m[6] & 48) << 2); + out.scales[i * 12 + 7] = (m[3] & 63) + ((m[7] & 48) << 2); + out.scales[i * 12 + 8] = (s[4] & 15) + ((m[4] & 15) << 4); + out.scales[i * 12 + 9] = (s[5] & 15) + ((m[5] & 15) << 4); + out.scales[i * 12 + 10] = (s[6] & 15) + ((m[6] & 15) << 4); + out.scales[i * 12 + 11] = (s[7] & 15) + ((m[7] & 15) << 4); + + } + + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 8; j++) { + s[j] = ((in[j].scales[i] & 192) >> 2) | (in[j].scales[i+8] & 15); + m[j] 
= ((in[j].scales[i + 4] & 192) >> 2) | ((in[j].scales[i+8] & 240) >> 4); + } + + out.scales[i * 12 + 48] = (s[0] & 63) + ((s[4] & 48) << 2); + out.scales[i * 12 + 49] = (s[1] & 63) + ((s[5] & 48) << 2); + out.scales[i * 12 + 50] = (s[2] & 63) + ((s[6] & 48) << 2); + out.scales[i * 12 + 51] = (s[3] & 63) + ((s[7] & 48) << 2); + out.scales[i * 12 + 52] = (m[0] & 63) + ((m[4] & 48) << 2); + out.scales[i * 12 + 53] = (m[1] & 63) + ((m[5] & 48) << 2); + out.scales[i * 12 + 54] = (m[2] & 63) + ((m[6] & 48) << 2); + out.scales[i * 12 + 55] = (m[3] & 63) + ((m[7] & 48) << 2); + out.scales[i * 12 + 56] = (s[4] & 15) + ((m[4] & 15) << 4); + out.scales[i * 12 + 57] = (s[5] & 15) + ((m[5] & 15) << 4); + out.scales[i * 12 + 58] = (s[6] & 15) + ((m[6] & 15) << 4); + out.scales[i * 12 + 59] = (s[7] & 15) + ((m[7] & 15) << 4); + + } + + return out; +} + +static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { + GGML_ASSERT(t->type == GGML_TYPE_Q4_0); + GGML_ASSERT(interleave_block == 4 || interleave_block == 8); + constexpr int nrows_interleaved = 4; + + block_q4_0x4 * dst = (block_q4_0x4 *)t->data; + const block_q4_0 * src = (const block_q4_0 *)data; + block_q4_0 dst_tmp[4]; + int nrow = ggml_nrows(t); + int nblocks = t->ne[0] / QK4_0; + + GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); + + if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + return -1; + } + + for (int b = 0; b < nrow; b += nrows_interleaved) { + for (int64_t x = 0; x < nblocks; x++) { + for (int i = 0; i < nrows_interleaved; i++) { + dst_tmp[i] = src[x + i * nblocks]; + } + *dst++ = make_block_q4_0x4(dst_tmp, interleave_block); + } + src += nrows_interleaved * nblocks; + } + return 0; + + GGML_UNUSED(data_size); +} +static int repack_q4_K_to_q4_K_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { + GGML_ASSERT(t->type == GGML_TYPE_Q4_K); + GGML_ASSERT(interleave_block == 8); + constexpr int nrows_interleaved = 8; + + block_q4_Kx8 * dst = (block_q4_Kx8*)t->data; + const block_q4_K * src = (const block_q4_K*) data; + block_q4_K dst_tmp[8]; + int nrow = ggml_nrows(t); + int nblocks = t->ne[0] / QK_K; + + GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_K)); + + if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + return -1; + } + + for (int b = 0; b < nrow; b += nrows_interleaved) { + for (int64_t x = 0; x < nblocks; x++) { + for (int i = 0; i < nrows_interleaved; i++ ) { + dst_tmp[i] = src[x + i * nblocks]; + } + *dst++ = make_block_q4_Kx8(dst_tmp, interleave_block); + } + src += nrows_interleaved * nblocks; + } + return 0; + + GGML_UNUSED(data_size); +} + +static int repack_q4_0_to_q4_0_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { + GGML_ASSERT(t->type == GGML_TYPE_Q4_0); + GGML_ASSERT(interleave_block == 8); + constexpr int nrows_interleaved = 8; + + block_q4_0x8 * dst = (block_q4_0x8*)t->data; + const block_q4_0 * src = (const block_q4_0*) data; + block_q4_0 dst_tmp[8]; + int nrow = ggml_nrows(t); + int nblocks = t->ne[0] / QK4_0; + + GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); + + if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + return -1; + } + + for (int b = 0; b < nrow; b += nrows_interleaved) { + for (int64_t x = 0; x < nblocks; x++) { + for (int i = 0; i < nrows_interleaved; i++ ) { + dst_tmp[i] = src[x + i * nblocks]; + } + *dst++ = make_block_q4_0x8(dst_tmp, 
interleave_block); + } + src += nrows_interleaved * nblocks; + } + return 0; + + GGML_UNUSED(data_size); +} + +static block_iq4_nlx4 make_block_iq4_nlx4(block_iq4_nl * in, unsigned int blck_size_interleave) { + block_iq4_nlx4 out; + + for (int i = 0; i < 4; i++) { + out.d[i] = in[i].d; + } + + const int end = QK4_NL * 2 / blck_size_interleave; + + // TODO: this branch seems wrong + //if (blck_size_interleave == 8) { + // for (int i = 0; i < end; ++i) { + // int src_id = i % 4; + // int src_offset = (i / 4) * blck_size_interleave; + // int dst_offset = i * blck_size_interleave; + + // // Using memcpy to avoid unaligned memory accesses + // memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint64_t)); + // } + //} else + if (blck_size_interleave == 4) { + for (int i = 0; i < end; ++i) { + int src_id = i % 4; + int src_offset = (i / 4) * blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint32_t)); + } + } else { + GGML_ASSERT(false); + } + + return out; +} + +static int repack_iq4_nl_to_iq4_nl_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { + GGML_ASSERT(t->type == GGML_TYPE_IQ4_NL); + //GGML_ASSERT(interleave_block == 4 || interleave_block == 8); + GGML_ASSERT(interleave_block == 4); + + block_iq4_nlx4 * dst = (block_iq4_nlx4 *)t->data; + const block_iq4_nl * src = (const block_iq4_nl *)data; + block_iq4_nl dst_tmp[4]; + int nrow = ggml_nrows(t); + int nrows_interleaved = 4; + int nblocks = t->ne[0] / QK4_0; + + GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_iq4_nl)); + + if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + return -1; + } + + for (int b = 0; b < nrow; b += nrows_interleaved) { + for (int64_t x = 0; x < nblocks; x++) { + for (int i = 0; i < nrows_interleaved; i++) { + dst_tmp[i] = src[x + i * nblocks]; + } + *dst++ = make_block_iq4_nlx4(dst_tmp, interleave_block); + } + src += nrows_interleaved * nblocks; + } + return 0; + + GGML_UNUSED(data_size); +} + +namespace ggml::cpu::repack { +// repack +template +int repack(struct ggml_tensor *, const void *, size_t); + +// TODO: generalise. 
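+// Note: each specialization below is keyed on <block type, interleave size, interleaved columns>
+// and simply forwards to the matching repack_*_bl helper defined above; the gemv/gemm templates
+// further down follow the same pattern, with the activation quantization type
+// (GGML_TYPE_Q8_0 or GGML_TYPE_Q8_K) added as a fourth parameter.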
+template <> int repack<block_q4_0, 4, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_q4_0_to_q4_0_4_bl(t, 4, data, data_size);
+}
+
+template <> int repack<block_q4_0, 8, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_q4_0_to_q4_0_4_bl(t, 8, data, data_size);
+}
+
+template <> int repack<block_q4_0, 8, 8>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_q4_0_to_q4_0_8_bl(t, 8, data, data_size);
+}
+
+template <> int repack<block_q4_K, 8, 8>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_q4_K_to_q4_K_8_bl(t, 8, data, data_size);
+}
+
+template <> int repack<block_iq4_nl, 4, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_iq4_nl_to_iq4_nl_4_bl(t, 4, data, data_size);
+}
+
+// TODO: needs to be revisited
+//template <> int repack<block_iq4_nl, 8, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
+//    return repack_iq4_nl_to_iq4_nl_4_bl(t, 8, data, data_size);
+//}
+
+// gemv
+template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS, ggml_type PARAM_TYPE>
+void gemv(int, float *, size_t, const void *, const void *, int, int);
+
+template <> void gemv<block_q4_0, 4, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemv<block_q4_0, 8, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemv<block_q4_0, 8, 8, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemv<block_q4_K, 8, 8, GGML_TYPE_Q8_K>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_q4_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemv<block_iq4_nl, 4, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+// gemm
+template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS, ggml_type PARAM_TYPE>
+void gemm(int, float *, size_t, const void *, const void *, int, int);
+
+template <> void gemm<block_q4_0, 4, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemm<block_q4_0, 8, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemm<block_q4_0, 8, 8, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemm<block_q4_K, 8, 8, GGML_TYPE_Q8_K>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_q4_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemm<block_iq4_nl, 4, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+class tensor_traits_base : public ggml::cpu::tensor_traits {
+  public:
+    virtual int repack(struct ggml_tensor * t, const void * data, size_t data_size) = 0;
+};
+
+template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS, ggml_type PARAM_TYPE> class tensor_traits : public tensor_traits_base {
+
+    bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override {
+        // not really a GGML_TYPE_Q8_0 but same size.
+        switch (op->op) {
+            case GGML_OP_MUL_MAT:
+                size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1]));
+                return true;
+            case GGML_OP_MUL_MAT_ID:
+                size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1]));
+                size = GGML_PAD(size, sizeof(int64_t)); // + padding for next block.
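+                // extra room for the per-expert bookkeeping (row counts and row mappings)
+                // that forward_mul_mat_id() lays out after the quantized src1 copy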
+ size += sizeof(int64_t) * (1+op->src[0]->ne[2]) * op->src[1]->ne[2]; + return true; + default: + // GGML_ABORT("fatal error"); + break; + } + return false; + } + + bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override { + switch (op->op) { + case GGML_OP_MUL_MAT: + forward_mul_mat(params, op); + return true; + case GGML_OP_MUL_MAT_ID: + forward_mul_mat_id(params, op); + return true; + default: + // GGML_ABORT("fatal error"); + break; + } + return false; + } + + void forward_mul_mat(ggml_compute_params * params, ggml_tensor * op) { + const ggml_tensor * src0 = op->src[0]; + const ggml_tensor * src1 = op->src[1]; + ggml_tensor * dst = op; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + GGML_ASSERT(ne0 == ne01); + GGML_ASSERT(ne1 == ne11); + GGML_ASSERT(ne2 == ne12); + GGML_ASSERT(ne3 == ne13); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + GGML_ASSERT(ggml_n_dims(op->src[0]) == 2); + // GGML_ASSERT(ggml_n_dims(op->src[1]) == 2); + + char * wdata = static_cast(params->wdata); + const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); + + assert(params->wsize >= nbw1 * ne11); + + const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; + + int64_t i11_processed = 0; + for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { + ggml_quantize_mat_t((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10); + } + + i11_processed = ne11 - ne11 % 4; + for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) { + from_float((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), ne10); + } + + ggml_barrier(params->threadpool); + + const void * src1_wdata = params->wdata; + const size_t src1_col_stride = ggml_row_size(PARAM_TYPE, ne10); + int64_t src0_start = (ith * ne01) / nth; + int64_t src0_end = ((ith + 1) * ne01) / nth; + src0_start = (src0_start % NB_COLS) ? src0_start + NB_COLS - (src0_start % NB_COLS) : src0_start; + src0_end = (src0_end % NB_COLS) ? src0_end + NB_COLS - (src0_end % NB_COLS) : src0_end; + if (src0_start >= src0_end) { + return; + } + + // If there are more than three rows in src1, use gemm; otherwise, use gemv. 
+ if (ne11 > 3) { + gemm(ne00, + (float *) ((char *) dst->data) + src0_start, ne01, + (const char *) src0->data + src0_start * nb01, + (const char *) src1_wdata, ne11 - ne11 % 4, src0_end - src0_start); + } + for (int iter = ne11 - ne11 % 4; iter < ne11; iter++) { + gemv(ne00, + (float *) ((char *) dst->data + (iter * nb1)) + src0_start, ne01, + (const char *) src0->data + src0_start * nb01, + (const char *) src1_wdata + (src1_col_stride * iter), 1, + src0_end - src0_start); + } + } + + void forward_mul_mat_id(ggml_compute_params * params, ggml_tensor * op) { + const ggml_tensor * src0 = op->src[0]; + const ggml_tensor * src1 = op->src[1]; + const ggml_tensor * ids = op->src[2]; + ggml_tensor * dst = op; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == ggml_type_size(src0->type)); + GGML_ASSERT(nb10 == ggml_type_size(src1->type)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ne03 == 1); + GGML_ASSERT(ne13 == 1); + GGML_ASSERT(ne3 == 1); + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + // row groups + const int n_ids = ids->ne[0]; // n_expert_used + const int n_as = ne02; // n_expert + + const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); + const size_t nbw2 = nbw1*ne11; + const size_t nbw3 = nbw2*ne12; + + struct mmid_row_mapping { + int32_t i1; + int32_t i2; + }; + + GGML_ASSERT(params->wsize >= (GGML_PAD(nbw3, sizeof(int64_t)) + n_as * sizeof(int64_t) + + n_as * ne12 * sizeof(mmid_row_mapping))); + + auto * wdata = (char *) params->wdata; + auto * wdata_src1_end = (char *) wdata + GGML_PAD(nbw3, sizeof(int64_t)); + auto * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as] + + struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *) (matrix_row_counts + n_as); // [n_as][ne12] + + // src1: float32 => param type + for (int64_t i12 = 0; i12 < ne12; ++i12) { + for (int64_t i11 = ith; i11 < ne11; i11 += nth) { + from_float((float *)((char *) src1->data + i12 * nb12 + i11 * nb11), + (void *) (wdata + i12 * nbw2 + i11 * nbw1), + ne10); + } + } + +#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id) * ne12 + (i1)] + + if (ith == 0) { + // initialize matrix_row_counts + memset(matrix_row_counts, 0, n_as * sizeof(int64_t)); + + // group rows by src0 matrix + for (int32_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) { + for (int32_t id = 0; id < n_ids; ++id) { + const int32_t i02 = + *(const int32_t *) ((const char *) ids->data + iid1 * ids->nb[1] + id * ids->nb[0]); + + GGML_ASSERT(i02 >= 0 && i02 < n_as); + + MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = { id, iid1 }; + matrix_row_counts[i02] += 1; + } + } + } + + ggml_barrier(params->threadpool); + + // compute each matrix multiplication in sequence + for (int cur_a = 0; cur_a < n_as; ++cur_a) { + const int64_t cne1 = matrix_row_counts[cur_a]; + + if (cne1 == 0) { + continue; + } + + const auto * src0_cur = (const char *) src0->data + cur_a*nb02; + + //const int64_t nr0 = ne01; // src0 rows + const int64_t nr1 = cne1; // src1 rows + + int64_t src0_cur_start = (ith * ne01) / nth; + int64_t src0_cur_end = ((ith + 1) * ne01) / nth; + + src0_cur_start = (src0_cur_start % NB_COLS) ? src0_cur_start + NB_COLS - (src0_cur_start % NB_COLS) : src0_cur_start; + src0_cur_end = (src0_cur_end % NB_COLS) ? 
src0_cur_end + NB_COLS - (src0_cur_end % NB_COLS) : src0_cur_end; + + if (src0_cur_start >= src0_cur_end) { + return; + } + + for (int ir1 = 0; ir1 < nr1; ir1++) { + struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, ir1); + + const int id = row_mapping.i1; // selected expert index + + const int64_t i11 = id % ne11; + const int64_t i12 = row_mapping.i2; // row index in src1 + + const int64_t i1 = id; // selected expert index + const int64_t i2 = i12; // row + + const auto * src1_col = (const char *) wdata + (i11 * nbw1 + i12 * nbw2); + + gemv(ne00, + (float *)((char *) dst->data + (i1 * nb1 + i2 * nb2)) + src0_cur_start, ne01, + src0_cur + src0_cur_start * nb01, + src1_col, 1, src0_cur_end - src0_cur_start); + } + } +#undef MMID_MATRIX_ROW + } + + int repack(struct ggml_tensor * t, const void * data, size_t data_size) override { + GGML_LOG_DEBUG("%s: repack tensor %s with %s_%dx%d\n", __func__, t->name, ggml_type_name(t->type), + (int) NB_COLS, (int) INTER_SIZE); + return ggml::cpu::repack::repack(t, data, data_size); + } +}; + +// instance for Q4 +static const tensor_traits q4_0_4x4_q8_0; +static const tensor_traits q4_0_4x8_q8_0; +static const tensor_traits q4_0_8x8_q8_0; +static const tensor_traits q4_K_8x8_q8_K; + +// instance for IQ4 +static const tensor_traits iq4_nl_4x4_q8_0; + +} // namespace ggml::cpu::repack + +static const ggml::cpu::tensor_traits * ggml_repack_get_optimal_repack_type(const struct ggml_tensor * cur) { + if (cur->type == GGML_TYPE_Q4_0) { + if (ggml_cpu_has_avx2() || (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0)) { + if (cur->ne[1] % 8 == 0) { + return &ggml::cpu::repack::q4_0_8x8_q8_0; + } + } + if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) { + if (cur->ne[1] % 4 == 0) { + return &ggml::cpu::repack::q4_0_4x8_q8_0; + } + } + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { + if (cur->ne[1] % 4 == 0) { + return &ggml::cpu::repack::q4_0_4x4_q8_0; + } + } + } else if (cur->type == GGML_TYPE_Q4_K) { + if (ggml_cpu_has_avx2()) { + if (cur->ne[1] % 8 == 0) { + return &ggml::cpu::repack::q4_K_8x8_q8_K; + } + } + } else if (cur->type == GGML_TYPE_IQ4_NL) { + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { + if (cur->ne[1] % 4 == 0) { + return &ggml::cpu::repack::iq4_nl_4x4_q8_0; + } + } + } + + return nullptr; +} + +static enum ggml_status ggml_backend_cpu_repack_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { + tensor->extra = (void *) const_cast(ggml_repack_get_optimal_repack_type(tensor)); + + GGML_UNUSED(buffer); + return GGML_STATUS_SUCCESS; +} + +static void ggml_backend_cpu_repack_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, + const void * data, size_t offset, size_t size) { + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(tensor)); + + auto tensor_traits = (ggml::cpu::repack::tensor_traits_base *) tensor->extra; + auto OK = tensor_traits->repack(tensor, data, size); + + GGML_ASSERT(OK == 0); + GGML_UNUSED(buffer); +} + +static const char * ggml_backend_cpu_repack_buffer_type_get_name(ggml_backend_buffer_type_t buft) { + return "CPU_REPACK"; + + GGML_UNUSED(buft); +} + +static ggml_backend_buffer_t ggml_backend_cpu_repack_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { + ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); + + if (buffer == nullptr) { + return nullptr; + } + + buffer->buft = buft; + buffer->iface.init_tensor = 
ggml_backend_cpu_repack_buffer_init_tensor; + buffer->iface.set_tensor = ggml_backend_cpu_repack_buffer_set_tensor; + buffer->iface.get_tensor = nullptr; + buffer->iface.cpy_tensor = nullptr; + return buffer; +} + +static size_t ggml_backend_cpu_repack_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { + return TENSOR_ALIGNMENT; + + GGML_UNUSED(buft); +} + +namespace ggml::cpu::repack { +class extra_buffer_type : ggml::cpu::extra_buffer_type { + bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { + if ( op->op == GGML_OP_MUL_MAT && + op->src[0]->buffer && + (ggml_n_dims(op->src[0]) == 2) && + op->src[0]->buffer->buft == ggml_backend_cpu_repack_buffer_type() && + ggml_repack_get_optimal_repack_type(op->src[0]) + ) { + if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { + return false; + } + if (op->src[1]->type == GGML_TYPE_F32) { + return true; + } + //if (op->src[1]->type == GGML_TYPE_Q8_0) { + // return true; + //} + // may be possible if Q8_0 packed... + } else if (op->op == GGML_OP_MUL_MAT_ID + && op->src[0]->buffer + && (ggml_n_dims(op->src[0]) == 3) + && op->src[0]->buffer->buft == ggml_backend_cpu_repack_buffer_type() + && ggml_repack_get_optimal_repack_type(op->src[0]) + ) { + if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { + return false; + } + if (op->src[1]->type == GGML_TYPE_F32) { + return true; + } + //if (op->src[1]->type == GGML_TYPE_Q8_0) { + // return true; + //} + } + return false; + } + + ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override { + if (op->op == GGML_OP_MUL_MAT || op->op == GGML_OP_MUL_MAT_ID) { + if (op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_cpu_repack_buffer_type()) { + return (ggml::cpu::tensor_traits *) op->src[0]->extra; + } + } + return nullptr; + } +}; +} // namespace ggml::cpu::repack + +ggml_backend_buffer_type_t ggml_backend_cpu_repack_buffer_type(void) { + static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_repack = { + /* .iface = */ { + /* .get_name = */ ggml_backend_cpu_repack_buffer_type_get_name, + /* .alloc_buffer = */ ggml_backend_cpu_repack_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_cpu_repack_buffer_type_get_alignment, + /* .get_max_size = */ nullptr, // defaults to SIZE_MAX + /* .get_alloc_size = */ nullptr, // defaults to ggml_nbytes + /* .is_host = */ nullptr, + }, + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), + /* .context = */ new ggml::cpu::repack::extra_buffer_type(), + }; + + return &ggml_backend_cpu_buffer_type_repack; +} diff --git a/ggml/src/ggml-cpu/repack.h b/ggml/src/ggml-cpu/repack.h new file mode 100644 index 0000000000000..8ee6e92ea96b8 --- /dev/null +++ b/ggml/src/ggml-cpu/repack.h @@ -0,0 +1,119 @@ +#pragma once + +#define GGML_COMMON_DECL_CPP +#include "ggml-common.h" + +#include "traits.h" +#include "ggml.h" + +// GGML internal header + +ggml_backend_buffer_type_t ggml_backend_cpu_repack_buffer_type(void); + +template constexpr int QK_0() { + if constexpr (K == 4) { + return QK4_0; + } + if constexpr (K == 8) { + return QK8_0; + } + return -1; +} + +template struct block { + ggml_half d[N]; // deltas for N qK_0 blocks + int8_t qs[(QK_0() * N * K) / 8]; // quants for N qK_0 blocks +}; + +// control size +static_assert(sizeof(block<4, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 2, "wrong block<4,4> size/padding"); +static_assert(sizeof(block<4, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<4,8> 
size/padding"); +static_assert(sizeof(block<8, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<8,4> size/padding"); +static_assert(sizeof(block<8, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 8, "wrong block<8,8> size/padding"); + +using block_q4_0x4 = block<4, 4>; +using block_q4_0x8 = block<4, 8>; +using block_q8_0x4 = block<8, 4>; +using block_q8_0x8 = block<8, 8>; + +struct block_q4_Kx8 { + ggml_half d[8]; // super-block scale for quantized scales + ggml_half dmin[8]; // super-block scale for quantized mins + uint8_t scales[96]; // scales and mins, quantized with 6 bits + uint8_t qs[1024]; // 4--bit quants +}; + +static_assert(sizeof(block_q4_Kx8) == sizeof(ggml_half) * 16 + K_SCALE_SIZE * 8 + QK_K * 4, "wrong q4_K block size/padding"); + +struct block_q8_Kx4 { + float d[4]; // delta + int8_t qs[QK_K * 4]; // quants + int16_t bsums[QK_K / 4]; // sum of quants in groups of 16 +}; + +static_assert(sizeof(block_q8_Kx4) == sizeof(float) * 4 + QK_K * 4 + (QK_K / 4) * sizeof(int16_t), "wrong q8_K block size/padding"); + +struct block_iq4_nlx4 { + ggml_half d[4]; // deltas for 4 iq4_nl blocks + uint8_t qs[QK4_NL * 2]; // nibbles / quants for 4 iq4_nl blocks +}; + +static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wrong iq4_nlx4 block size/padding"); + +#if defined(__cplusplus) +extern "C" { +#endif + +// Workaround for clang: +// clang++ complains: ``error: call to 'ggml_gemm_q4_0_4x4_q8_0' is ambiguous'' +// repro: https://godbolt.org/z/oKdeWKonM (ICE), https://godbolt.org/z/1szq6P36v (ambiguous call) +#if defined(GGML_CPU_CLANG_WORKAROUND) || !(defined(__GNUC__) && defined(__clang__)) || defined(__HIPCC__) +void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); +void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); +void ggml_quantize_mat_q8_K_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); +void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemv_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +#endif // !defined(__clang__) + +// Native implementations +void 
ggml_quantize_mat_q8_0_4x4_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); +void ggml_quantize_mat_q8_0_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); +void ggml_quantize_mat_q8_K_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); +void ggml_gemv_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemv_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemv_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemv_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemv_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemm_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemm_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); +void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); + +#if defined(GGML_CPU_GENERIC) +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#endif + +#if defined(__cplusplus) +} // extern "C" +#endif diff --git a/ggml/src/ggml-cpu/ggml-cpu-traits.cpp b/ggml/src/ggml-cpu/traits.cpp similarity index 97% rename from ggml/src/ggml-cpu/ggml-cpu-traits.cpp rename to ggml/src/ggml-cpu/traits.cpp index 62a0712dabbf6..139fa59641440 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-traits.cpp +++ b/ggml/src/ggml-cpu/traits.cpp @@ -1,4 +1,4 @@ -#include "ggml-cpu-traits.h" +#include "traits.h" #include "ggml-backend-impl.h" #include "ggml-backend.h" diff --git a/ggml/src/ggml-cpu/ggml-cpu-traits.h b/ggml/src/ggml-cpu/traits.h similarity index 100% rename from ggml/src/ggml-cpu/ggml-cpu-traits.h rename to ggml/src/ggml-cpu/traits.h diff --git a/ggml/src/ggml-cuda/common.cuh 
b/ggml/src/ggml-cuda/common.cuh index e1ce1d4cd1558..a82ec26ee1a2d 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -466,9 +466,6 @@ static __device__ __forceinline__ int ggml_cuda_dp4a(const int a, const int b, i #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) } -// TODO: move to ggml-common.h -static constexpr __device__ int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113}; - typedef void (*dequantize_kernel_t)(const void * vx, const int64_t ib, const int iqs, dfloat2 & v); static __device__ __forceinline__ float get_alibi_slope( diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c index 84ec6dfe31bfc..e389a46dbed87 100644 --- a/ggml/src/ggml-quants.c +++ b/ggml/src/ggml-quants.c @@ -2425,8 +2425,6 @@ void dequantize_row_iq1_m(const block_iq1_m * GGML_RESTRICT x, float * GGML_REST } } -static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113}; - void dequantize_row_iq4_nl(const block_iq4_nl * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK4_NL == 0); const int64_t nb = k / QK4_NL; diff --git a/ggml/src/ggml-sycl/common.hpp b/ggml/src/ggml-sycl/common.hpp index 15ee9dc69d149..4f17699a5fcc1 100644 --- a/ggml/src/ggml-sycl/common.hpp +++ b/ggml/src/ggml-sycl/common.hpp @@ -149,8 +149,6 @@ typedef sycl::float2 dfloat2; #define MMVQ_MAX_BATCH_SIZE 8 -static const int8_t kvalues_iq4nl[16]={-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113}; - static int g_all_sycl_device_count = -1; static bool g_ggml_backend_sycl_buffer_type_initialized = false; From f707247b28e76231050aed3a5fae67ae1dcabceb Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Mon, 9 Jun 2025 11:03:09 -0700 Subject: [PATCH 010/192] llama : allow building all tests on windows when not using shared libs (#13980) * llama : allow building all tests on windows when not using shared libraries * add static windows build to ci * tests : enable debug logs for test-chat --------- Co-authored-by: Georgi Gerganov --- .github/workflows/build.yml | 4 ++-- tests/CMakeLists.txt | 4 ++-- tests/test-chat.cpp | 4 ++++ 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 867a589ce1648..3c9804d437cdc 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -687,8 +687,8 @@ jobs: strategy: matrix: include: - - build: 'cpu-x64' - defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF' + - build: 'cpu-x64 (static)' + defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=OFF' - build: 'openblas-x64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' - build: 'vulkan-x64' diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 83f7d1a4584f7..2f7bad2cf7ec9 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -104,8 +104,8 @@ if (LLAMA_LLGUIDANCE) 
llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf) endif () -if (NOT WIN32) - # these tests are disabled on Windows because they use internal functions not exported with LLAMA_API +if (NOT WIN32 OR NOT BUILD_SHARED_LIBS) + # these tests are disabled on Windows because they use internal functions not exported with LLAMA_API (when building with shared libraries) llama_build_and_test(test-sampling.cpp) llama_build_and_test(test-grammar-parser.cpp) llama_build_and_test(test-grammar-integration.cpp) diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp index c6d998f101912..6ebf1464d911a 100644 --- a/tests/test-chat.cpp +++ b/tests/test-chat.cpp @@ -7,6 +7,8 @@ // #include "chat.h" +#include "log.h" + #include "../src/unicode.h" #include "../src/llama-grammar.h" @@ -1428,6 +1430,8 @@ static void test_msg_diffs_compute() { } int main(int argc, char ** argv) { + common_log_set_verbosity_thold(999); + // try { #ifndef _WIN32 if (argc > 1) { From 634135638efcf03edc0076d685c196b1a2c87a0d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 9 Jun 2025 23:04:35 +0300 Subject: [PATCH 011/192] kv-cache : fix shift and defrag logic (#14081) * kv-cache : fix shift ggml-ci * cont : reset shift[i] ggml-ci * cont : fix defrag erasing cells that didn't move ggml-ci --- src/llama-kv-cache-unified.cpp | 6 ++---- src/llama-kv-cells.h | 15 ++++++++++----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index 3a40463fd29ca..3566d5fd4d72b 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -462,7 +462,7 @@ bool llama_kv_cache_unified::update(llama_context * lctx, bool do_shift, const d for (uint32_t i = 0; i < n_kv; ++i) { assert(dinfo.ids[i] <= n_kv); - if (dinfo.ids[i] == n_kv) { + if (dinfo.ids[i] == n_kv || dinfo.ids[i] == i) { continue; } @@ -944,11 +944,9 @@ llm_graph_result_ptr llama_kv_cache_unified::build_graph_shift( const auto & n_embd_head_k = hparams.n_embd_head_k; //const auto & n_embd_head_v = hparams.n_embd_head_v; - //GGML_ASSERT(kv_self->size == n_ctx); - auto inp = std::make_unique(this); - inp->k_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, cparams.n_ctx); + inp->k_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, cells.size()); ggml_set_input(inp->k_shift); for (const auto & layer : layers) { diff --git a/src/llama-kv-cells.h b/src/llama-kv-cells.h index 9e2c4d927699d..acf30aebec69b 100644 --- a/src/llama-kv-cells.h +++ b/src/llama-kv-cells.h @@ -80,6 +80,9 @@ class llama_kv_cells_unified { assert(isrc < pos.size()); assert(idst < pos.size()); + assert(pos[idst] == -1); + assert(pos[isrc] != -1); + pos [idst] = pos [isrc]; shift[idst] = shift[isrc]; seq [idst] = seq [isrc]; @@ -144,9 +147,10 @@ class llama_kv_cells_unified { assert(pos[i] != -1); seq_pos_rm(i); + seq[i].reset(); pos[i] = -1; - seq[i].reset(); + shift[i] = 0; used.erase(i); } @@ -164,6 +168,7 @@ class llama_kv_cells_unified { if (seq[i].none()) { pos[i] = -1; + shift[i] = 0; used.erase(i); @@ -192,6 +197,7 @@ class llama_kv_cells_unified { seq[i].reset(); pos[i] = -1; + shift[i] = 0; used.erase(i); @@ -317,21 +323,20 @@ class llama_kv_cells_unified { pos[i] += d; shift[i] += d; - seq_pos_add(i); - has_shift = true; if (pos[i] < 0) { - seq_pos_rm(i); - seq[i].reset(); pos[i] = -1; + shift[i] = 0; used.erase(i); return true; } + seq_pos_add(i); + return false; } From 9b903083e5e49dc6f25de81dd532b1c069edf518 Mon Sep 17 00:00:00 2001 From: Georgi 
Gerganov Date: Mon, 9 Jun 2025 23:05:02 +0300 Subject: [PATCH 012/192] metal : use less stack memory in FA kernel (#14088) * metal : use less stack memory in FA kernel ggml-ci * cont : fix BF16 variant --- ggml/src/ggml-metal/ggml-metal.metal | 113 +++++++++++++-------------- 1 file changed, 54 insertions(+), 59 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index 58763e39e8353..5d7760217f826 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -3333,8 +3333,6 @@ kernel void kernel_flash_attn_ext( threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*DK); // holds the query data threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*DK); // same as above but in q4_t - threadgroup o_t * so = (threadgroup o_t *) (shmem_f16 + 0*DK); // reuse query data for accumulation - threadgroup o4_t * so4 = (threadgroup o4_t *) (shmem_f16 + 0*DK); // same as above but in o4_t threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + 2*sgitg*SH + 2*Q*DK); // scratch buffer for attention, mask and diagonal matrix threadgroup k_t * sk = (threadgroup k_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T); // scratch buffer to load K in shared memory @@ -3548,20 +3546,20 @@ kernel void kernel_flash_attn_ext( // O = diag(ms)*O { - s8x8_t mm; - simdgroup_load(mm, ss + 2*C, TS, 0, false); + s8x8_t ms; + simdgroup_load(ms, ss + 2*C, TS, 0, false); #pragma unroll(DV8) for (short i = 0; i < DV8; ++i) { - simdgroup_multiply(lo[i], mm, lo[i]); + simdgroup_multiply(lo[i], ms, lo[i]); } } // O = O + (Q*K^T)*V { for (short cc = 0; cc < C/8; ++cc) { - s8x8_t ms; - simdgroup_load(ms, ss + 8*cc, TS, 0, false); + s8x8_t vs; + simdgroup_load(vs, ss + 8*cc, TS, 0, false); if (is_same::value) { // we can read directly from global memory @@ -3572,7 +3570,7 @@ kernel void kernel_flash_attn_ext( v8x8_t mv; simdgroup_load(mv, pv + i*8, args.nb21/sizeof(v_t), 0, false); // TODO: use ne20 - simdgroup_multiply_accumulate(lo[i], ms, mv, lo[i]); + simdgroup_multiply_accumulate(lo[i], vs, mv, lo[i]); } } else { for (short ii = 0; ii < DV16; ii += 4) { @@ -3593,10 +3591,10 @@ kernel void kernel_flash_attn_ext( v8x8_t mv; simdgroup_load(mv, sv + 16*k + 0*8, 4*16, 0, false); - simdgroup_multiply_accumulate(lo[2*(ii + k) + 0], ms, mv, lo[2*(ii + k) + 0]); + simdgroup_multiply_accumulate(lo[2*(ii + k) + 0], vs, mv, lo[2*(ii + k) + 0]); simdgroup_load(mv, sv + 16*k + 1*8, 4*16, 0, false); - simdgroup_multiply_accumulate(lo[2*(ii + k) + 1], ms, mv, lo[2*(ii + k) + 1]); + simdgroup_multiply_accumulate(lo[2*(ii + k) + 1], vs, mv, lo[2*(ii + k) + 1]); } } else { if (ii + tx < DV16) { @@ -3611,10 +3609,10 @@ kernel void kernel_flash_attn_ext( v8x8_t mv; simdgroup_load(mv, sv + 16*k + 0*8, 4*16, 0, false); - simdgroup_multiply_accumulate(lo[2*(ii + k) + 0], ms, mv, lo[2*(ii + k) + 0]); + simdgroup_multiply_accumulate(lo[2*(ii + k) + 0], vs, mv, lo[2*(ii + k) + 0]); simdgroup_load(mv, sv + 16*k + 1*8, 4*16, 0, false); - simdgroup_multiply_accumulate(lo[2*(ii + k) + 1], ms, mv, lo[2*(ii + k) + 1]); + simdgroup_multiply_accumulate(lo[2*(ii + k) + 1], vs, mv, lo[2*(ii + k) + 1]); } } } @@ -3624,83 +3622,80 @@ kernel void kernel_flash_attn_ext( } // these are needed for reducing the results from the simdgroups (reuse the ss buffer) - for (short j = 0; j < Q; ++j) { - if (tiisg == 0) { - ss[j*TS + 0] = S[j]; - ss[j*TS + 1] = M[j]; - } + for (short j = tiisg; j < Q; j += NW) { + ss[j*TS + 0] = S[j]; + ss[j*TS + 1] = M[j]; } } - // reduce the warps sequentially - 
for (ushort sg = 1; sg < nsg; ++sg) { - threadgroup_barrier(mem_flags::mem_threadgroup); + threadgroup_barrier(mem_flags::mem_threadgroup); - // each simdgroup stores its output to shared memory, reusing sq - if (sgitg == sg) { - for (short i = 0; i < DV8; ++i) { - simdgroup_store(lo[i], so + i*8, DV, 0, false); - } + threadgroup float * so = (threadgroup float *) (shmem_f16 + 0*DK); // reuse query data for accumulation + threadgroup float4 * so4 = (threadgroup float4 *) (shmem_f16 + 0*DK); + + // store result to shared memory in F32 + if (sgitg == 0) { + for (short i = 0; i < DV8; ++i) { + //simdgroup_store(lo[i], so + i*8, DV, 0, false); + simdgroup_float8x8 t(1.0f); + simdgroup_multiply(t, lo[i], t); + simdgroup_store(t, so + i*8, DV, 0, false); } + } - threadgroup_barrier(mem_flags::mem_threadgroup); + threadgroup_barrier(mem_flags::mem_threadgroup); - // the first simdgroup accumulates the results from the other simdgroups - if (sgitg == 0) { - for (short j = 0; j < Q; ++j) { - const float S0 = ss[j*TS + 0]; - const float S1 = ss[j*TS + sg*SH + 0]; + // reduce the warps sequentially + for (ushort sg = 1; sg < nsg; ++sg) { + if (sgitg == sg) { + for (short j = tiisg; j < Q; j += NW) { + const float S0 = ss[j*TS - 1*SH + 0]; + const float S1 = ss[j*TS + 0]; - const float M0 = ss[j*TS + 1]; - const float M1 = ss[j*TS + sg*SH + 1]; + const float M0 = ss[j*TS - 1*SH + 1]; + const float M1 = ss[j*TS + 1]; const float M = max(M0, M1); - const float ms0 = exp(M0 - M); - const float ms1 = exp(M1 - M); + float ms0 = exp(M0 - M); + float ms1 = exp(M1 - M); const float S = S0*ms0 + S1*ms1; - if (tiisg == 0) { - ss[j*TS + 0] = S; - ss[j*TS + 1] = M; + ss[j*TS + 0] = S; + ss[j*TS + 1] = M; - ss[j*TS + 2*C + j ] = ms0; - ss[j*TS + 2*C + j + sg*SH] = ms1; - } + ss[j*TS + 2*C + j - 1*SH] = ms0; + ss[j*TS + 2*C + j ] = ms1; } + //simdgroup_barrier(mem_flags::mem_threadgroup); + // O_0 = diag(ms0)*O_0 + diag(ms1)*O_1 { s8x8_t ms0; s8x8_t ms1; - simdgroup_load(ms0, ss + 2*C, TS, 0, false); - simdgroup_load(ms1, ss + 2*C + sg*SH, TS, 0, false); + simdgroup_load(ms0, ss + 2*C - 1*SH, TS, 0, false); + simdgroup_load(ms1, ss + 2*C, TS, 0, false); #pragma unroll(DV8) for (short i = 0; i < DV8; ++i) { - o8x8_t t; + simdgroup_float8x8 t; simdgroup_load (t, so + i*8, DV, 0, false); - simdgroup_multiply(t, ms1, t); + simdgroup_multiply(t, ms0, t); - simdgroup_multiply_accumulate(lo[i], ms0, lo[i], t); + simdgroup_multiply_accumulate(t, ms1, lo[i], t); + simdgroup_store(t, so + i*8, DV, 0, false); } } } - } - // store result to shared memory (reuse sq) - if (sgitg == 0) { - for (short i = 0; i < DV8; ++i) { - simdgroup_store(lo[i], so + i*8, DV, 0, false); - } + threadgroup_barrier(mem_flags::mem_threadgroup); } - threadgroup_barrier(mem_flags::mem_threadgroup); - - threadgroup s_t * sf = (threadgroup s_t *) (shmem_f16 + 2*Q*DK); + threadgroup s_t * sf = (threadgroup s_t *) (shmem_f16 + 2*(nsg-1)*SH + 2*Q*DK); // final rescale with 1/S and store to global memory for (short j = sgitg; j < Q && iq1 + j < args.ne01; j += nsg) { @@ -3723,8 +3718,8 @@ kernel void kernel_flash_attn_ext( half, half4x4, simdgroup_half8x8, \ float, simdgroup_float8x8, \ float, simdgroup_float8x8, \ - float, float4, simdgroup_float8x8 - //half, half4, simdgroup_half8x8 + half, half4, simdgroup_half8x8 + //float, float4, simdgroup_float8x8 #define FA_TYPES_BF \ bfloat, bfloat4, simdgroup_bfloat8x8, \ @@ -3732,8 +3727,8 @@ kernel void kernel_flash_attn_ext( bfloat, bfloat4x4, simdgroup_bfloat8x8, \ float, simdgroup_float8x8, \ float, 
simdgroup_float8x8, \ - float, float4, simdgroup_float8x8 - //half, half4, simdgroup_half8x8 + half, half4, simdgroup_half8x8 + //float, float4, simdgroup_float8x8 typedef decltype(kernel_flash_attn_ext) flash_attn_ext_t; From 5d9c218f381fcf9004fdfafa3525680780b8b97c Mon Sep 17 00:00:00 2001 From: Kai Pastor Date: Tue, 3 Jun 2025 12:33:28 +0200 Subject: [PATCH 013/192] Add in-build ggml::ggml ALIAS library (ggml/1260) Enable uniform linking with subproject and with find_package. --- ggml/src/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index 7dcb031f0f9c6..d91dbc46fe9e1 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -212,6 +212,7 @@ endif() add_library(ggml ggml-backend-reg.cpp) +add_library(ggml::ggml ALIAS ggml) target_link_libraries(ggml PUBLIC ggml-base) From f6eca5c8b340cd018ae1b3cf10678b9f60c06e24 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 10 Jun 2025 09:20:51 +0300 Subject: [PATCH 014/192] sync : ggml ggml-ci --- scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index aa0fb8fb02001..490115500d224 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -94a83ba5a725ae2aee79df75dd99b2119d0478cc +a1761cd64ce4a75a9770a954a013422c7910db8b From e227eef2ea55354c0182de364d648e805d5ebd38 Mon Sep 17 00:00:00 2001 From: Isaac McFadyen Date: Tue, 10 Jun 2025 02:41:01 -0400 Subject: [PATCH 015/192] rpc : nicer error messages for RPC server crash (#14076) --- ggml/src/ggml-rpc/ggml-rpc.cpp | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/ggml/src/ggml-rpc/ggml-rpc.cpp b/ggml/src/ggml-rpc/ggml-rpc.cpp index 4f0abb5a60f48..f468f796d5773 100644 --- a/ggml/src/ggml-rpc/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc/ggml-rpc.cpp @@ -53,6 +53,9 @@ struct socket_t { } }; +// macro for nicer error messages on server crash +#define RPC_STATUS_ASSERT(x) if (!(x)) GGML_ABORT("Remote RPC server crashed or returned malformed response") + // all RPC structures must be packed #pragma pack(push, 1) // ggml_tensor is serialized into rpc_tensor @@ -425,7 +428,7 @@ static bool send_rpc_cmd(const std::shared_ptr & sock, enum rpc_cmd cm static bool check_server_version(const std::shared_ptr & sock) { rpc_msg_hello_rsp response; bool status = send_rpc_cmd(sock, RPC_CMD_HELLO, nullptr, 0, &response, sizeof(response)); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); if (response.major != RPC_PROTO_MAJOR_VERSION || response.minor > RPC_PROTO_MINOR_VERSION) { fprintf(stderr, "RPC server version mismatch: %d.%d.%d\n", response.major, response.minor, response.patch); return false; @@ -481,7 +484,7 @@ static void ggml_backend_rpc_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; rpc_msg_free_buffer_req request = {ctx->remote_ptr}; bool status = send_rpc_cmd(ctx->sock, RPC_CMD_FREE_BUFFER, &request, sizeof(request), nullptr, 0); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); delete ctx; } @@ -493,7 +496,7 @@ static void * ggml_backend_rpc_buffer_get_base(ggml_backend_buffer_t buffer) { rpc_msg_buffer_get_base_req request = {ctx->remote_ptr}; rpc_msg_buffer_get_base_rsp response; bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_GET_BASE, &request, sizeof(request), &response, sizeof(response)); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); ctx->base_ptr = 
reinterpret_cast(response.base_ptr); return ctx->base_ptr; } @@ -545,7 +548,7 @@ static enum ggml_status ggml_backend_rpc_buffer_init_tensor(ggml_backend_buffer_ request.tensor = serialize_tensor(tensor); bool status = send_rpc_cmd(ctx->sock, RPC_CMD_INIT_TENSOR, &request, sizeof(request), nullptr, 0); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); } return GGML_STATUS_SUCCESS; } @@ -560,7 +563,7 @@ static void ggml_backend_rpc_buffer_set_tensor(ggml_backend_buffer_t buffer, ggm request.hash = fnv_hash((const uint8_t*)data, size); rpc_msg_set_tensor_hash_rsp response; bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR_HASH, &request, sizeof(request), &response, sizeof(response)); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); if (response.result) { // the server has the same data, no need to send it return; @@ -573,7 +576,7 @@ static void ggml_backend_rpc_buffer_set_tensor(ggml_backend_buffer_t buffer, ggm memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset)); memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), data, size); bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR, input.data(), input.size()); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); } static void ggml_backend_rpc_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { @@ -583,7 +586,7 @@ static void ggml_backend_rpc_buffer_get_tensor(ggml_backend_buffer_t buffer, con request.offset = offset; request.size = size; bool status = send_rpc_cmd(ctx->sock, RPC_CMD_GET_TENSOR, &request, sizeof(request), data, size); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); } static bool ggml_backend_rpc_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) { @@ -601,7 +604,7 @@ static bool ggml_backend_rpc_buffer_cpy_tensor(ggml_backend_buffer_t buffer, con request.dst = serialize_tensor(dst); rpc_msg_copy_tensor_rsp response; bool status = send_rpc_cmd(ctx->sock, RPC_CMD_COPY_TENSOR, &request, sizeof(request), &response, sizeof(response)); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); return response.result; } @@ -609,7 +612,7 @@ static void ggml_backend_rpc_buffer_clear(ggml_backend_buffer_t buffer, uint8_t ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; rpc_msg_buffer_clear_req request = {ctx->remote_ptr, value}; bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_CLEAR, &request, sizeof(request), nullptr, 0); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); } static ggml_backend_buffer_i ggml_backend_rpc_buffer_interface = { @@ -635,7 +638,7 @@ static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer(ggml_back rpc_msg_alloc_buffer_rsp response; auto sock = get_socket(buft_ctx->endpoint); bool status = send_rpc_cmd(sock, RPC_CMD_ALLOC_BUFFER, &request, sizeof(request), &response, sizeof(response)); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); if (response.remote_ptr != 0) { ggml_backend_buffer_t buffer = ggml_backend_buffer_init(buft, ggml_backend_rpc_buffer_interface, @@ -650,7 +653,7 @@ static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer(ggml_back static size_t get_alignment(const std::shared_ptr & sock) { rpc_msg_get_alignment_rsp response; bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALIGNMENT, nullptr, 0, &response, sizeof(response)); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); return response.alignment; } @@ -662,7 +665,7 @@ static size_t 
ggml_backend_rpc_buffer_type_get_alignment(ggml_backend_buffer_typ static size_t get_max_size(const std::shared_ptr & sock) { rpc_msg_get_max_size_rsp response; bool status = send_rpc_cmd(sock, RPC_CMD_GET_MAX_SIZE, nullptr, 0, &response, sizeof(response)); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); return response.max_size; } @@ -683,7 +686,7 @@ static size_t ggml_backend_rpc_buffer_type_get_alloc_size(ggml_backend_buffer_ty rpc_msg_get_alloc_size_rsp response; bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALLOC_SIZE, &request, sizeof(request), &response, sizeof(response)); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); return response.alloc_size; } else { @@ -761,7 +764,7 @@ static enum ggml_status ggml_backend_rpc_graph_compute(ggml_backend_t backend, g rpc_msg_graph_compute_rsp response; auto sock = get_socket(rpc_ctx->endpoint); bool status = send_rpc_cmd(sock, RPC_CMD_GRAPH_COMPUTE, input.data(), input.size(), &response, sizeof(response)); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); return (enum ggml_status)response.result; } @@ -835,7 +838,7 @@ bool ggml_backend_is_rpc(ggml_backend_t backend) { static void get_device_memory(const std::shared_ptr & sock, size_t * free, size_t * total) { rpc_msg_get_device_memory_rsp response; bool status = send_rpc_cmd(sock, RPC_CMD_GET_DEVICE_MEMORY, nullptr, 0, &response, sizeof(response)); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); *free = response.free_mem; *total = response.total_mem; } From 23757443120d0335480ef5f2e00b1306be0274a5 Mon Sep 17 00:00:00 2001 From: 0cc4m Date: Tue, 10 Jun 2025 14:01:33 +0200 Subject: [PATCH 016/192] Vulkan: Don't default to CPU device (like llvmpipe), even if no other device is available, to allow fallback to CPU backend (#14099) --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 3e43b03bc446a..8ccc73e7422fe 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -3595,11 +3595,11 @@ static void ggml_vk_instance_init() { vk_perf_logger_enabled = getenv("GGML_VK_PERF_LOGGER") != nullptr; - size_t num_available_devices = vk_instance.instance.enumeratePhysicalDevices().size(); - // Emulate behavior of CUDA_VISIBLE_DEVICES for Vulkan char * devices_env = getenv("GGML_VK_VISIBLE_DEVICES"); if (devices_env != nullptr) { + size_t num_available_devices = vk_instance.instance.enumeratePhysicalDevices().size(); + std::string devices(devices_env); std::replace(devices.begin(), devices.end(), ',', ' '); @@ -3615,9 +3615,9 @@ static void ggml_vk_instance_init() { } else { std::vector devices = vk_instance.instance.enumeratePhysicalDevices(); - // Make sure at least one device exists + // If no vulkan devices are found, return early if (devices.empty()) { - std::cerr << "ggml_vulkan: Error: No devices found." << std::endl; + GGML_LOG_INFO("ggml_vulkan: No devices found.\n"); return; } @@ -3700,9 +3700,20 @@ static void ggml_vk_instance_init() { } } - // If no dedicated GPUs found, fall back to GPU 0 + // If no dedicated GPUs found, fall back to the first non-CPU device. + // If only CPU devices are available, return without devices. 
+ if (vk_instance.device_indices.empty()) { + for (size_t i = 0; i < devices.size(); i++) { + if (devices[i].getProperties().deviceType != vk::PhysicalDeviceType::eCpu) { + vk_instance.device_indices.push_back(i); + break; + } + } + } + if (vk_instance.device_indices.empty()) { - vk_instance.device_indices.push_back(0); + GGML_LOG_INFO("ggml_vulkan: No devices found.\n"); + return; } } GGML_LOG_DEBUG("ggml_vulkan: Found %zu Vulkan devices:\n", vk_instance.device_indices.size()); From b4d1bcba96bfbdbd1894b34e6a1684186ab2a1d2 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 10 Jun 2025 11:34:10 +0300 Subject: [PATCH 017/192] ggml : fix weak alias win32 (whisper/0) ggml-ci --- ggml/src/ggml-cpu/ggml-cpu-impl.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h index 337d8094e8092..69415daa82025 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-impl.h +++ b/ggml/src/ggml-cpu/ggml-cpu-impl.h @@ -518,11 +518,14 @@ void ggml_barrier(struct ggml_threadpool * tp); #elif defined(__GNUC__) // GCC/Clang on *nix # define GGML_WEAK_ALIAS(name, alias) GGML_DO_PRAGMA(weak name = alias) // NOLINT -#elif defined(_MSC_VER) && defined (_WIN64) +#elif defined(_MSC_VER) && defined(_WIN64) // MSVC // Note: C name mangling varies across different calling conventions // see https://learn.microsoft.com/en-us/cpp/build/reference/decorated-names?view=msvc-170 # define GGML_WEAK_ALIAS(name, alias) GGML_DO_PRAGMA(comment(linker, "/alternatename:" #name "=" #alias)) +#elif defined(_MSC_VER) && defined(WIN32) +// ref: https://github.com/ggml-org/whisper.cpp/pull/3239#issuecomment-2958224591 +# define GGML_WEAK_ALIAS(name, alias) GGML_DO_PRAGMA(comment(linker, "/alternatename:_" #name "=_" #alias)) #else # error "Unsupported compiler for GGML_WEAK_ALIAS" #endif From c13372c5b64caed97ee563c44551abc0d0d60a77 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 10 Jun 2025 17:37:45 +0300 Subject: [PATCH 018/192] sync : ggml ggml-ci --- scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index 490115500d224..914fe47ff6a34 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -a1761cd64ce4a75a9770a954a013422c7910db8b +6a7d170c04789f6ebcf320ed03c1b16973f93bd7 From 50162e6a45856c2b7c62b014e7b17a7f906bdaf9 Mon Sep 17 00:00:00 2001 From: Juk Armstrong <69222624+jukofyork@users.noreply.github.com> Date: Tue, 10 Jun 2025 16:48:07 +0100 Subject: [PATCH 019/192] Fixed spec timings to: accepted/tested instead of accepted/drafted (#14104) --- tools/server/server.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/server/server.cpp b/tools/server/server.cpp index 77dcbc11bf1f0..3b5e03528e2d7 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -3556,9 +3556,6 @@ struct server_context { const llama_tokens & cached_text_tokens = slot.cache_tokens.get_text_tokens(); llama_tokens draft = common_speculative_gen_draft(slot.spec, params_spec, cached_text_tokens, id); - // keep track of total number of tokens generated in the draft - slot.n_draft_total += draft.size(); - // ignore small drafts if (slot.params.speculative.n_min > (int) draft.size()) { SLT_DBG(slot, "ignoring small draft: %d < %d\n", (int) draft.size(), slot.params.speculative.n_min); @@ -3566,6 +3563,9 @@ struct server_context { continue; } + // keep track of total number of drafted tokens tested + slot.n_draft_total += draft.size(); 
+ // construct the speculation batch common_batch_clear(slot.batch_spec); common_batch_add (slot.batch_spec, id, slot.n_past, { slot.id }, true); @@ -3584,7 +3584,7 @@ struct server_context { slot.n_past += ids.size(); slot.n_decoded += ids.size(); - // update how many tokens out of draft was accepted + // update how many tokens out of those tested were accepted slot.n_draft_accepted += ids.size() - 1; slot.cache_tokens.push_back(id); From 11d326598835a62d3c5fe78512e936476bde0fcd Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Tue, 10 Jun 2025 10:53:47 -0500 Subject: [PATCH 020/192] vulkan: force device 0 in CI (#14106) --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3c9804d437cdc..5422dd81723f9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -306,6 +306,7 @@ jobs: id: cmake_test run: | cd build + export GGML_VK_VISIBLE_DEVICES=0 # This is using llvmpipe and runs slower than other backends ctest -L main --verbose --timeout 3600 From 627669e84b88463210f05a09ed8a0f365220f996 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Tue, 10 Jun 2025 18:02:08 +0200 Subject: [PATCH 021/192] llama : support GEGLU for jina-bert-v2 (#14090) --- convert_hf_to_gguf.py | 27 --------------------------- gguf-py/gguf/tensor_mapping.py | 6 ++++-- src/llama-graph.cpp | 3 ++- src/llama-model.cpp | 6 +++--- 4 files changed, 9 insertions(+), 33 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 7b9893c8a3e10..a208c42ba9a8b 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -4798,25 +4798,6 @@ def prepare_tensors(self): class JinaBertV2Model(BertModel): model_arch = gguf.MODEL_ARCH.JINA_BERT_V2 - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.intermediate_size = self.hparams["intermediate_size"] - - def get_tensors(self): - for name, data in super().get_tensors(): - if 'gated_layer' in name: - d1 = data[:self.intermediate_size, :] - name1 = name.replace('gated_layers', 'gated_layers_w') - name1 = name1.replace('up_gated_layer', 'gated_layers_v') - d2 = data[self.intermediate_size:, :] - name2 = name.replace('gated_layers', 'gated_layers_v') - name2 = name2.replace('up_gated_layer', 'gated_layers_w') - yield name1, d1 - yield name2, d2 - continue - - yield name, data - def set_vocab(self): tokenizer_class = 'BertTokenizer' with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f: @@ -4832,14 +4813,6 @@ def set_vocab(self): self.gguf_writer.add_add_bos_token(True) self.gguf_writer.add_add_eos_token(True) - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # if name starts with "bert.", remove the prefix - # e.g. 
https://huggingface.co/jinaai/jina-reranker-v1-tiny-en - if name.startswith("bert."): - name = name[5:] - - return super().modify_tensors(data_torch, name, bid) - @ModelBase.register("OpenELMForCausalLM") class OpenELMModel(TextModel): diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 93dd1d8028f3d..439fc1afeeb0c 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -333,7 +333,9 @@ class TensorNameMap: "encoder.layers.{bid}.mlp.fc11", # nomic-bert "encoder.layers.{bid}.mlp.fc1", # nomic-bert-moe "model.layers.{bid}.mlp.c_fc", # starcoder2 - "encoder.layer.{bid}.mlp.gated_layers_v", # jina-bert-v2 + "encoder.layer.{bid}.mlp.gated_layers_v", # jina-bert-v2 (split up/gate, no longer used) + "encoder.layer.{bid}.mlp.gated_layers", # jina-bert-v2 (GEGLU) + "encoder.layer.{bid}.mlp.up_gated_layer", # jina-v2-code (GEGLU) "model.layers.{bid}.residual_mlp.w3", # arctic "encoder.layers.{bid}.mlp.dense_h_to_4h", # chatglm "transformer.h.{bid}.mlp.c_fc_1", # exaone @@ -370,7 +372,7 @@ class TensorNameMap: "model.layers.layers.{bid}.mlp.gate_proj", # plamo "model.layers.{bid}.feed_forward.w1", # internlm2 "encoder.layers.{bid}.mlp.fc12", # nomic-bert - "encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2 + "encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2 (split up/gate, no longer used) "transformer.h.{bid}.mlp.linear_1", # refact "model.layers.{bid}.residual_mlp.w1", # arctic "transformer.h.{bid}.mlp.c_fc_0", # exaone diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 27c9ab74be112..56082279119d8 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -650,6 +650,7 @@ ggml_tensor * llm_graph_context::build_ffn( { // Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf int64_t split_point = cur->ne[0] / 2; + // TODO: these conts should not be needed, see https://github.com/ggml-org/llama.cpp/pull/14090#discussion_r2137437217 ggml_tensor * x0 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], 0)); ggml_tensor * x1 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur))); @@ -663,7 +664,7 @@ ggml_tensor * llm_graph_context::build_ffn( { // Split into two equal parts int64_t split_point = cur->ne[0] / 2; - // TODO: these conts should not be needed + // TODO: these conts should not be needed, see https://github.com/ggml-org/llama.cpp/pull/14090#discussion_r2137437217 ggml_tensor * x0 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], 0)); ggml_tensor * x1 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur))); diff --git a/src/llama-model.cpp b/src/llama-model.cpp index c41ee24507fca..f4a66390c7981 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -2224,8 +2224,8 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED); layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); - layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); - layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), 
{n_embd, layer.ffn_gate ? n_ff : n_ff * 2}, 0); layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0); @@ -6043,7 +6043,7 @@ struct llm_build_bert : public llm_graph_context { model.layers[il].ffn_gate, NULL, NULL, model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, NULL, - LLM_FFN_GELU, LLM_FFN_PAR, il); + model.layers[il].ffn_gate ? LLM_FFN_GELU : LLM_FFN_GEGLU, LLM_FFN_PAR, il); cb(cur, "ffn_out", il); } else { cur = build_ffn(cur, From 34641af838c186b182c2249bf4dc098361938105 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Tue, 10 Jun 2025 23:29:52 +0200 Subject: [PATCH 022/192] convert : fix duplicate key DeepSeek-R1 conversion error (#14103) --- convert_hf_to_gguf.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index a208c42ba9a8b..173a103badc60 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -556,8 +556,11 @@ def set_gguf_parameters(self): logger.info(f"gguf: experts used count = {n_experts_used}") if (head_dim := self.hparams.get("head_dim")) is not None: - self.gguf_writer.add_key_length(head_dim) - self.gguf_writer.add_value_length(head_dim) + # Workaround for incorrect AutoConfig value for DeepSeekV3 (is set correctly in DeepSeekV2Model class) + # https://github.com/huggingface/transformers/blob/19224c3642705c5b6988c9f5f4251f83323d05ae/src/transformers/models/deepseek_v3/configuration_deepseek_v3.py#L210 + if self.hparams.get("model_type") != "deepseek_v3": + self.gguf_writer.add_key_length(head_dim) + self.gguf_writer.add_value_length(head_dim) self.gguf_writer.add_file_type(self.ftype) logger.info(f"gguf: file type = {self.ftype}") From cf34b124af75d57a0944721c55133b58dc52d854 Mon Sep 17 00:00:00 2001 From: compilade Date: Tue, 10 Jun 2025 18:20:14 -0400 Subject: [PATCH 023/192] kv-cache : avoid modifying recurrent cells when setting inputs (#13834) * kv-cache : avoid modifying recurrent cells when setting inputs * kv-cache : remove inp_s_mask It was replaced with equivalent and simpler functionality with rs_z (the first zeroed state) and the already-existing inp_s_copy. * kv-cache : fix non-consecutive token pos warning for recurrent models The problem was apparently caused by how the tail cells were swapped. * graph : simplify logic for recurrent state copies * kv-cache : use cell without src refs for rs_z in recurrent cache * llama-graph : fix recurrent state copy The `state_copy` shuffle assumes everything is moved at once, which is not true when `states_extra` is copied back to the cache before copying the range of states between `head` and `head + n_seqs`. This is only a problem if any of the cells in [`head`, `head + n_seqs`) have an `src` in [`head + n_seqs`, `head + n_kv`), which does happen when `n_ubatch > 1` in the `llama-parallel` example. Changing the order of the operations avoids the potential overwrite before use, although when copies are avoided (like with Mamba2), this will require further changes. * llama-graph : rename n_state to state_size in build_recurrent_state This naming should reduce confusion between the state size and the number of states. 
--- src/llama-graph.cpp | 83 +++++++------------ src/llama-graph.h | 22 +---- src/llama-kv-cache-recurrent.cpp | 135 ++++++++++++++----------------- src/llama-kv-cache-recurrent.h | 12 +-- src/llama-kv-cache-unified.cpp | 2 - src/llama-model.cpp | 43 ++++------ 6 files changed, 117 insertions(+), 180 deletions(-) diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 56082279119d8..e74c9ff53b05a 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -250,22 +250,6 @@ void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) { } } -void llm_graph_input_s_mask::set_input(const llama_ubatch * ubatch) { - GGML_UNUSED(ubatch); - - const int64_t n_kv = kv_state->get_n_kv(); - - if (s_mask) { - GGML_ASSERT(ggml_backend_buffer_is_host(s_mask->buffer)); - float * data = (float *) s_mask->data; - - // clear unused states - for (int i = 0; i < n_kv; ++i) { - data[i] = kv_state->s_mask(i); - } - } -} - void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) { GGML_UNUSED(ubatch); @@ -987,23 +971,6 @@ ggml_tensor * llm_graph_context::build_inp_s_copy() const { return cur; } -ggml_tensor * llm_graph_context::build_inp_s_mask() const { - const auto * kv_state = static_cast(mstate); - - auto inp = std::make_unique(kv_state); - - const auto n_kv = kv_state->get_n_kv(); - - auto & cur = inp->s_mask; - - cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_kv); - ggml_set_input(cur); - - res->add_input(std::move(inp)); - - return cur; -} - ggml_tensor * llm_graph_context::build_inp_cross_embd() const { auto inp = std::make_unique(cross); @@ -1456,43 +1423,53 @@ ggml_tensor * llm_graph_context::build_attn( return cur; } -ggml_tensor * llm_graph_context::build_copy_mask_state( +ggml_tensor * llm_graph_context::build_recurrent_state( ggml_cgraph * gf, ggml_tensor * s, ggml_tensor * state_copy, - ggml_tensor * state_mask, - int32_t n_state, - int32_t n_seqs) const { + int32_t state_size, + int32_t n_seqs, + bool avoid_copies) const { const auto * kv_state = static_cast(mstate); const auto n_kv = kv_state->get_n_kv(); const auto kv_head = kv_state->get_head(); + const auto rs_zero = kv_state->get_rs_z(); - ggml_tensor * states = ggml_reshape_2d(ctx0, s, n_state, kv_state->get_size()); + ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, kv_state->get_size()); - // copy states - // NOTE: assuming the copy destinations are ALL contained between kv_head and kv_head + n_kv - // this shrinks the tensors's ne[1] to n_kv - states = ggml_get_rows(ctx0, states, state_copy); + // Clear a single state which will then be copied to the other cleared states. + // Note that this is a no-op when the view is zero-sized. + ggml_tensor * state_zero = ggml_view_1d(ctx0, states, state_size*(rs_zero >= 0), rs_zero*states->nb[1]*(rs_zero >= 0)); + ggml_build_forward_expand(gf, ggml_scale_inplace(ctx0, state_zero, 0)); - // clear states of sequences which are starting at the beginning of this batch - // FIXME: zero-out NANs? 
- states = ggml_mul(ctx0, states, state_mask); + ggml_tensor * output_states; + + if (!avoid_copies) { + // copy states + // NOTE: assuming the copy destinations are ALL contained between kv_head and kv_head + n_kv + // {state_size, kv_size} -> {state_size, n_seqs} + output_states = ggml_get_rows(ctx0, states, ggml_view_1d(ctx0, state_copy, n_seqs, 0)); + ggml_build_forward_expand(gf, output_states); + } else { + // FIXME: make the gathering operation happen before the copy below + // (maybe with an optional lambda function passed as a parameter instead of `avoid_copies`?) + output_states = states; + } - // copy states which won't be changed further (between n_seqs and n_kv) + // copy extra states which won't be changed further (between n_seqs and n_kv) + ggml_tensor * states_extra = ggml_get_rows(ctx0, states, ggml_view_1d(ctx0, state_copy, n_kv - n_seqs, n_seqs*state_copy->nb[0])); ggml_build_forward_expand(gf, ggml_cpy(ctx0, - ggml_view_1d(ctx0, states, n_state*(n_kv - n_seqs), (n_seqs )*n_state*ggml_element_size(states)), - ggml_view_1d(ctx0, s, n_state*(n_kv - n_seqs), (kv_head + n_seqs)*n_state*ggml_element_size(s)))); + states_extra, + ggml_view_1d(ctx0, s, state_size*(n_kv - n_seqs), (kv_head + n_seqs)*state_size*ggml_element_size(s)))); - // the part of the states that will be used and modified - return ggml_view_2d(ctx0, states, n_state, n_seqs, states->nb[1], 0); + return output_states; } ggml_tensor * llm_graph_context::build_rwkv_token_shift_load( ggml_cgraph * gf, ggml_tensor * state_copy, - ggml_tensor * state_mask, const llama_ubatch & ubatch, int il) const { const auto * kv_state = static_cast(mstate); @@ -1503,8 +1480,8 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_load( ggml_tensor * token_shift_all = kv_state->get_k_l(il); - ggml_tensor * token_shift = build_copy_mask_state( - gf, token_shift_all, state_copy, state_mask, + ggml_tensor * token_shift = build_recurrent_state( + gf, token_shift_all, state_copy, hparams.n_embd_k_s(), n_seqs); token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs); diff --git a/src/llama-graph.h b/src/llama-graph.h index 28da6a5228bdc..88fb77f1ddc9a 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -200,18 +200,6 @@ class llm_graph_input_s_copy : public llm_graph_input_i { const llama_kv_cache_recurrent_state * kv_state; }; -class llm_graph_input_s_mask : public llm_graph_input_i { -public: - llm_graph_input_s_mask(const llama_kv_cache_recurrent_state * kv_state) : kv_state(kv_state) {} - virtual ~llm_graph_input_s_mask() = default; - - void set_input(const llama_ubatch * ubatch) override; - - ggml_tensor * s_mask; // F32 [1, n_kv] - - const llama_kv_cache_recurrent_state * kv_state; -}; - class llm_graph_input_cross_embd : public llm_graph_input_i { public: llm_graph_input_cross_embd( @@ -521,7 +509,6 @@ struct llm_graph_context { ggml_tensor * build_inp_mean() const; ggml_tensor * build_inp_cls() const; ggml_tensor * build_inp_s_copy() const; - ggml_tensor * build_inp_s_mask() const; ggml_tensor * build_inp_cross_embd() const; ggml_tensor * build_inp_pos_bucket_enc() const; @@ -606,18 +593,17 @@ struct llm_graph_context { // recurrent // - ggml_tensor * build_copy_mask_state( + ggml_tensor * build_recurrent_state( ggml_cgraph * gf, ggml_tensor * s, ggml_tensor * state_copy, - ggml_tensor * state_mask, - int32_t n_state, - int32_t n_seqs) const; + int32_t state_size, + int32_t n_seqs, + bool avoid_copies = false) const; ggml_tensor * build_rwkv_token_shift_load( ggml_cgraph * gf, 
ggml_tensor * state_copy, - ggml_tensor * state_mask, const llama_ubatch & ubatch, int il) const; diff --git a/src/llama-kv-cache-recurrent.cpp b/src/llama-kv-cache-recurrent.cpp index f5c6dcd66ce9e..f8cdd52808d7b 100644 --- a/src/llama-kv-cache-recurrent.cpp +++ b/src/llama-kv-cache-recurrent.cpp @@ -406,21 +406,12 @@ bool llama_kv_cache_recurrent::prepare(const std::vector & ubatche bool success = true; - // TODO: here we have to verify that all ubatches can fit in the cells - // however, the current implementation is broken because it relies on s_copy() and s_mask() to update the cells - // during the compute of each ubatch. to reproduce, uncomment the following loop and run: - // - // $ llama-parallel -m ./mamba-130m/ggml-model-f16.gguf -np 5 -ns 8 - // - // recovery from failures when the batch does not fit in the KV cache will not work correctly until this is fixed - // - GGML_UNUSED(ubatches); - //for (const auto & ubatch : ubatches) { - // if (!find_slot(ubatch)) { - // success = false; - // break; - // } - //} + for (const auto & ubatch : ubatches) { + if (!find_slot(ubatch)) { + success = false; + break; + } + } // restore the original state cells = std::move(org_cells); @@ -431,14 +422,13 @@ bool llama_kv_cache_recurrent::prepare(const std::vector & ubatche } bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { - const uint32_t n_tokens = ubatch.n_tokens; - const uint32_t n_seqs = ubatch.n_seqs; + const uint32_t n_seqs = ubatch.n_seqs; const uint32_t n_seq_tokens = ubatch.n_seq_tokens; // if we have enough unused cells before the current head -> // better to start searching from the beginning of the cache, hoping to fill it - if (head > used + 2*n_tokens) { + if (head > used + 2*n_seqs) { head = 0; } @@ -534,16 +524,16 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { empty_cell.src = orig_cell.src; orig_cell.seq_id.erase(seq_id); empty_cell.seq_id.insert(seq_id); // will be overwritten + GGML_ASSERT(!orig_cell.is_empty()); // has at least one remaining seq_id } seq_meta.tail = next_empty_cell; // find next empty cell if (s + 1 < n_seqs) { - next_empty_cell += 1; for (uint32_t i = 0; i < size; ++i) { + next_empty_cell += 1; if (next_empty_cell >= size) { next_empty_cell -= size; } kv_cell & cell = cells[next_empty_cell]; if (cell.is_empty()) { break; } - next_empty_cell += 1; } } } @@ -553,8 +543,8 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { // gather and re-order for (uint32_t s = 0; s < n_seqs; ++s) { - int32_t dst_id = s + min; - int32_t src_id = cells[ubatch.seq_id[s][0]].tail; + const int32_t dst_id = s + min; + const int32_t src_id = cells[ubatch.seq_id[s][0]].tail; if (dst_id != src_id) { kv_cell & dst_cell = cells[dst_id]; kv_cell & src_cell = cells[src_id]; @@ -563,12 +553,14 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { std::swap(dst_cell.src, src_cell.src); std::swap(dst_cell.seq_id, src_cell.seq_id); - // swap tails (assuming they NEVER overlap) - for (const llama_seq_id seq_id : src_cell.seq_id) { - cells[seq_id].tail = src_id; - } - for (const llama_seq_id seq_id : dst_cell.seq_id) { - cells[seq_id].tail = dst_id; + // swap tails + for (uint32_t i = 0; i < size; ++i) { + int32_t & tail = cells[i].tail; + if (tail == src_id) { + tail = dst_id; + } else if (tail == dst_id) { + tail = src_id; + } } } } @@ -576,7 +568,7 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { // update the pos of the used seqs for (uint32_t s = 0; s < n_seqs; ++s) { 
const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1]; - int32_t cell_id = s + min; + const int32_t cell_id = s + min; kv_cell & cell = cells[cell_id]; if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) { @@ -594,6 +586,38 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { } } + // Find first cell without src refs, to use as the zero-ed state + { + // TODO: bake-in src refcounts in the cell metadata + std::vector refcounts(size, 0); + for (size_t i = 0; i < size; ++i) { + const int32_t src = cells[i].src; + if (src >= 0) { + refcounts[src] += 1; + } + } + + rs_z = -1; + for (int i = min; i <= max; ++i) { + if (refcounts[i] == 0) { + rs_z = i; + break; + } + } + + for (int i = min; i <= max; ++i) { + if (cells[i].src < 0) { + GGML_ASSERT(rs_z >= 0); + cells[i].src0 = rs_z; + } else { + // Stage the source ids for all used cells to allow correct seq_* behavior + // and still make these values available when setting the inputs + cells[i].src0 = cells[i].src; + } + cells[i].src = i; // avoid moving or clearing twice + } + } + // allow getting the range of used cells, from head to head + n head = min; n = max - min + 1; @@ -605,47 +629,8 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { } bool llama_kv_cache_recurrent::get_can_shift() const { - return false; -} - -int32_t llama_kv_cache_recurrent::s_copy(int i) const { - const uint32_t cell_id = i + head; - - ////////////////////////////////////////////// - // TODO: this should not mutate the KV cache ! - kv_cell & cell = const_cast(cells[cell_id]); - - // prevent out-of-bound sources - if (cell.src < 0 || (uint32_t) cell.src >= size) { - cell.src = cell_id; - } - - int32_t res = cell.src; - - // TODO: do not mutate the KV cache - // ensure copy only happens once - if (cell.src != (int32_t) cell_id) { - cell.src = cell_id; - } - - return res; -} - -float llama_kv_cache_recurrent::s_mask(int i) const { - const uint32_t cell_id = i + head; - - ////////////////////////////////////////////// - // TODO: this should not mutate the KV cache ! - kv_cell & cell = const_cast(cells[cell_id]); - - float res = (float) (cell.src >= 0); - - // only clear once - if (cell.src < 0) { - cell.src = cell_id; - } - - return res; + // shifting the pos is trivial for recurrent models + return true; } size_t llama_kv_cache_recurrent::total_size() const { @@ -1111,6 +1096,10 @@ uint32_t llama_kv_cache_recurrent_state::get_head() const { return is_full ? 0 : kv->head; } +int32_t llama_kv_cache_recurrent_state::get_rs_z() const { + return is_full ? 
0 : kv->rs_z; +} + uint32_t llama_kv_cache_recurrent_state::get_size() const { return kv->size; } @@ -1124,9 +1113,5 @@ ggml_tensor * llama_kv_cache_recurrent_state::get_v_l(int32_t il) const { } int32_t llama_kv_cache_recurrent_state::s_copy(int i) const { - return kv->s_copy(i); -} - -float llama_kv_cache_recurrent_state::s_mask(int i) const { - return kv->s_mask(i); + return kv->cells[i + kv->head].src0; } diff --git a/src/llama-kv-cache-recurrent.h b/src/llama-kv-cache-recurrent.h index d1da1225655fa..4b33bafd71cca 100644 --- a/src/llama-kv-cache-recurrent.h +++ b/src/llama-kv-cache-recurrent.h @@ -57,10 +57,6 @@ class llama_kv_cache_recurrent : public llama_memory_i { bool get_can_shift() const override; - // TODO: temporary methods - they are not really const as they do const_cast<>, fix this - int32_t s_copy(int i) const; - float s_mask(int i) const; - // state write/load void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override; @@ -73,10 +69,14 @@ class llama_kv_cache_recurrent : public llama_memory_i { // computed before each graph build uint32_t n = 0; + // first zero-ed state + int32_t rs_z = -1; + // TODO: optimize for recurrent state needs struct kv_cell { llama_pos pos = -1; - int32_t src = -1; // used to copy states + int32_t src = -1; // used to know where states should be copied from + int32_t src0 = -1; // like src, but only used when setting the inputs (allowing to copy once) int32_t tail = -1; std::set seq_id; @@ -157,13 +157,13 @@ class llama_kv_cache_recurrent_state : public llama_memory_state_i { uint32_t get_n_kv() const; uint32_t get_head() const; + int32_t get_rs_z() const; uint32_t get_size() const; ggml_tensor * get_k_l(int32_t il) const; ggml_tensor * get_v_l(int32_t il) const; int32_t s_copy(int i) const; - float s_mask(int i) const; private: const llama_memory_status status; diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index 3566d5fd4d72b..fe41b94804310 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -512,8 +512,6 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { head_cur = 0; } - // otherwise, one cell per token. 
- if (n_tokens > cells.size()) { LLAMA_LOG_ERROR("%s: n_tokens = %d > size = %u\n", __func__, n_tokens, cells.size()); return -1; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index f4a66390c7981..c64bf9de939f4 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -8857,7 +8857,6 @@ struct llm_build_mamba : public llm_graph_context { inpL = build_inp_embd(model.tok_embd); ggml_tensor * state_copy = build_inp_s_copy(); - ggml_tensor * state_mask = build_inp_s_mask(); for (int il = 0; il < n_layer; ++il) { // norm @@ -8866,8 +8865,7 @@ struct llm_build_mamba : public llm_graph_context { LLM_NORM_RMS, il); cb(cur, "attn_norm", il); - //cur = build_mamba_layer(gf, cur, state_copy, state_mask, il); - cur = build_mamba_layer(gf, cur, state_copy, state_mask, ubatch, il); + cur = build_mamba_layer(gf, cur, state_copy, ubatch, il); if (il == n_layer - 1) { // skip computing output for unused tokens @@ -8908,7 +8906,6 @@ struct llm_build_mamba : public llm_graph_context { ggml_cgraph * gf, ggml_tensor * cur, ggml_tensor * state_copy, - ggml_tensor * state_mask, const llama_ubatch & ubatch, int il) const { const auto * kv_state = static_cast(mstate); @@ -8935,12 +8932,12 @@ struct llm_build_mamba : public llm_graph_context { ggml_tensor * ssm_states_all = kv_state->get_v_l(il); // (ab)using the KV cache to store the states - ggml_tensor * conv = build_copy_mask_state( - gf, conv_states_all, state_copy, state_mask, + ggml_tensor * conv = build_recurrent_state( + gf, conv_states_all, state_copy, hparams.n_embd_k_s(), n_seqs); conv = ggml_reshape_3d(ctx0, conv, d_conv - 1, d_inner, n_seqs); - ggml_tensor * ssm = build_copy_mask_state( - gf, ssm_states_all, state_copy, state_mask, + ggml_tensor * ssm = build_recurrent_state( + gf, ssm_states_all, state_copy, hparams.n_embd_v_s(), n_seqs); ssm = ggml_reshape_3d(ctx0, ssm, d_state, d_inner, n_seqs); @@ -11656,7 +11653,6 @@ struct llm_build_rwkv6_base : public llm_graph_context { ggml_tensor * cur, ggml_tensor * x_prev, ggml_tensor * state_copy, - ggml_tensor * state_mask, const llama_ubatch & ubatch, int il) const { const auto * kv_state = static_cast(mstate); @@ -11780,8 +11776,8 @@ struct llm_build_rwkv6_base : public llm_graph_context { k = ggml_sub(ctx0, k, ggml_mul(ctx0, k, w)); } - ggml_tensor * wkv_state = build_copy_mask_state( - gf, kv_state->get_v_l(il), state_copy, state_mask, + ggml_tensor * wkv_state = build_recurrent_state( + gf, kv_state->get_v_l(il), state_copy, hparams.n_embd_v_s(), n_seqs); ggml_tensor * wkv_output; @@ -11837,7 +11833,6 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1); ggml_tensor * state_copy = build_inp_s_copy(); - ggml_tensor * state_mask = build_inp_s_mask(); const auto n_embd = hparams.n_embd; const auto n_seq_tokens = ubatch.n_seq_tokens; @@ -11848,7 +11843,7 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); ggml_tensor * token_shift = build_rwkv_token_shift_load( - gf, state_copy, state_mask, ubatch, il + gf, state_copy, ubatch, il ); ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0); @@ -11864,7 +11859,7 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { 1 ); - cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, state_mask, ubatch, il); + cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, ubatch, il); ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); 
cb(ffn_inp, "ffn_inp", il); @@ -11935,7 +11930,6 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { inpL = build_inp_embd(model.tok_embd); ggml_tensor * state_copy = build_inp_s_copy(); - ggml_tensor * state_mask = build_inp_s_mask(); const auto n_embd = hparams.n_embd; const auto n_seq_tokens = ubatch.n_seq_tokens; @@ -11946,7 +11940,7 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); ggml_tensor * token_shift = build_rwkv_token_shift_load( - gf, state_copy, state_mask, ubatch, il + gf, state_copy, ubatch, il ); ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il); @@ -11959,7 +11953,7 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { 1 ); - cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, state_mask, ubatch, il); + cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, ubatch, il); token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)); ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); @@ -12051,7 +12045,6 @@ struct llm_build_rwkv7_base : public llm_graph_context { ggml_tensor * cur, ggml_tensor * x_prev, ggml_tensor * state_copy, - ggml_tensor * state_mask, ggml_tensor *& first_layer_value, const llama_ubatch & ubatch, int il) const { @@ -12134,8 +12127,8 @@ struct llm_build_rwkv7_base : public llm_graph_context { v = ggml_reshape_3d(ctx0, v, head_size, head_count, n_tokens); a = ggml_reshape_3d(ctx0, a, head_size, head_count, n_tokens); - ggml_tensor * wkv_state = build_copy_mask_state( - gf, kv_state->get_v_l(il), state_copy, state_mask, + ggml_tensor * wkv_state = build_recurrent_state( + gf, kv_state->get_v_l(il), state_copy, hparams.n_embd_v_s(), n_seqs); ggml_tensor * wkv_output = ggml_rwkv_wkv7(ctx0, r, w, k, v, ggml_neg(ctx0, kk), ggml_mul(ctx0, kk, a), wkv_state); @@ -12193,7 +12186,6 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base { inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1); ggml_tensor * state_copy = build_inp_s_copy(); - ggml_tensor * state_mask = build_inp_s_mask(); const auto n_embd = hparams.n_embd; const auto n_seq_tokens = ubatch.n_seq_tokens; @@ -12204,7 +12196,7 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base { inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); ggml_tensor * token_shift = build_rwkv_token_shift_load( - gf, state_copy, state_mask, ubatch, il + gf, state_copy, ubatch, il ); ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0); @@ -12220,7 +12212,7 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base { 1 ); - cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, state_mask, v_first, ubatch, il); + cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, v_first, ubatch, il); ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); cb(ffn_inp, "ffn_inp", il); @@ -12287,7 +12279,6 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { inpL = build_inp_embd(model.tok_embd); ggml_tensor * state_copy = build_inp_s_copy(); - ggml_tensor * state_mask = build_inp_s_mask(); const auto n_embd = hparams.n_embd; const auto n_seq_tokens = ubatch.n_seq_tokens; @@ -12298,7 +12289,7 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); ggml_tensor * token_shift = 
build_rwkv_token_shift_load( - gf, state_copy, state_mask, ubatch, il + gf, state_copy, ubatch, il ); ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il); @@ -12311,7 +12302,7 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { 1 ); - cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, state_mask, v_first, ubatch, il); + cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, v_first, ubatch, il); token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)); ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); From 39d4a904838a2e89448499c3800254161cf0600b Mon Sep 17 00:00:00 2001 From: lhez Date: Tue, 10 Jun 2025 16:55:58 -0700 Subject: [PATCH 024/192] opencl: add `mul_mv_id_q4_0_f32_8x_flat` (#14003) --- ggml/src/ggml-opencl/CMakeLists.txt | 1 + ggml/src/ggml-opencl/ggml-opencl.cpp | 163 +++++++++- .../kernels/mul_mv_id_q4_0_f32_8x_flat.cl | 283 ++++++++++++++++++ 3 files changed, 446 insertions(+), 1 deletion(-) create mode 100644 ggml/src/ggml-opencl/kernels/mul_mv_id_q4_0_f32_8x_flat.cl diff --git a/ggml/src/ggml-opencl/CMakeLists.txt b/ggml/src/ggml-opencl/CMakeLists.txt index d0a8b4cc6d0fc..0e2a419649cea 100644 --- a/ggml/src/ggml-opencl/CMakeLists.txt +++ b/ggml/src/ggml-opencl/CMakeLists.txt @@ -80,6 +80,7 @@ set(GGML_OPENCL_KERNELS mul_mv_q4_0_f32_1d_8x_flat mul_mv_q4_0_f32_1d_16x_flat mul_mv_q6_k + mul_mv_id_q4_0_f32_8x_flat mul norm relu diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index 80a364380d05a..628e574f0f71e 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -321,6 +321,7 @@ struct ggml_backend_opencl_context { cl_program program_upscale; cl_program program_concat; cl_program program_tsembd; + cl_program program_mul_mv_id_q4_0_f32_8x_flat; cl_kernel kernel_add, kernel_add_row; cl_kernel kernel_mul, kernel_mul_row; @@ -366,6 +367,7 @@ struct ggml_backend_opencl_context { cl_kernel kernel_concat_f32_contiguous; cl_kernel kernel_concat_f32_non_contiguous; cl_kernel kernel_timestep_embedding; + cl_kernel kernel_mul_mv_id_q4_0_f32_8x_flat; #ifdef GGML_OPENCL_USE_ADRENO_KERNELS // Transpose kernels @@ -1112,7 +1114,7 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve GGML_LOG_CONT("."); } - // repeat + // repeat { #ifdef GGML_OPENCL_EMBED_KERNELS const std::string kernel_src { @@ -1256,6 +1258,22 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve } } + // mul_mv_id_q4_0_f32_8x_flat + { +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string kernel_src { + #include "mul_mv_id_q4_0_f32_8x_flat.cl.h" + }; +#else + const std::string kernel_src = read_file("mul_mv_id_q4_0_f32_8x_flat.cl"); +#endif + backend_ctx->program_mul_mv_id_q4_0_f32_8x_flat = + build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); + + CL_CHECK((backend_ctx->kernel_mul_mv_id_q4_0_f32_8x_flat = clCreateKernel(backend_ctx->program_mul_mv_id_q4_0_f32_8x_flat, "kernel_mul_mv_id_q4_0_f32_8x_flat", &err), err)); + GGML_LOG_CONT("."); + } + // Adreno kernels #ifdef GGML_OPENCL_USE_ADRENO_KERNELS // transpose @@ -2178,6 +2196,13 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te return op->src[1]->type == GGML_TYPE_F32 && ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); } return false; 
+ case GGML_OP_MUL_MAT_ID: + if (op->src[0]->type == GGML_TYPE_Q4_0) { + if (op->src[1]->type == GGML_TYPE_F32) { + return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); + } + } + return false; case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: @@ -5536,6 +5561,136 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co } } +static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + GGML_ASSERT(src1); + GGML_ASSERT(src1->extra); + GGML_ASSERT(dst); + GGML_ASSERT(dst->extra); + + const ggml_tensor * src2 = dst->src[2]; + GGML_ASSERT(src2); + GGML_ASSERT(src2->extra); + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; + ggml_tensor_extra_cl * extra2 = (ggml_tensor_extra_cl *)src2->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + + cl_ulong offset1 = extra1->offset + src1->view_offs; + cl_ulong offset2 = extra2->offset + src2->view_offs; + cl_ulong offsetd = extrad->offset + dst->view_offs; + +#ifdef GGML_OPENCL_SOA_Q + ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra; +#endif + + const int ne00 = src0->ne[0]; + const int ne01 = src0->ne[1]; + const int ne02 = src0->ne[2]; + const int ne03 = src0->ne[3]; + + const cl_ulong nb00 = src0->nb[0]; + const cl_ulong nb02 = src0->nb[2]; + + const int ne10 = src1->ne[0]; + const int ne11 = src1->ne[1]; + const int ne12 = src1->ne[2]; + const int ne13 = src1->ne[3]; + + const cl_ulong nb11 = src1->nb[1]; + const cl_ulong nb12 = src1->nb[2]; + + const int ne20 = src2->ne[0]; + const int ne21 = src2->ne[1]; + + const cl_ulong nb21 = src2->nb[1]; + + const int ne0 = dst->ne[0]; + const int ne1 = dst->ne[1]; + + const int r2 = ne12/ne02; + const int r3 = ne13/ne03; + const int dst_rows = ne20*ne21; // ne20 = n_used_experts, ne21 = n_rows + + GGML_ASSERT(ne00 == ne10); + + int sgs = 32; // subgroup size + int nsg = 1; // number of subgroups + int nrows = 1; // number of row in src1 + int ndst = 4; // number of values produced by each subgroup + + cl_kernel kernel; + + // subgroup mat vec + switch (src0->type) { + case GGML_TYPE_Q4_0: { + kernel = backend_ctx->kernel_mul_mv_id_q4_0_f32_8x_flat; + + if (backend_ctx->gpu_family == INTEL) { + sgs = 16; + nsg = 1; + ndst = 8; + } else if (backend_ctx->gpu_family == ADRENO) { + sgs = 64; + nsg = 1; + ndst = 8; + } else { + GGML_ASSERT(false && "TODO: Unknown GPU"); + } + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q4_0->q)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_0->d)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne02)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb00)); + CL_CHECK(clSetKernelArg(kernel, 12, 
sizeof(cl_ulong), &nb02)); + CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne10)); + CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne11)); + CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne12)); + CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb11)); + CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb12)); + CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &ne20)); + CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &ne21)); + CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb21)); + CL_CHECK(clSetKernelArg(kernel, 21, sizeof(int), &ne0)); + CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &ne1)); + CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &r2)); + CL_CHECK(clSetKernelArg(kernel, 24, sizeof(int), &r3)); + + break; + } + default: + GGML_ASSERT(false && "not implemented");; + } + + int _ne1 = 1; + int ne123 = dst_rows; + + size_t global_work_size[] = {(size_t)(ne01+ndst*nsg-1)/(ndst*nsg)*sgs, (size_t)(_ne1+nrows-1)/nrows*nsg, (size_t)ne123}; + size_t local_work_size[] = {(size_t)sgs, (size_t)nsg, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif +} + static void ggml_cl_scale(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(src0); GGML_ASSERT(src0->extra); @@ -6444,6 +6599,12 @@ bool ggml_cl_compute_forward(ggml_backend_t backend, struct ggml_tensor * tensor } func = ggml_cl_mul_mat; break; + case GGML_OP_MUL_MAT_ID: + if (!any_on_device) { + return false; + } + func = ggml_cl_mul_mat_id; + break; case GGML_OP_SCALE: if (!any_on_device) { return false; diff --git a/ggml/src/ggml-opencl/kernels/mul_mv_id_q4_0_f32_8x_flat.cl b/ggml/src/ggml-opencl/kernels/mul_mv_id_q4_0_f32_8x_flat.cl new file mode 100644 index 0000000000000..7ccf41efbe918 --- /dev/null +++ b/ggml/src/ggml-opencl/kernels/mul_mv_id_q4_0_f32_8x_flat.cl @@ -0,0 +1,283 @@ +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +#ifdef cl_intel_subgroups +#pragma OPENCL EXTENSION cl_intel_subgroups : enable +#else +#pragma OPENCL EXTENSION cl_khr_subgroups : enable +#endif + +#ifdef cl_intel_required_subgroup_size +#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable +#define INTEL_GPU 1 +#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) +#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) +#elif defined(cl_qcom_reqd_sub_group_size) +#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable +#define ADRENO_GPU 1 +#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) +#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) +#endif + +#define QK4_0 32 + +typedef char int8_t; +typedef uchar uint8_t; +typedef short int16_t; +typedef ushort uint16_t; +typedef int int32_t; +typedef uint uint32_t; + +//------------------------------------------------------------------------------ +// block_q4_0 +//------------------------------------------------------------------------------ +struct block_q4_0 +{ + half d; + uint8_t qs[QK4_0 / 2]; +}; + +// This function requires the original shuffled weights. 
+// As a reminder, the original weights are shuffled so that (q[0], q[16]) are +// packed together in a byte, so are (q[1], q[17]) and so on. +inline float block_q_4_0_dot_y_flat( + global uchar * x, + global half * dh, + float sumy, + float16 yl, + int il +) { + float d = *dh; + global ushort * qs = ((global ushort *)x + il/2); + float acc = 0.f; + + acc += yl.s0 * (qs[0] & 0x000F); + acc += yl.s1 * (qs[0] & 0x0F00); + acc += yl.s8 * (qs[0] & 0x00F0); + acc += yl.s9 * (qs[0] & 0xF000); + + acc += yl.s2 * (qs[1] & 0x000F); + acc += yl.s3 * (qs[1] & 0x0F00); + acc += yl.sa * (qs[1] & 0x00F0); + acc += yl.sb * (qs[1] & 0xF000); + + acc += yl.s4 * (qs[2] & 0x000F); + acc += yl.s5 * (qs[2] & 0x0F00); + acc += yl.sc * (qs[2] & 0x00F0); + acc += yl.sd * (qs[2] & 0xF000); + + acc += yl.s6 * (qs[3] & 0x000F); + acc += yl.s7 * (qs[3] & 0x0F00); + acc += yl.se * (qs[3] & 0x00F0); + acc += yl.sf * (qs[3] & 0xF000); + + return d * (sumy * -8.f + acc); +} + +// +// This variant outputs 8 values. +// +#undef N_DST +#undef N_SIMDGROUP +#undef N_SIMDWIDTH + +#ifdef INTEL_GPU +#define N_DST 8 // each SIMD group works on 8 rows +#define N_SIMDGROUP 1 // number of SIMD groups in a thread group +#define N_SIMDWIDTH 16 // subgroup size +#elif defined (ADRENO_GPU) +#define N_DST 8 +#define N_SIMDGROUP 1 +#define N_SIMDWIDTH 64 +#endif + +inline void mul_vec_q_n_f32_8x_flat( + global char * src0_q, + global half * src0_d, + global float * src1, + global float * dst, + int ne00, + int ne01, + int ne02, + int ne10, + int ne12, + int ne0, + int ne1, + int r2, + int r3 +) { + const ulong nb = ne00/QK4_0; + + int r0 = get_group_id(0); + int r1 = get_group_id(1); + int im = 0; + + int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST; + + int i12 = im%ne12; + int i13 = im/ne12; + + // The number of scales is the same as the number of blocks. + ulong offset0_d = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); + // Each block contains QK4_0/2 uchars, hence offset for qs is as follows. 
+ ulong offset0_q = (first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02)) * QK4_0/2; + + global uchar * x = (global uchar *) src0_q + offset0_q; + global half * d = (global half *) src0_d + offset0_d; + global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1; + + float16 yl; + float8 sumf = 0.f; + + int ix = get_sub_group_local_id()/2; + int il = 8*(get_sub_group_local_id()%2); + + global float * yb = y + ix*QK4_0 + il; + + for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { + float sumy = 0.f; + + sumy += yb[0]; + sumy += yb[1]; + sumy += yb[2]; + sumy += yb[3]; + sumy += yb[4]; + sumy += yb[5]; + sumy += yb[6]; + sumy += yb[7]; + + sumy += yb[16]; + sumy += yb[17]; + sumy += yb[18]; + sumy += yb[19]; + sumy += yb[20]; + sumy += yb[21]; + sumy += yb[22]; + sumy += yb[23]; + + yl.s0 = yb[0]; + yl.s1 = yb[1]/256.f; + + yl.s2 = yb[2]; + yl.s3 = yb[3]/256.f; + + yl.s4 = yb[4]; + yl.s5 = yb[5]/256.f; + + yl.s6 = yb[6]; + yl.s7 = yb[7]/256.f; + + yl.s8 = yb[16]/16.f; + yl.s9 = yb[17]/4096.f; + + yl.sa = yb[18]/16.f; + yl.sb = yb[19]/4096.f; + + yl.sc = yb[20]/16.f; + yl.sd = yb[21]/4096.f; + + yl.se = yb[22]/16.f; + yl.sf = yb[23]/4096.f; + + sumf.s0 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 0*nb*QK4_0/2, d + ib + 0*nb, sumy, yl, il); + sumf.s1 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 1*nb*QK4_0/2, d + ib + 1*nb, sumy, yl, il); + sumf.s2 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 2*nb*QK4_0/2, d + ib + 2*nb, sumy, yl, il); + sumf.s3 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 3*nb*QK4_0/2, d + ib + 3*nb, sumy, yl, il); + + sumf.s4 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 4*nb*QK4_0/2, d + ib + 4*nb, sumy, yl, il); + sumf.s5 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 5*nb*QK4_0/2, d + ib + 5*nb, sumy, yl, il); + sumf.s6 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 6*nb*QK4_0/2, d + ib + 6*nb, sumy, yl, il); + sumf.s7 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 7*nb*QK4_0/2, d + ib + 7*nb, sumy, yl, il); + + yb += QK4_0 * (N_SIMDWIDTH/2); + } + + float8 tot = (float8)( + sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1), + sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3), + sub_group_reduce_add(sumf.s4), sub_group_reduce_add(sumf.s5), + sub_group_reduce_add(sumf.s6), sub_group_reduce_add(sumf.s7) + ); + + if (get_sub_group_local_id() == 0) { + if (first_row + 0 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0; + } + if (first_row + 1 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1; + } + if (first_row + 2 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2; + } + if (first_row + 3 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3; + } + + if (first_row + 4 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 4] = tot.s4; + } + if (first_row + 5 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 5] = tot.s5; + } + if (first_row + 6 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 6] = tot.s6; + } + if (first_row + 7 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 7] = tot.s7; + } + } +} + +#ifdef INTEL_GPU +REQD_SUBGROUP_SIZE_16 +#elif defined (ADRENO_GPU) +REQD_SUBGROUP_SIZE_64 +#endif +kernel void kernel_mul_mv_id_q4_0_f32_8x_flat( + global char * src0_q, + global half * src0_d, + global float * src1, + ulong offset1, + global char * src2, + ulong offset2, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + ulong nb00, + ulong nb02, + int ne10, + int ne11, + int ne12, + ulong nb11, + ulong nb12, + int ne20, + int ne21, + ulong nb21, + int ne0, + int ne1, + int r2, + int r3 +) { + 
src1 = (global float *)((global char *)src1 + offset1); + src2 = (global char *)((global char *)src2 + offset2); + dst = (global float *)((global char *)dst + offsetd); + + const int iid1 = get_group_id(2)/ne20; + const int idx = get_group_id(2)%ne20; + + const int i02 = ((global int *)(src2 + iid1*nb21))[idx]; + + const int i11 = idx%ne11; + const int i12 = iid1; + + const int i1 = idx; + const int i2 = i12; + + global char * src0_q_cur = src0_q + (i02*nb02/nb00)*(QK4_0/2); + global half * src0_d_cur = src0_d + (i02*nb02/nb00); + global float * src1_cur = (global float *)((global char *) src1 + i11*nb11 + i12*nb12); + global float * dst_cur = dst + i1*ne0 + i2*ne1*ne0; + + mul_vec_q_n_f32_8x_flat(src0_q_cur, src0_d_cur, src1_cur, dst_cur, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3); +} From e41272952b41413d3333ff0e97ab11777067b912 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Wed, 11 Jun 2025 00:19:25 -0500 Subject: [PATCH 025/192] vulkan: Track descriptor pools/sets per-context (#14109) Use the same descriptor set layout for all pipelines (MAX_PARAMETER_COUNT == 8) and move it to the vk_device. Move all the descriptor pool and set tracking to the context - none of it is specific to pipelines anymore. It has a single vector of pools and vector of sets, and a single counter to track requests and a single counter to track use. --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 233 ++++++++++++--------------- 1 file changed, 104 insertions(+), 129 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 8ccc73e7422fe..e5200b96d0d8d 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -78,7 +78,7 @@ static bool is_pow2(uint32_t x) { return x > 1 && (x & (x-1)) == 0; } #define VK_VENDOR_ID_INTEL 0x8086 #define VK_VENDOR_ID_NVIDIA 0x10de -#define VK_DEVICE_DESCRIPTOR_POOL_SIZE 32 +#define VK_DEVICE_DESCRIPTOR_POOL_SIZE 256 #define GGML_VK_MAX_NODES 8192 @@ -114,13 +114,11 @@ struct vk_queue { bool transfer_only; }; +#define MAX_PARAMETER_COUNT 8 + struct vk_pipeline_struct { std::string name; vk::ShaderModule shader_module; - vk::DescriptorSetLayout dsl; - std::vector descriptor_pools; - std::vector descriptor_sets; - uint32_t descriptor_set_idx; vk::PipelineLayout layout; vk::Pipeline pipeline; uint32_t push_constant_size; @@ -341,6 +339,8 @@ struct vk_device_struct { // set to true to indicate that some shaders need to be compiled after the dryrun bool need_compiles {}; + vk::DescriptorSetLayout dsl; + vk_matmul_pipeline pipeline_matmul_f32 {}; vk_matmul_pipeline pipeline_matmul_f32_f16 {}; vk_matmul_pipeline pipeline_matmul_bf16 {}; @@ -458,7 +458,6 @@ struct vk_device_struct { vk_pipeline pipeline_flash_attn_split_k_reduce; std::unordered_map pipelines; - std::unordered_map pipeline_descriptor_set_requirements; std::vector> pinned_memory; @@ -498,6 +497,8 @@ struct vk_device_struct { } pipelines.clear(); + device.destroyDescriptorSetLayout(dsl); + device.destroy(); } }; @@ -930,6 +931,11 @@ struct ggml_backend_vk_context { vk_context_ref transfer_ctx; std::vector tensor_ctxs; + + std::vector descriptor_pools; + std::vector descriptor_sets; + uint32_t descriptor_set_idx {}; + uint32_t pipeline_descriptor_set_requirements {}; }; static void * const vk_ptr_base = (void *)(uintptr_t) 0x1000; // NOLINT @@ -1060,39 +1066,19 @@ static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipelin ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " 
<< disable_robustness << ", " << require_full_subgroups << ", " << required_subgroup_size << ")"); GGML_ASSERT(parameter_count > 0); + GGML_ASSERT(parameter_count <= MAX_PARAMETER_COUNT); GGML_ASSERT(wg_denoms[0] > 0 && wg_denoms[1] > 0 && wg_denoms[2] > 0); // NOLINT vk::ShaderModuleCreateInfo shader_module_create_info({}, spv_size, reinterpret_cast(spv_data)); pipeline->shader_module = device->device.createShaderModule(shader_module_create_info); - std::vector dsl_binding; - std::vector dsl_binding_flags; - for (uint32_t i = 0; i < parameter_count; i++) { - dsl_binding.push_back({i, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute}); - dsl_binding_flags.push_back({}); - } - - vk::DescriptorSetLayoutBindingFlagsCreateInfo dslbfci = { dsl_binding_flags }; - vk::PushConstantRange pcr( vk::ShaderStageFlagBits::eCompute, 0, pipeline->push_constant_size ); - vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_create_info( - {}, - dsl_binding); - descriptor_set_layout_create_info.setPNext(&dslbfci); - pipeline->dsl = device->device.createDescriptorSetLayout(descriptor_set_layout_create_info); - - vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline->parameter_count * VK_DEVICE_DESCRIPTOR_POOL_SIZE); - vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, VK_DEVICE_DESCRIPTOR_POOL_SIZE, descriptor_pool_size); - pipeline->descriptor_pools.push_back(device->device.createDescriptorPool(descriptor_pool_create_info)); - - pipeline->descriptor_set_idx = 0; - - vk::PipelineLayoutCreateInfo pipeline_layout_create_info(vk::PipelineLayoutCreateFlags(), pipeline->dsl, pcr); + vk::PipelineLayoutCreateInfo pipeline_layout_create_info(vk::PipelineLayoutCreateFlags(), device->dsl, pcr); pipeline->layout = device->device.createPipelineLayout(pipeline_layout_create_info); std::vector specialization_entries(specialization_constants.size()); @@ -1167,15 +1153,6 @@ static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipelin static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline) { VK_LOG_DEBUG("ggml_pipeline_destroy_pipeline(" << pipeline->name << ")"); - for (auto& pool : pipeline->descriptor_pools) { - device.destroyDescriptorPool(pool); - } - pipeline->descriptor_pools.clear(); - pipeline->descriptor_sets.clear(); - pipeline->descriptor_set_idx = 0; - - device.destroyDescriptorSetLayout(pipeline->dsl); - device.destroyPipelineLayout(pipeline->layout); device.destroyShaderModule(pipeline->shader_module); @@ -1183,60 +1160,49 @@ static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline) device.destroyPipeline(pipeline->pipeline); } -static void ggml_pipeline_request_descriptor_sets(vk_device& device, vk_pipeline& pipeline, uint32_t n) { +static void ggml_pipeline_request_descriptor_sets(ggml_backend_vk_context *ctx, vk_pipeline& pipeline, uint32_t n) { VK_LOG_DEBUG("ggml_pipeline_request_descriptor_sets(" << pipeline->name << ", " << n << ")"); - device->pipeline_descriptor_set_requirements[pipeline->name] += n; + ctx->pipeline_descriptor_set_requirements += n; if (!pipeline->compiled) { pipeline->needed = true; - device->need_compiles = true; + ctx->device->need_compiles = true; } } -static void ggml_pipeline_allocate_descriptor_sets(vk_device& device) { - std::lock_guard guard(device->mutex); - - for (auto& pair : device->pipeline_descriptor_set_requirements) { - vk_pipeline pipeline = device->pipelines.at(pair.first).lock(); - const uint64_t n = pair.second; - - 
VK_LOG_DEBUG("ggml_pipeline_allocate_descriptor_sets(" << pipeline->name << ", " << n << ")"); +static void ggml_pipeline_allocate_descriptor_sets(ggml_backend_vk_context * ctx) { - if (pipeline->descriptor_sets.size() >= pipeline->descriptor_set_idx + n) { - // Enough descriptors are available - continue; - } + if (ctx->descriptor_sets.size() >= ctx->pipeline_descriptor_set_requirements) { + // Enough descriptors are available + return; + } - uint32_t to_alloc = pipeline->descriptor_set_idx + n - pipeline->descriptor_sets.size(); - uint32_t pool_remaining = VK_DEVICE_DESCRIPTOR_POOL_SIZE - pipeline->descriptor_sets.size() % VK_DEVICE_DESCRIPTOR_POOL_SIZE; - uint32_t pool_idx = pipeline->descriptor_sets.size() / VK_DEVICE_DESCRIPTOR_POOL_SIZE; + vk_device& device = ctx->device; - while (to_alloc > 0) { - const uint32_t alloc_count = std::min(pool_remaining, to_alloc); - to_alloc -= alloc_count; - pool_remaining = VK_DEVICE_DESCRIPTOR_POOL_SIZE; + uint32_t to_alloc = ctx->pipeline_descriptor_set_requirements - ctx->descriptor_sets.size(); + uint32_t pool_remaining = VK_DEVICE_DESCRIPTOR_POOL_SIZE - ctx->descriptor_sets.size() % VK_DEVICE_DESCRIPTOR_POOL_SIZE; + uint32_t pool_idx = ctx->descriptor_sets.size() / VK_DEVICE_DESCRIPTOR_POOL_SIZE; - if (pool_idx >= pipeline->descriptor_pools.size()) { - vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, pipeline->parameter_count * VK_DEVICE_DESCRIPTOR_POOL_SIZE); - vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, VK_DEVICE_DESCRIPTOR_POOL_SIZE, descriptor_pool_size); - pipeline->descriptor_pools.push_back(device->device.createDescriptorPool(descriptor_pool_create_info)); - } + while (to_alloc > 0) { + const uint32_t alloc_count = std::min(pool_remaining, to_alloc); + to_alloc -= alloc_count; + pool_remaining = VK_DEVICE_DESCRIPTOR_POOL_SIZE; - std::vector layouts(alloc_count); - for (uint32_t i = 0; i < alloc_count; i++) { - layouts[i] = pipeline->dsl; - } - vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(pipeline->descriptor_pools[pool_idx], alloc_count, layouts.data()); - std::vector sets = device->device.allocateDescriptorSets(descriptor_set_alloc_info); - pipeline->descriptor_sets.insert(pipeline->descriptor_sets.end(), sets.begin(), sets.end()); + if (pool_idx >= ctx->descriptor_pools.size()) { + vk::DescriptorPoolSize descriptor_pool_size(vk::DescriptorType::eStorageBuffer, MAX_PARAMETER_COUNT * VK_DEVICE_DESCRIPTOR_POOL_SIZE); + vk::DescriptorPoolCreateInfo descriptor_pool_create_info({}, VK_DEVICE_DESCRIPTOR_POOL_SIZE, descriptor_pool_size); + ctx->descriptor_pools.push_back(device->device.createDescriptorPool(descriptor_pool_create_info)); + } - pool_idx++; + std::vector layouts(alloc_count); + for (uint32_t i = 0; i < alloc_count; i++) { + layouts[i] = device->dsl; } - } -} + vk::DescriptorSetAllocateInfo descriptor_set_alloc_info(ctx->descriptor_pools[pool_idx], alloc_count, layouts.data()); + std::vector sets = device->device.allocateDescriptorSets(descriptor_set_alloc_info); + ctx->descriptor_sets.insert(ctx->descriptor_sets.end(), sets.begin(), sets.end()); -static void ggml_pipeline_cleanup(vk_pipeline& pipeline) { - VK_LOG_DEBUG("ggml_pipeline_cleanup(" << pipeline->name << ")"); - pipeline->descriptor_set_idx = 0; + pool_idx++; + } } static vk::CommandBuffer ggml_vk_create_cmd_buffer(vk_device& device, vk_queue& q) { @@ -3369,6 +3335,22 @@ static vk_device ggml_vk_get_device(size_t idx) { } } + + std::vector dsl_binding; + std::vector dsl_binding_flags; + for (uint32_t i = 0; i < 
MAX_PARAMETER_COUNT; i++) { + dsl_binding.push_back({i, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute}); + dsl_binding_flags.push_back({}); + } + + vk::DescriptorSetLayoutBindingFlagsCreateInfo dslbfci = { dsl_binding_flags }; + + vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_create_info( + {}, + dsl_binding); + descriptor_set_layout_create_info.setPNext(&dslbfci); + device->dsl = device->device.createDescriptorSetLayout(descriptor_set_layout_create_info); + ggml_vk_load_shaders(device); if (!device->single_queue) { @@ -4154,10 +4136,10 @@ static void ggml_vk_dispatch_pipeline(ggml_backend_vk_context* ctx, vk_context& std::cerr << "(" << buffer.buffer << ", " << buffer.offset << ", " << buffer.range << "), "; } std::cerr << "}, (" << wg0 << "," << wg1 << "," << wg2 << "))"); - GGML_ASSERT(pipeline->descriptor_set_idx < pipeline->descriptor_sets.size()); - GGML_ASSERT(descriptor_buffer_infos.size() == pipeline->parameter_count); + GGML_ASSERT(ctx->descriptor_set_idx < ctx->descriptor_sets.size()); + GGML_ASSERT(descriptor_buffer_infos.size() <= MAX_PARAMETER_COUNT); - vk::DescriptorSet& descriptor_set = pipeline->descriptor_sets[pipeline->descriptor_set_idx++]; + vk::DescriptorSet& descriptor_set = ctx->descriptor_sets[ctx->descriptor_set_idx++]; vk::WriteDescriptorSet write_descriptor_set{ descriptor_set, 0, 0, pipeline->parameter_count, vk::DescriptorType::eStorageBuffer, nullptr, descriptor_buffer_infos.begin() }; ctx->device->device.updateDescriptorSets({ write_descriptor_set }, {}); @@ -4964,18 +4946,18 @@ static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& sub } // Request descriptor sets - ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1); + ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1); if (qx_needs_dequant) { - ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_0, 1); + ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_0, 1); } if (qy_needs_dequant) { - ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_1, 1); + ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_1, 1); } if (quantize_y) { - ggml_pipeline_request_descriptor_sets(ctx->device, to_q8_1, 1); + ggml_pipeline_request_descriptor_sets(ctx, to_q8_1, 1); } if (split_k > 1) { - ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_matmul_split_k_reduce, 1); + ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_matmul_split_k_reduce, 1); } return; } @@ -5157,12 +5139,12 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& // Request descriptor sets if (qx_needs_dequant) { - ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_0, 1); + ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_0, 1); } if (qy_needs_dequant) { - ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_1, 1); + ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_1, 1); } - ggml_pipeline_request_descriptor_sets(ctx->device, dmmv, 1); + ggml_pipeline_request_descriptor_sets(ctx, dmmv, 1); return; } @@ -5295,7 +5277,7 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c if (dryrun) { // Request descriptor sets - ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1], 1); + ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1], 1); return; } @@ -5384,7 +5366,7 @@ static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context 
* ctx, vk_con if (dryrun) { // Request descriptor sets - ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, 1); + ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, 1); return; } @@ -5571,12 +5553,12 @@ static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context& } // Request descriptor sets - ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1); + ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1); if (qx_needs_dequant) { - ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_0, 1); + ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_0, 1); } if (qy_needs_dequant) { - ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_1, 1); + ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_1, 1); } return; } @@ -5765,12 +5747,12 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_conte // Request descriptor sets if (qx_needs_dequant) { - ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_0, 1); + ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_0, 1); } if (qy_needs_dequant) { - ggml_pipeline_request_descriptor_sets(ctx->device, to_fp16_vk_1, 1); + ggml_pipeline_request_descriptor_sets(ctx, to_fp16_vk_1, 1); } - ggml_pipeline_request_descriptor_sets(ctx->device, dmmv, 1); + ggml_pipeline_request_descriptor_sets(ctx, dmmv, 1); return; } @@ -6090,9 +6072,9 @@ static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx if (dryrun) { // Request descriptor sets - ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1); + ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1); if (split_k > 1) { - ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_flash_attn_split_k_reduce, 1); + ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_flash_attn_split_k_reduce, 1); } return; } @@ -6655,7 +6637,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co } if (dryrun) { - ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1); + ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1); return; } @@ -7036,7 +7018,7 @@ static void ggml_vk_op_f32_wkv(ggml_backend_vk_context * ctx, vk_context& subctx GGML_ASSERT(pipeline != nullptr); if (dryrun) { - ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1); + ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1); return; } @@ -7175,7 +7157,7 @@ static void ggml_vk_op_f32_opt_step_adamw(ggml_backend_vk_context * ctx, vk_cont GGML_ASSERT(pipeline != nullptr); if (dryrun) { - ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1); + ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1); return; } @@ -7853,9 +7835,9 @@ static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t } } - ggml_pipeline_request_descriptor_sets(ctx->device, p, num_it); + ggml_pipeline_request_descriptor_sets(ctx, p, num_it); if (split_k > 1) { - ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_matmul_split_k_reduce, num_it); + ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_matmul_split_k_reduce, num_it); if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) { // Resize buffer @@ -7870,7 +7852,7 @@ static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t ggml_vk_load_shaders(ctx->device); } - ggml_pipeline_allocate_descriptor_sets(ctx->device); + 
ggml_pipeline_allocate_descriptor_sets(ctx); vk_buffer d_X = ggml_vk_create_buffer_check(ctx->device, sizeof(X_TYPE) * x_ne, vk::MemoryPropertyFlagBits::eDeviceLocal); vk_buffer d_Y = ggml_vk_create_buffer_check(ctx->device, sizeof(Y_TYPE) * y_ne, vk::MemoryPropertyFlagBits::eDeviceLocal); @@ -8036,9 +8018,6 @@ static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t ggml_vk_destroy_buffer(d_Y); ggml_vk_destroy_buffer(d_D); - ggml_pipeline_cleanup(p); - ggml_pipeline_cleanup(ctx->device->pipeline_matmul_split_k_reduce); - free(x); free(y); free(d); @@ -8116,13 +8095,13 @@ static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, size_t ne, ggml_ ggml_vk_quantize_data(x, qx, ne, quant); ggml_vk_dequantize_data(qx, x_ref, ne, quant); - ggml_pipeline_request_descriptor_sets(ctx->device, p, 1); + ggml_pipeline_request_descriptor_sets(ctx, p, 1); if (ctx->device->need_compiles) { ggml_vk_load_shaders(ctx->device); } - ggml_pipeline_allocate_descriptor_sets(ctx->device); + ggml_pipeline_allocate_descriptor_sets(ctx); ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz); @@ -8216,13 +8195,13 @@ static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, size_t ne, ggml_ // // vk_pipeline p = ggml_vk_get_quantize_pipeline(ctx, quant); // -// ggml_pipeline_request_descriptor_sets(ctx->device, p, 1); +// ggml_pipeline_request_descriptor_sets(ctx, p, 1); // // if (ctx->device->need_compiles) { // ggml_vk_load_shaders(ctx->device); // } // -// ggml_pipeline_allocate_descriptor_sets(ctx->device); +// ggml_pipeline_allocate_descriptor_sets(ctx); // // ggml_vk_buffer_write(x_buf, 0, x, x_sz); // @@ -8375,9 +8354,9 @@ static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, // y[i] = i % k; } - ggml_pipeline_request_descriptor_sets(ctx->device, p, num_it); + ggml_pipeline_request_descriptor_sets(ctx, p, num_it); if (split_k > 1) { - ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_matmul_split_k_reduce, num_it); + ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_matmul_split_k_reduce, num_it); if (ctx->prealloc_split_k == nullptr || ctx->prealloc_split_k->size < sizeof(float) * d_ne * split_k) { // Resize buffer @@ -8388,14 +8367,14 @@ static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, } } if (mmq) { - ggml_pipeline_request_descriptor_sets(ctx->device, ctx->device->pipeline_quantize_q8_1, num_it); + ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_quantize_q8_1, num_it); } if (ctx->device->need_compiles) { ggml_vk_load_shaders(ctx->device); } - ggml_pipeline_allocate_descriptor_sets(ctx->device); + ggml_pipeline_allocate_descriptor_sets(ctx); ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz); ggml_vk_buffer_write(y_buf, 0, y, y_sz); @@ -8797,7 +8776,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod // These operations all go through ggml_vk_op_f32, so short-circuit and // do the only thing needed for the dryrun. 
vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, node, node->op); - ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1); + ggml_pipeline_request_descriptor_sets(ctx, pipeline, 1); return false; } default: @@ -9189,17 +9168,6 @@ static void ggml_vk_graph_cleanup(ggml_backend_vk_context * ctx) { } ctx->gc.temp_buffers.clear(); - for (auto& dsr : ctx->device->pipeline_descriptor_set_requirements) { - vk_pipeline_ref plr = ctx->device->pipelines[dsr.first]; - - if (plr.expired()) { - continue; - } - - vk_pipeline pl = plr.lock(); - ggml_pipeline_cleanup(pl); - } - ggml_vk_queue_cleanup(ctx->device, ctx->device->compute_queue); ggml_vk_queue_cleanup(ctx->device, ctx->device->transfer_queue); @@ -9222,7 +9190,8 @@ static void ggml_vk_graph_cleanup(ggml_backend_vk_context * ctx) { ctx->tensor_ctxs.clear(); ctx->gc.contexts.clear(); - ctx->device->pipeline_descriptor_set_requirements.clear(); + ctx->pipeline_descriptor_set_requirements = 0; + ctx->descriptor_set_idx = 0; } // Clean up on backend free @@ -9249,6 +9218,12 @@ static void ggml_vk_cleanup(ggml_backend_vk_context * ctx) { ctx->device->device.destroyFence(ctx->fence); ctx->device->device.destroyFence(ctx->almost_ready_fence); + + for (auto& pool : ctx->descriptor_pools) { + ctx->device->device.destroyDescriptorPool(pool); + } + ctx->descriptor_pools.clear(); + ctx->descriptor_sets.clear(); } static int ggml_vk_get_device_count() { @@ -9622,7 +9597,7 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg ggml_vk_load_shaders(ctx->device); } ggml_vk_preallocate_buffers(ctx); - ggml_pipeline_allocate_descriptor_sets(ctx->device); + ggml_pipeline_allocate_descriptor_sets(ctx); int last_node = cgraph->n_nodes - 1; From 038e0ef5b4838bcfe618559aa3263114b91657a9 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 11 Jun 2025 12:52:45 +0300 Subject: [PATCH 026/192] kv-cache : add LLAMA_KV_CACHE_DEBUG environment variable (#14121) --- src/llama-kv-cache-unified.cpp | 53 +++++++++++++++++++++++++--------- src/llama-kv-cache-unified.h | 2 ++ 2 files changed, 41 insertions(+), 14 deletions(-) diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index fe41b94804310..b184735566a0a 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -127,6 +127,9 @@ llama_kv_cache_unified::llama_kv_cache_unified( ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f), ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f)); } + + const char * LLAMA_KV_CACHE_DEBUG = getenv("LLAMA_KV_CACHE_DEBUG"); + debug = LLAMA_KV_CACHE_DEBUG ? 
atoi(LLAMA_KV_CACHE_DEBUG) : 0; } void llama_kv_cache_unified::clear(bool data) { @@ -517,14 +520,12 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { return -1; } -//#define FIND_SLOT_DEBUG 1 -#if FIND_SLOT_DEBUG - LLAMA_LOG_WARN("begin: n = %5d, used = %5d, head = %5d, n_swa = %5d\n", cells.used_max_p1(), cells.get_used(), head, n_swa); + if (debug > 0) { + LLAMA_LOG_CONT("\n"); + LLAMA_LOG_DEBUG("%s: n = %5d, used = %5d, head = %5d, size = %5d, n_swa = %5d\n", __func__, cells.used_max_p1(), cells.get_used(), head, get_size(), n_swa); - // for debugging - { - std::string ss; - if (n_swa > 0) { + if ((debug == 2 && n_swa > 0) || debug > 2) { + std::string ss; for (uint32_t i = 0; i < cells.size(); ++i) { if (cells.is_empty(i)) { ss += '.'; @@ -532,21 +533,45 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { ss += std::to_string(cells.seq_get(i)); } if (i%256 == 255) { + ss += " *"; ss += '\n'; } } + LLAMA_LOG_DEBUG("\n%s\n", ss.c_str()); } - LLAMA_LOG_WARN("\n%s\n", ss.c_str()); - } - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { - if (cells.seq_pos_min(s) < 0) { - continue; + if ((debug == 2 && n_swa > 0) || debug > 2) { + std::string ss; + for (uint32_t i = 0; i < cells.size(); ++i) { + std::string cur; + if (cells.is_empty(i)) { + cur = '.'; + } else { + cur = std::to_string(cells.pos_get(i)); + } + const int n = cur.size(); + for (int j = 0; j < 5 - n; ++j) { + cur += ' '; + } + ss += cur; + if (i%256 == 255) { + ss += " *"; + } + if (i%64 == 63) { + ss += '\n'; + } + } + LLAMA_LOG_DEBUG("\n%s\n", ss.c_str()); } - LLAMA_LOG_WARN("kv_cells: n_swa = %4d, min[%d] = %5d, max[%d] = %5d\n", n_swa, s, cells.seq_pos_min(s), s, cells.seq_pos_max(s)); + for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + if (cells.seq_pos_min(s) < 0) { + continue; + } + + LLAMA_LOG_DEBUG("%s: min[%d] = %5d, max[%d] = %5d\n", __func__, s, cells.seq_pos_min(s), s, cells.seq_pos_max(s)); + } } -#endif uint32_t n_tested = 0; diff --git a/src/llama-kv-cache-unified.h b/src/llama-kv-cache-unified.h index 49f410ef6ecab..cf4c691babd1e 100644 --- a/src/llama-kv-cache-unified.h +++ b/src/llama-kv-cache-unified.h @@ -158,6 +158,8 @@ class llama_kv_cache_unified : public llama_memory_i { // SWA const uint32_t n_swa = 0; + int debug = 0; + const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE; std::vector ctxs; From 5043576cd3b94812d4be8d315a5fcceaa6cb9e1e Mon Sep 17 00:00:00 2001 From: Taylor Date: Wed, 11 Jun 2025 06:43:43 -0400 Subject: [PATCH 027/192] server : pass default --keep argument (#14120) --- tools/server/server.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/server/server.cpp b/tools/server/server.cpp index 3b5e03528e2d7..1b1cf439baa5f 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -233,6 +233,7 @@ struct server_task { slot_params defaults; defaults.sampling = params_base.sampling; defaults.speculative = params_base.speculative; + defaults.n_keep = params_base.n_keep; // enabling this will output extra debug information in the HTTP responses from the server params.verbose = params_base.verbosity > 9; @@ -2060,6 +2061,7 @@ struct server_context { SLT_INF(slot, "new slot n_ctx_slot = %d\n", slot.n_ctx); slot.params.sampling = params_base.sampling; + slot.params.n_keep = params_base.n_keep; slot.callback_on_release = [this](int) { queue_tasks.pop_deferred_task(); From 5fa57da843f48577c389a6d0f2d0a5707122da36 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 11 Jun 2025 16:48:45 +0300 
Subject: [PATCH 028/192] kv-cache : relax SWA masking condition (#14119) ggml-ci --- src/llama-kv-cache-unified.cpp | 58 +++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 19 deletions(-) diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index b184735566a0a..1a9f4e3159f94 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -582,21 +582,15 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { continue; } - // keep track of what the minimum sequence positions would be if we accept the ubatch - llama_seq_id seq_pos_min[LLAMA_MAX_PARALLEL_SEQUENCES]; - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { - seq_pos_min[s] = cells.seq_pos_min(s); - } - bool found = true; for (uint32_t i = 0; i < n_tokens; i++) { - const llama_pos pos = ubatch.pos[i]; - const llama_seq_id seq_id = ubatch.seq_id[i][0]; + //const llama_pos pos = ubatch.pos[i]; + //const llama_seq_id seq_id = ubatch.seq_id[i][0]; // can we use this cell? either: // - the cell is empty // - the cell is occupied only by one sequence: - // - mask causally, if the sequence is the same as the one we are inserting + // - (disabled) mask causally, if the sequence is the same as the one we are inserting // - mask SWA, using current max pos for that sequence in the cache // always insert in the cell with minimum pos bool can_use = cells.is_empty(head_cur + i); @@ -604,21 +598,17 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { if (!can_use && cells.seq_count(head_cur + i) == 1) { const llama_pos pos_cell = cells.pos_get(head_cur + i); - // causal mask - if (cells.seq_has(head_cur + i, seq_id)) { - can_use = pos_cell >= pos; - } + // (disabled) causal mask + // note: it's better to purge any "future" tokens beforehand + //if (cells.seq_has(head_cur + i, seq_id)) { + // can_use = pos_cell >= pos; + //} if (!can_use) { const llama_seq_id seq_id_cell = cells.seq_get(head_cur + i); // SWA mask - // note: we insert only in the cell with minimum pos in order to preserve the invariant that - // all positions between [pos_min, pos_max] for each sequence will be present in the cache - // ref: https://github.com/ggml-org/llama.cpp/pull/13746#issuecomment-2916057092 - if (pos_cell == seq_pos_min[seq_id_cell] && - is_masked_swa(pos_cell, cells.seq_pos_max(seq_id_cell) + 1)) { - seq_pos_min[seq_id_cell]++; + if (is_masked_swa(pos_cell, cells.seq_pos_max(seq_id_cell) + 1)) { can_use = true; } } @@ -646,8 +636,22 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { } void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch & ubatch) { + // keep track of the max sequence position that we would overwrite with this ubatch + // for non-SWA cache, this would be always empty + llama_seq_id seq_pos_max_rm[LLAMA_MAX_PARALLEL_SEQUENCES]; + for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + seq_pos_max_rm[s] = -1; + } + for (uint32_t i = 0; i < ubatch.n_tokens; ++i) { if (!cells.is_empty(head_cur + i)) { + assert(cells.seq_count(head_cur + i) == 1); + + const llama_seq_id seq_id = cells.seq_get(head_cur + i); + const llama_pos pos = cells.pos_get(head_cur + i); + + seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos); + cells.rm(head_cur + i); } @@ -658,6 +662,22 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch } } + // note: we want to preserve the invariant that all positions between [pos_min, pos_max] for each sequence + 
// will be present in the cache. so we have to purge any position which is less than those we would overwrite + // ref: https://github.com/ggml-org/llama.cpp/pull/13746#issuecomment-2916057092 + for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + if (seq_pos_max_rm[s] == -1) { + continue; + } + + if (cells.seq_pos_min(s) <= seq_pos_max_rm[s]) { + LLAMA_LOG_DEBUG("%s: purging positions [%d, %d] of sequence %d from KV cache\n", + __func__, cells.seq_pos_min(s), seq_pos_max_rm[s], s); + + seq_rm(s, cells.seq_pos_min(s), seq_pos_max_rm[s] + 1); + } + } + // move the head at the end of the slot head = head_cur + ubatch.n_tokens; } From 35394981bb9b4a6b64f6625604f378bec2b4debb Mon Sep 17 00:00:00 2001 From: Aman Date: Wed, 11 Jun 2025 22:42:25 +0800 Subject: [PATCH 029/192] webui: Wrap long numbers instead of infinite horizontal scroll (#14062) * webui: Wrap long numbers instead of infinite horizontal scroll * Use tailwind class * update index.html.gz --- tools/server/public/index.html.gz | Bin 1913882 -> 1913886 bytes tools/server/webui/src/index.scss | 4 ++++ 2 files changed, 4 insertions(+) diff --git a/tools/server/public/index.html.gz b/tools/server/public/index.html.gz index 35b9e702f698c154ceb60ac4fb8b5b3fd38c0e50..0fb01665ae5ccf999a186e13ea09b8f67de3fa1e 100644 GIT binary patch delta 935429 zcmV(;K-<5X=yaawbby2bgaU*Egam{Iga(8Mgb0KQgbIWUgbaiYv<}QHf7j`-KlTp2 zqWX&Pefq?3xO|br9#@_MAHH1sWZl%C`H-G++dtiDK|%r019)cR?!VnvIwU>tgTKVt zrTyac$G)q1`fVJ<$31@E+ui;RX5b&Rd7^(leDRPV2ry}|JXRUik zo&WREy-2Yz1ut*!G8BbkKVPXYgMl2@?U$AR$xjY&rMpZ2XJkzfe+9Sd=ig<_!Pos$ zGB{k4yE*amU**#+K%h=PKYg#)C`B?H>i_hE-8a_f9{}|+dmqY2{jNE2d+>Gq_vGhW z>|xaV#3upf117cKx2jx-B7-z^KR@z7C4KpV1m)1^r@r8u)%QDRz=x0fNYM4%%tLV> z4}N~Q00Vj53 zb+eE7@@txp*z%ixKIF>p&cORj`8DZ>Jo(MY-eJih%KGF_m(2Sd`E#H8V}|^i>SKQV zrmqj#@oTCd@z!_0$7dUS#EakkRIeAMOSwM%uGshOF8Xx$ZyE9Drs79@_|1p;hz-B} z7;m`n=N8h(e@yu8nf8?jd)~Wz>4VefE&u)8XaAP{`T>J6@9I-;PLOZ7FGG7I_bxAd z-Rk!--*2|jd%X9>6$9Ka1ZT#Z_W$zW!hinTJ3Zey-yg~{pE~?qEc2==iYX4c~ z_H~m_>$a~O{eZi@8O~35=WE)}@y^#RKIWZo$M7NVe|$~$Ij#2F7N1mVUpM=RcfO|i zhzkYB4 zeO7&ae{&jrT6=wSh2i4*jv5|3eP_v3C1EH;4XMDfu~v{@kbj-yHf!YStgbp+7ej|B)Q}a|`MB zbLbDW+fT9Q&wcjKYqvjQ&mU{I|Do&|(4cpgfAfmZ0Knw`mE?Dixbrm`IRKG^{Ojd^ z|4Rz%{tjn+aK@LiMXwtE`~Lw4dDy^t*Z$I`_b&vmivH)b-jp z(D3WdAOa!+tXKOtxba>KV9SuVlX~0eQ32`x_?OqYbK!djuQ>CY1~0{U?`>YgGGBMu zf8AFKxbmUC;7h2SNZCVe<`Q* z@Bhb37|1W*AtnCSj*y1)_AmeGpZ^n7VSiMdfmDx9)&}tN0fK?k3UUiP;k&-=>DB7^ zxT9|yJ;x4y_Q78OOz-#eED`?Br$0e}f4;Q({{X=~VjNOz{Fk2$=aB&Qn?7HI%Kq{9 z`X0je`M3KX-u6em<5!5==STAj`1VJAmcM5h-=XmS@xgq>?fuTL@fy?jb!xJph`RV^ z)AIf(UPA#t{bCPE{|CMOQQzqfH~jn;`g$CGzRk;#`S})KAdH`H@)h3rdsf{Yf2H?% z`sZ`|cbsI;kjdZC;tr?$JozhF{|GtHf5+Kg+3>kH7bHb2cNbS#i#{*me6pX%M7fUnk{3jN zku3RTnn;_-@Z1G!7x>!V)XNzWjXpwaeA^B?3?HTH*lrj-a>UDi$D>QGyM+i+P`-8R z_`Fs3WwL8LWXbKvl>~ujxO@<+X%Xu2xnA4uTs6A_&5WTn2Xa4Wwnec|e`%7qwCQ;s z)2EKH>vYZ%^I9So((SmdeTGEz%~{B0N8RkbbJi&cO{IO^kx5o#UTLn5_3?iAkAC6e?6yKG#2p{|T_v$_bYix^49Y=-R2<(8fcLLVI9 zwM<+-j=Ik;yK&iBtrIBv)i>~-p+Yx_%iQ8+mzs@7#W^}VV&%6MBP*keYA#WgMpLPY zEleT9tDKP?ZgYg?e`oLsd@Pz zV|>;%+?qzCNb<-aMXsGjFvoqlx?yLmiWz~8@skh}_^^qbu%C!4nHI*rKS>E0%W%|? 
zbnN+Y9W}myZ9xD;__8+yj3;B=U+fSmcgjhT17KN@%Gb`SAn>Y#J807PAnnbL>ushc zmw{}H+!bX%JQj>(vv|he?4rkea6Xn9>{SXq-|#&DeSU8cf2e^;<p|(#)T`>=9PvMVUEx6?$L1m@BUT-x&D2CzZeP zf$R2n@BY^qfBCzI@iln)ci+BoKVEKsZ=+A?$ijSk{rlQ~FD3$s(VLm^J^y(-#8{3n z9{-ISTw{DZz~ld2`#hT6O0{FYJkN!8aZCk`7c`kA?_R)iO?ny<2L<#e|6FaKi>qQ) z#69nblEA~@UpQ8Daek^>L`EfA=L?i~&I8NW?IIMof9`x(XbXlckKMh)DKOH3-%|3{bsf7+ja^B$hhf6u>w=kC9sFWfVK;)?&L`}5!RK~@ok>xJ?30AY>)`~CoDuCKP< zT#*<5>t6eJEntRK1L<#T_;)>V&4ul6ZvDG%|G5|C`P%)>bD$Ck|6c#!-^SnkKi~d; z`uzX;A~7F6O1;C!CEnE#9C^-el@ntO;QC-) z!2Q8vzt4N(|GXC+F~Gg`?_RVDgg|0mMr7DzJ9<2hzMl?yAnk9(;tq=b-4*vTd2a;=#v4S~0_&1jR zem{Ia#@m0*`AG6s7yR7WB>%?Kzu#doslPP_ViiJhRF$8OZ(=}~)w!9tA9XlOy#HKh z6{YbX*2o!ip8Dg92fpUty=?GB>D%tS*Bwl5GZLk-W{O2G{*8n4qDJC5(J*&ne=eUz z!^eY?w}(~04x8P@2##$f=`Y3qZ?OeV2b6a43 za4d4Ld~|la^jkQZaS^Yodd2t&e-zA53KoJ=6Y!io%rj237mTNcm@q%!ZT!CYKGDMS zp-Hb|#vZ{ZhAg;X)Zy>e#OcT?=bI%sf1BZd;|f8Rd;T^HCoDnyyTDGAZ&4gQ?H*CU zV)9${HF#ls{N3;WnRjuG&A<8OZ(jLd=ajelw-)2NJA3Pm#qa&!bBc=ZfBl=^k>32D z@5i3u-(K$T`~KHC{@*(PH!uBt?f){yF4(KRB~S&W5?%1cktP2>bJ4%A{o4yVe`Dz1 zJ`cD2zxC$d@5!UX0sBAbLaKcBgM=^k2H!@4Hvi2RSnG=bEwX=efT!|-{E;)X z^UMoAFPr=wHBg*9_;U@8*Vio&pWwF5xm|GsYj+spsYIpYRM&m+RM{F^uaKK}n_ z?abPeSC_5*gE#;b6%W%BAV3lVgfpZ`AP6XGfM5UJ^(3IabI!F_^X+TDYuccar>SPt zs8O=tau&qrKkDGjf1aENgZ)3Ly)MpFVRzJ-=`s0TY=Wk2)YboEXD*zb#1g;HUeCFI zbFn-3y-UPDITseOBtMsB^aD05ojuY0$T9tBG`;}4ra^>vo*4QEqg|N^o@!5~f9x9x zt;$2jyPo5f;Un<1ZuD6v#HOcxf7@o1487EcIPi@4llGuKf6@}VeyS`dw$;%i2z@!R zLEy!DZI3T-`T2yscKN&EfCFP|2X}60drJ?tVRVA;!MhrSiR;QBz`@g`g$R;fJ65O? zN$k-n&WP8i$zJ!l!_fL>t~){>{0`LB>KcJNsJ>S_Q$Gq|cYVA{w=|Of+Oj6`!_xR_ zH2S)0>#1v#e`R*;fLVn4k^IurEQuW_-tOWBLD#BjtE*L^*+|HWP3$lYb{x$Ej2 zaqqpWZ@I;bH~-btHV9v>`I{!^X+6K?6STGe;bZ()e|%hEnfSbQ^r!u(@iT@0F`3Hn zaB7l_^@t5)q60%)xljzem%_IkJ39J%zrK$~x^+6ny5H#OM$lZI$q_NPdcL>IpJ!LS zp+hLanKE{*;})Co55DV?yeMBY{m}Wko;ai5pBHh6DL_GT9O|u8W9hnGa!9cWya)MP zH4cfge;*KHd&BmdT{D^3Tf3wH$C9g~|(wxmQ@07D7XR6lIo8~^& z*r!?_iYB=?{y@X7Xev(QF8e0AS%*@*X=68yypyiZak=kD8m{G7->2U-*?WHPo2)IPKCs~G=kmy5+)CxK`t(Lj;c$*P3%o7>yAq#t ze~mw7yAq%P_L%SKX3jT}xxpvN_e{>+gBfVP>2oG%gB{v|**jylQ@)n``WiL>IPEFc zAGrRS>0@ejK%ck!(Uuu?5~p4Y4B*!t&m|X7Hom}b#6HVDkiRP%ea!)VCb$%N{I1Px zeU(?J{0&qHd$00(d^qzkh#Mk&{d`^8e}W6KM7_6}IU3ckw*T|~S+;YTSxZyDq(k9dbBon|M42XNMq(?++H?_B1#b4`v`_oTb89Mta{vzI% zoC?3XR#?k5|NY0iP{dF^xg9p}4?Fo6TP=HanO8K4qr|TNd=2}c@Rx7AN972mEyw@< zB`*7ESGVo+-nizG_;=->t#hoqe~J7a_4`LW{M7O#>w+wll^8QP+PdK_;-^Wz)WgZ z>-80T`o)adpSBh-BawLPc;H@1eAl&4qtISGy=g=CnQ1oG$qlqTw#Q*g58Wp#}b|t3{j{vDFgK%>!mxZwK~h{E^i9{0$STshn9yqf7K( z+gh;$d55@;Gwt`QGjF7g2>ShQ9s3q4Gn`{nz!g?G_ue{69`Gjjem0&?hePly?tPmo z3Y&vZmw4Yw$Qe)g)&KUaf6-l2V6|5axA%YMht|Es^qh6$C5f3dkkduqtu#=|6Rh-~ z@1E%3-SmA&P>T>bQZq<_)A{S~wR;-@XIe;4Ktw(pRm{u!4O zkD&o#`BtdsU(W~LqL=b4#NjGFdyI(B^j8RT8ky!$#o_x(kAB~~mo>2`aX7YFVY6*; zP}WuK>Fxe1KPs_!sxX?uw$RL6^GsNYQcCU zRox0#a>5C<3!im&nwp5@EJ0No#(Yol4!4o zdiKZbT*EYIu*hTmgP2U;?oAEWi=HQrzR{oh$q@oXlk*@xf3{im17bk&&q4Qjq$QHu zb5}dZ!bi6%UB7i;YAQZn?rH2INYpomU);wgdDSiM;>`1Gx!^_a)B=y^_n1|>80wr` zGil41m0lHOtVlAhTbR_^0{ z3{DbhE5^Qle?rcO8tdrfi*TR#eU?+bTao)Y3g^XT+F#WV?HJ3s>S(sh+>KE<95P`B ztRJ6SVC|d-wWq)Cu{P$8OYHw$v-y3$O%)R;-{4~V!SV%rm$>c6Icoc^Iw$!2siULe zoLi7>Y7=bsadeWr8WA#LCAx~RQ{<%eddJR{|7St*e-YeMS#v#^Eb7Xi$20eobv3vn z_)I1T4c`{6tgA0M8G#$2z$fbtrVVzOG`nh@{Xqp)+xx{^V`e3LVv$=M-Lvu)ih@tn zez?1Nq&qo#FdB~^Psb1pN$jtMn7i&B!27?RW@O8%_my?KVj(Jq!mbwiZaXY;=WE@H`H5mDmH$^c@aD(&Hwp)l8k+dlB1Rzh zmY>_b+6dTvybX6J+g{rV>Zi%dipE+P_w7xxABI`VrFuk4_r>s=Y4vSEECudQOWGEN zU5=Kk%}zZ%s0>Hz_!B!zrvj~u(Ox%)r}dY+e_giH18TvY=6YLO4#3|+y7PxMcZhvS zcN^QEM9#2!toBFvW^1%FPoo$c1IKouqi6EsY`5_%bskfcKVpB17*}jp%33S@ 
zf8mOQs^2Xb>b2%n;130E`ofTggCV5U@K76{kK}tE$-jzgW`zyoz~%AMQuw!vjv(5a z+U2FhqXkE5fmaJ&$u;-ydQbCq*VyOm!w!qOaZ*dw3tDT>)craUn1u~cxuP?so2+}@ zgXoSLx~}}wpRq~D)+AR{v56shfvrg`e~j3qTCWI>${r%r-x%2^r>X@OJzaYP8|zg+ zpDIt(qYUg|JM>nq#vAV=?uE?Q7~?Wg4}Xa-G{A&*U&kN0VInEtx>~Clk6NdS^U?;4 z8IejP9bf}rJMH57iEzWe>+0OUA)Kl}@;O!8{fP--cbZ0tAC0l6N!2MjpX=?R(@A6f z*uq~Hbt)B$T=M|-xnY$1R@4NGJd%4{#vZZ{R}A|9$bJ4=*Sg{WYZvED&Q*~w;S1HdJNX-aZbN)~QD-$hnxCFE zqz(~GwY}bDf=9<{zr%arzN(njva|VHJEf-T+nYEP4Dx0p7~k|W_W=Wt+~-{8K7Caq zlds{k<_~?j)+W*(>jTS)aJ#lVuKE)K!Z(NNkGa!@Z%LTlKo44p#Vr;`fllD4TMD zOkSLw;!Ml;uJ{R@e}6~k)I=K+B zbJAjZ-ur3)%(ymR+cFb7oxgPSY};A;OMH)MXzp8Nc+>vI9kwpevDcP|MM@1|74~1i z6=SLLT2kU%e|QQpYH@cPcAtUlHaHnIQ)_IKQ`P;M#K(7uh2G@; z7`)Wl;G8YfboHlU)Ur~RKG*zerDiZp;zP4DZIK()v1tKrZI2TZwBoOLV!=^RHN0C5 zS>mv=e+KP->O~%M#fOWUQE^wk^I85~?S4_;{)5v{++A!uwFz~H`JC7Adbxk(&VH>O z&QI6(QDPJ?+QZ?=E%J|PJI6#9b-qu}kK?v60(+3p{lp>mB6ou)!rs7J3~MBtS`@Yn z#W86ot?AoZ7hf%}_YD0k`60n4x!_FYB(ewAf8>RU;F6SNxG6u-=;+QGepA2fv@WC_M1caFVMcjSikm$gC_vt1-kQ&^Wae@lU|nOb|M-6s!7I8=xWKKG`zAZ{lWUo|-U zGyx~}8JuZincP{14v7E0?U_#@-=Cwu=9`0~jg3oQ;1Wk%y1zk93 zVYGZ5&QmSS`iv+ZU_-drkx^ZE{chOMyPAF1;Y`9SY+C7F?4Db#98HDe(^soxEp1+3 zE8_4WIC5C*TVX?GAAjxfN##@%8k7umzg1mM_6q+CZiPR;;tjTn%L={#{LUy6T&vvY zB2SjPJCyr5Cmr?3;fbee=}WO^fBVqPgB)zvF_SSOU?3Oy9RD}hXXAzql^ven(WnAC z38~1`8gN7;&$2qs2<&yQWx5_MA$@Qm=GLaArRuzBwUF^rJqo&A6jNGlu6K3vaj*NQ zw+VLyJ3pQ$fO!176V^bq5SI@m}6UWQBMui3ql7T77L*o_+3msI|1Q z)<#c{*l|wnslYWPHpAB6-4TFe*NRI&4X?j(2o7%SElZ0?e z^`+CSI|=oS+$w4c#hIawug-$xU*veQ|D2iYU6-1(;)Tpn(NO+ExFZF>spOjqR$Sz6 zb9KgqFRacSe&PBW#Whs^Se+m76<7O%o%hJy@exI=hu`pu_>S`ofA56cK*a!j9--FH z91N+!NUk~Bi@cxU_@ma)YUyXF?Mc4k92NHCi&!X+gm2cRFyHmp$rovFhxna%m;FAx zO?7QQ9n?8{30nPNT%$!Nk*Jeqt<%=f4tM@Gknxkl1btRaBks` zm+^zIKS}(soGcELH8ob^2Q5_mp=-_Y8|yB3L{hW3au5E>Rl=XJm*NxZrLFqr!Abmo z^EJc@)TE2^DX~OstMhdBQ0gS_E^^j_Z?57G6|XS5+M5s`>d#36*67DWt0r+khj@*s zX@vP8pF3q!e+z!dk2qvcoSP*VogJH2M{+RnTVz%($CYO7x`I|~p ztLcjml^+s2cn^II7oj+mD+&hgkhXkdEqlcmi#PzB>6}gWyDjaom^lre$WJbk;t60^ z{A>_!AA>qUTv<>4QQz=d=GRklrzNBkOuY21FN8vEe+{Wh?XHL8ay>&hMp7Ro@nWDR z@qsm+vS4ueMx@+@p-6lzAc9()9w*aX$#KxyY@uA0o$PACqG-?(XH>77^BRI8onxC0=eb)fbqf}mG!0P zJIxOae`^0r-+Ktsla3+~=QMX%7i=#-zoMQ%3|`$+?;lOaW_0*4@=3){_Af0h63$tG z?k1{(2e6YdGyc4b6xfI*Qu4N!;LD@Fw4y>U^(B|R_Km6F)XAys1^ytadz=Z`*SJSK z5UQ9i`N)Da(KB#;&Yk2yi09R)ry{9(cG1;+e*v4rHw>{$Ys-Oi`i-lmd%^pcTFVo@ zKgL?>NHz+*!B=cFCdGP7ElBaS#GYRLb!m55?^~b79E^qgB3Sj+mR_+q>TBhFk?(!S z7e#)>-7Mm>KXYj9WR;k!a5S(RYqk)_ZJ4Gyz5g&0MP8oIyE)F(HeBoSdUQRxoP#>|j*zV()X4`W#%3LshM$q&1 z`sjl-uXGHt*K?=9CG;j>Yh5p2ewuR>ljbJjDT|q|DlZK z9sIlQ=5A~!_Fy%BXby9@3=%H{bA3t;4@|CIrR8h+ifs?Pa1{_7`CPh00}#=uw~63aMF0XW2$hDvsW*9zIFG!H@gWKn!>(3=8ed?DD7ww+bMjluH7&>E<4mg{Ta;uNtY__ySNa0rUesaflW@CR zM))x4X9Q}gyW_4$>`~0~gB&%!f3UCk#Dq$BOlF>V&BDTygI9liX){ zt!wc54)1?MW9PxRWco16IV%lNbg;H_!*2T_Tvo9^5*MCkEnJ7HH>y0;f9{02&-nxE zoZ@x@ZwS)gR`sdDbD%wV&;ejwK$Y1f#zxtJ!(Ftf$6{ereMM6^)-~?oPXD;4Dn?R% zl08k;zDqn*v8CJYV8_;ev#XA|AWY-}Boh6g=>2Uzf7yDyA#* zk#F2g<@mxqFa9oLNQzz{f8n;ff~U#*8W#OXy~OD`;r?VyW_9u@xLolJ)yU)OUYmlW zM+3)%h-H?YizY#<83iH zQKL9}{WXz3XQjsk$aH+~^*#o6p^X67QDYB++4-`Q!zSsTFy+-7bHHzS7&b5g9C_dxYi1UH+DM%*Rxpo znEB*-*3{nr!e!Oj5>A02Yqds(s>fCF?dQ5BWU*oYG9Sb5-{j!xI8FNgzs)S2nS!vFYAU}QEI zRyfx$6Eh85*9S*U>8roM1-{G&c9Jh3l6MwZD}M*Be~97??%LbEA7PI<_rha9V2L(i zUk*h+urTem?mZ8wJ7{_KNtErvRXq{YI%XijD_=QfJBD<8ksA?(r>2*42FRORc|IXq zN!>L#8yKzO@4=UeWyx>3bHou_Fa-j!WN&e)^}#T%**U^z8*2=I_SwsYPnNw0V6okE zi~iCrwmU<0U@nNPtKm<&Nff=_i6}+05Iz5oYEvCtKHM{7ou6>m%40xdeDx z$-B$k#8(&%ah|ow;fl4{a=&3!?lbid{>WFO$Jxl|+1FDu<5npp&CJi{8t$T-XwLCO z9fg|e&88Q%iGxyx)Zs0gq4y^Kd 
zS%qr6Yd^SyJAEDwpF=Pfy=olEsfgW9kXUV3_DX#r-_HgQ;ptq{%kQ^=aM}V% zStsNaz*=@17*MEz9b`LzEWdxDC%E3Ie_Yp44`#&kmeyZYKD7_mm;k!)bQ3u{U~BT8 z9^pQmac;LbT-{Mq)nLod1;>k=*`+;%O=sGI(}YRg@vVS`^o^Ihrf{}q=)IZD4SyNe zc8q%OP@5r-vC)hqmz3H89AxrP@|M|rEB0Puav}?=L6eR~T*LeHR@J8V;p@kQf66uG zn!9aK2!X@@ULJlYbT<_ROI>nQ(%%jklQJB`VwFA|@yYdx_p!SSe@4z&Xf8tUmU$=?*FRQYM=biGTdZOeCa|`YEK9>ydzy4x^xKiU^<<;Ke<6kj65$q2RtULF`nlVWlDk%t-S{*ayc# zr6UI~EB%JQxM*t^JPcsPceifM9U#(z{gP+$C+4MB@q)N-Y-;d_;SPo$Ez2+VxjU%7 zX;e2U-R=2;)Jv#Q3C~v5f$5c;7whzmcV2l%KVqV?w+bKpBX+;qf2>i;?tv${CEPor2wMx8Rr5F7o}dLufnO&Jltn2Q}!oA|}#KXla5|xU4^%W#SyE zSGm4vBJ+_n>#!X7AM~1>Bv+$ev1!~`O@MZ9gGxgsl zQ$iqQgV`F!vl+m((9rSp+#Ic)OFP!Wt@vW+n36DT(Txf8VSb5Q7@6%b3M<3h_Iw=^dHIzNfj)%sI@bc@vzOX9F{nJ836O4oH$Z zb+`*`q`O1MUc&4B#KwmDI$QHQz%=2Brrr(?Gv%R%e?t-;^oYf%?@Tza_%HfoeT})R zv9Xk1AQ)7f4Z#c*M}z#R>{U~DC!l_C)j245043j@e+J{^rovH&AFen+#EH`1)Y84` zpcT|e4Z$*0FXm=;zsZfYzNb!E_tpUmI85@^aGoGUG3=ijx>kh~oHkQy-SDa1QD>6= zPM1C)e-*#)s+GMF06%LcHbs~G7rTMhimG{k_h|p>@h)TTtN;C_@8Pm{^4eZFH;wn0 zYW8tBIPKrU@Z?Y7wQ>wzBcpZ~SO; z!AL}E$AP>5SQwi|XM#W}y&F7BA~`^(^SY%_f8SUeXlx=b2ZjdcXbl%$C?0^CkO*Hj z=DmtiHhcoHjp7Gyd_XW=h2^WyMF-?3cjv|_#9p#b7k}Qid~i_sz4keSkJMfbPjJJr zTMi?s1mo!hbDRF)c?&T_r1v8;d0+;c)#rYJ*JbU2$=D2~50ZsEC-x`enGvsqn=bVZ zf0JQEJebS-WnW0WH7EV?{=egMl^^}tTczThlDA*lclfJUj`l^faF9H+ys!K@>PYQL z$LGNj`lsh{PFbL#mrqF9n;OkFeF$Gut%jWy{PFLfr*-ky6mXP%?BxuneA3|4YZYPc zvd&=Jx43{9jN0DWx~}==iWQc2j(@ipe=q&57oz$-s7d#8lZkRXJ27PhSYjQG6Qr)M zr@m!0QBp>qCucHu-WF$_7?q!>waVF3;hj#4o=<7yX)i) zsT*UT-QxMlfmAd434Fkh4lx{m-mh(Jn{RyHceMGUOOMHs-y=9E&!i91Cn%~>yOPve zQ3O9-;`*8gJ^^dWWWF-F1Q>Ide|Lb-^$1%4aW?jb*o0iq9}d<{DxHE?qd~jPzwk41 z0UMg_QNiIHo5YT~M{Vu4r(i8HIIlX(Ev%mCLnz%IBBtU@gu>szCY5mrK0(e0=Ya5v zuUY!>UB9P+N z&)DlWc;>%(Mtwg<0wl4Gf0+6#_$lXh{sb1a=?Olkfg^%Zi3U*zz8OPBp77r}zKOg{ z`3KoI?hE@S6y4wZCU^0+Zw1yKkBD`6w(q+v-1)ZnZr}~R<~!$Oi`?_aUS&Jy#Mr$W zA|7F*)j1t~evjE66PLZl`m6ZqHCQmZE&V!}?{p(|e|M$2#tZ~`-^|=s zOlJ0oSXkYq@4W9!EuPQI{VD3Zp)cK`Dq1J*Yes=&A5iK1(; zo(FEZ_Hf-K4&17mf12b=^njK)Wn*?0+){h#&{teP9k+1lCw?v2(9hFyl2xVS&)*$l z89VV%d96JMk=DRw>g~~UY@>nd-(lN|eLxv=)Qv`4$XnzwtEZ~Ou^6}8KGbV*wZCaH z4Bi^IbOfNNFjt@F<~zECno$!nKhknBKJ+j51VlvnGHOZY0aKSYfnKe?HE8N)7&yJhgEd?;vj2MSCWLCk?2-KDG(8_1#KLlK9yout zC-+8**o8iYf4V2C#&r?R#YvfP=QzvTUB_rx&LwHB41Nx6DeZC!jLrN(9?X~?5#Wt;@OMDEIrHZTYIFaS znE%bc^N!T>Ui#Vs7}_VV_qJ_|0sBd2ZT$`dp|Xx{f7LSjI@Y=$;3(D40I3iZix>zCvzI zPCk8Qalt}rjV*r*yGyMkqfd{SD8J(h^y#opg^my$Q*i7b=cTLapQ>sZBfZEYR6r5t}7w617ak=uv6$wWuckorw$o zgFnu0Klf~*Q6N7Dkw%)`&avPi)E2BwgU$MB;=v8{5NkTx}HhuskpDy>HKojaKm}_l?i1r}?CPhyG)@e}gb+ zcTYybd8eJioBgo#nTb}_DF!=%yAj6BExA;x^zoGM(?{!3eny_V z+ZiU*V0oW(QXs_H(B{}tQD#!OwmPHVe|kFX%}rOU=u^4XxYT);v#;(%iIv{^fQ4t> zZxnH-WgS{RLV)pdc4+jdIl+a>>7R`}>Xu`Id~DM*Q!b*8mCa9RWOaSh8IA()Aq)?b zrQft^$e{n|tzSDy{T}3pywy8p4!&5h&tY(``R1reZ_>O6@1WJXL|yu!CVm}me}*1? 
zyx+l96P!zUK+kieZfFw1FJAem>MRy^a-yEM;Qjs95tKMt`4RQJKWZ1H9;NJUM$bEA zW3~a^Q#-~aqh(<*#GX1FxtNDO7CmnIwBJR4K%RfFky`2TZp@S#J8~a~Ee)MN;W*EY z7Wws#fw@|04+RpU13??CrNuCECTafoQA2-vJ87@8lsGSsUhucrC34He!wdV=4w@81UCTPpyJpzr9Qn&^)a-Y)hgx)o zC$BXZ3>j+mlSQ1;*NqR6+=ZHV6I^b3HyXVjmgm}eZVI-XwY%%))gH8rdh}5y{Eg~k z&8YbuO3ku(Xi9wR)Lps5e`THc2WR1$a|(x8`2OE#OKOoShQDFaWh~F{D!7Ew&m$OA z96Y_{S${Z$IFoqP%j@PuIJ|nlu}Z<8+k14PEbnEcwaxH&=-|&g>qf_1Yu%Pk4~3Sm zOwB`+gnl_@v^zOml&wvI_-WLh8D@ z%GUygCCYl-?9KdU2N|){>SV_e^=88z?vLQ*wgcZ-?EHsz>b8^I;qGWf^E{s`t z5OGre*;Q@QXm_YN@i$#3#IFB(9zMs96?Hl^roZjv)y|W_H$?kGFux-{1*EU}+CPit zMCXkftySH(c!ul%Pnwt~nYJ%z0)UU8`wZ^K@Y)10?L8LBf72UYBYv^;hZ2(%c8h2I zqJIJ#O5@A?ErdtMnxzPlqxT{>@vstBA3W1KzM7hpu=iso+32f;XQV%$_(voDF>; z)tA~wj6TD}f5><)ZQ6m0o@x$j2!CcEKeJ~~YJEHJyi7CJG-|^UFXHRcKbE<5>v=lP zpV-O=IBJ`ofkvaW@oBMPFuaO>EXgMXHwI7WyvVFmU#;`6TuGiqK2HD0jTk9BzoT(X z`SvnTml%6Z-K4;ni}fmW2t$kgT76>-j8Np637>MSf4cjs5HsI$Cf+N&%r=ux=j%Zz z+PeNn7_g4`4lShyx_WqI4dCj78_2q8`!~n$P;?mA5`V~j zRe^3a*AeHp$3lo*p2JXMxo4^OV%;0x9#H0F0B zXZ|<$v|~pd_?2j{xE5wvBc61W8n%M!0^Ma4o;F0JpXW4jhf3@x=M2|AA1S z_c}-;X!3J=u<<)bVsz?`9$3s0EMVf&N1{jjcdYGum>hN;b&qO$V|A$!GxVVea)+{$ ze?tx->*{Ri)%%J26?qgn_@xaCVBqwQCckh)Y zJqX&2LVl>7jEzrr?$6k($C$p~@kaUze^9ESbX(hJ#Bg(HZQY8wG*G(f$SpU7phW6AE-EH*1%+=-27{UoaJBgaijLEX>f0-ja zUcKtTZ+G+>nALng&1qgIyY1BWo#{CdJGcZRQueA^b+K3QK6P+>wV(fBuNW`cqxQ?* zq4BN!(Kq%hxFL3(^ng7-j+8Gzo=EvU{9iR=V2ku6y+q{1r-fU*Zrss!m1KpCCiree z_U~+AR5L>l;4cyX^!i8fKbKf^f6hbyUVTq`ffOzU2?KmxsLBA^GU`BQZKY0C!~>_O zI8#5)n>OTYQqTLgKi^m&y;)82B&B$Yv|JTK)*~2i$FS}xhIKO$rT|GjSFKua<-?ypPF_LfA)dkWKFs;s-L{lI(aS(>9g7MZ>+{f5M$)PK;gr@t-oa&Sgmw@aOz z*k6p7>c?5j`koSIgfYiq4!ufvA-o60%fGHMypro0m$-_;zpl7~fA9mpYl~nuXk$h6 z&fH>m_&3I_MpxGG6y|e{dCIje>q~WFFzP0%KTNHC?;P!JdUeI$l6YU=gy%>6HW--l zm54-z_(y0I5* z6-Qxt#I~X#7`L}sj3$Qw1MzT3YoPEJz{3c_cr1S!F#?^^K_gD{?foHxjo&t>X7_ z*W~+GofTK=USoV3jc%r1Th;vi#mytuKDL=Xy6>K>V0x&1N?$yB$Qv(zuUzzI0#9=Q z^X^HDxl74oe`kG)7(02y7dNkW=S0KJ_3r57yw)1lYV_JAW`Ooed5^d=#XS;T6=xry zk3_v!JK4SDfK<3E`{S!UpjVoj)MW~dZID3!_x})Jt1a)XoZ9RK~sYRBEHbe=DgeooaVtr9NzW6Un3O&#@+JS-j{HY~R(ko6E89Hmnu0D7iRQJvH@RGS&20@uVF~MTdhu?d-+r8gq ztyLd;ykT}KHExxL(OW}q9Pr*YLihXYvuBVI54Wb_JvY&>Y&L7(2Y`@s|1VG2_l6Bmmu!+y9CxHj;U z-@Vl7VLrQzLY2H&ba|iNs5D2{P-kt8CD5HU}8sWd4kS3p$a@rN3x4=pUkohhAtX{i^RHrhgCrlrU$@io_aqj!3lnA|fSiNDmK44<3u ziZl90nC7%u_{0=0{UgU(Qsg$;%-+!vqND77t^XKERq zt_jW<%&7r+#sqmIIXimF@LzP2bb1>FV`=F%s65O!(HeckKZf*Y>gK%Icz^cMljAZZ)YsI1Wu6qnb5NSeiEWtD zK*~-YPbcK{eTQgEoXO$gN8EVV|0U0Z=g~c_IY7Zr&&Wy2kI6s(JO0NLga3t-qMwTY zUf})~&k8>$e9oA+Ej@_XxvWWiT&zprUU?y_p21uJb?!IlJ~*_aGT2r45S-znMkz6S z*?*r=^!BL!v0~m&!Tq}R4i+^+)5QhP}{mv!U`)KojW`7I%R6er7 zMsz~Cr-3`6udlb*qRT1w%aPQX?EFR6S)wwT$x-lf3$D-)O}y_pE($ZmZWP}uZOcs; zk@-%ujen-jpGZxBiw;bUzSKHS43v|JZQeT&Mwzw1U`{+Z9B-W=@epBRGac9Yegfay z)Zw|(6JQ!>yE0E}zzTzi!T&DQW zZ+khSo-ot~CX=SF_DOJ2`WrUV-%w4owx$zbFpEg_1!{^n#aKP!R5(~-Yv{&R_7*&7 z-9DjZH1ZZ5xV_9BTUI|Atw;YBb#K9?nCVch)DyV5)u*v`tVOM$v43x+_mv*c_4<(a zcpqJP57ndSM_Y;uq}dnx*_eYhYijhj364|hsFblaDKQFl#IwDwty^cm(+iSym&{>ix_K{}f4FHgGdAeA(?^Kj zje*YNUC8Jo@guX^F>9UkXG62!!z{b&NKLBX0nqV4AA%2WwtwUomd^{{?)HqrvqNu^ z81S$kR9~3Ih@B(o7ym>qXFKZ%jcnLxF95Ic_{9Bb z(Qu31fyW4*UVnY3VZlG3o7S4j`K##3bL)P;T*rGU4l^eZPHEB+jq5JJvaPQk!9uNG z+^V5R&(N*A5km4!G&hL51&3mlPFnNRTUje|M| z<7$rS8J)9cJ5oHW07?tSDkE z!G-0#{D1T{%$vN%CxVs#=tFsEvJd+s_HqlPZM1QYm99dg*tDl1{gd4cc)K_S+?m6qq7hD;B=OKNY zH|g);3@0snb7*V96R#z{(ZTBzj@W|fYd$VZCHCTXI4V9(D*T0UQtE?79B-k@t{{z_F5L>yljnOn<*R z(H^W=au%->W`Mpy?6xlZS~s|UkJ%f!*L|x!e4wX7&Zq5}=P?^FipMxH?-JeHnFh`Y z&c!Uwz}eJR=+nHQ`%w1v0E>U1X_ss7s{c^%7*+Ew`o6#Woo?s1oO!3}YGoWHdIIk2 
zaLX(w;bsT>Lugnl*JPC1d7k0#j(>e0+bg|&$*JY7_Xm2*s%>V2zTCG%W|25(&wX@; z&4GDI9yldTHT0{l<~hAQ`W&3HkiI|Sf5q3jmk#<`g7O~S+dRYV?g>7s7r5!aSa-!Q zyyhf|3x_smp}kP_kX>VmD}VjUk1)W#$)n*c1m3F^B_`h~i4*dc4e+*~+_)ZEkkGh>eEo%A|R zcG&ZbhH=ST;}JNiG9l_tG#0-3SanYt0_n52wyh{xlDl^3N2dAeNDf6mMUp@vit62U;KLIl zU&{O8h1n%3`e4}VZg?JV;UZgu$LD_fG`Qro{@us^!#F9t**C*%H_XsS9DU5pP2Okj z20eET-Ms*SXZ!bVn18g;e%di3Q%}yKW|g96eEeX3ax_UAG{#9^8#A?N(KYHo#LQUe z2hpq*JZK|gQFLQGG<(nt%sbPK#Ej_vGeljuPT+q$!>ENU7COAlAP5dyYQAth7NZfl zdnawquu;?25FU)2M-xU)(#nLtuCx}0H*MJ;Yj)#(l|6DdK7U+^sbJd*{jNuM&z!2y4dYJ4#+f`p$-T7azii+4^Z&4cCH8ckL3~|5 zr8$dEX<4)SOW*8uUyAz1cW?7=`;lV{q^=KFL%4${tx@q##~1MHRj?NA&f+|x(TMG9 zSj?z>=Wg?{|Bf=lbJxh6^1Kq*v55A9?E63c*_Y?F+w5N@f#dY>I+XN^uh;I3bO1L9VxP6|!$lbxDSWa3yicz!O+97`K9~lx zhM38~f3rpJ>rr&gqz)i`uxtbQy8I7*7V*B7=nb(mg?>n(vG-kH{bPN>%3p1D1Ys!a(}@$lKy+X7z~Dsf&SJU7zx%=a=gtvbT;z^KFBcV6x%$X8YsF$>9O8Q zrRHDX#LN^jADi7Et))Y{W%XIJuZ@m9{eu4WQ1USHv|QBLA-3q*hWx=kGV4uG+vMlT zo_ZSG6i@WJT4+%B{b0{IEAn?0CoLJSmdmtkRewhl&1h9KQ@z>h-d{Cr%kwFCO)46L z%z`iK&D1;|zw}!3xmRmC?>RmFL?n_Q_D=4scdFqJ+#UrTx;{lOo;gtXBr~yJ;HEZ$ zD65h0(u3|fR>nXR7Ki?lmi_{VQS{=OQOE1RUwo}Us6>LVU_FU1sSfxmJ_1*fKHO4Y zk$-)WbFFwrVLTGdrRZtH{wJcLtTbR`Z*)I|ITljm9z21$x1Y`Ysgi#^0WC3e3e98U zfk8E929lqFfLLGvrEB+(nDSNciwOHDC{%_v=ue^=Y*U( zT)SY=zh*+rT^zM%gpni#|@U=B~wiyQ3WYv#oBl5hE%`Bj-| zUwdQxz23sZQ0sloAO7Wfi;w=!sianfHmJKsZyimo?lbiPW92vd)nm7kBgXmbe1Bxk zjMwp6Z$L~32gWDvr|B&Gm(Rf5QGZHr0D4!x&AI+Q=i~!!5l^dn@$GI4_Ml0BOzD$Y za}_g4?4V+Zm_h5@=hUAF<}GW^5p68}ISp`&hb6^a^?p+`r{595U7llReGJdFk72IP zczH*&#V{2u^0-@kjd_egz2Bl{L4U<=(Hd(sZ;6OT`F=l?p2k#?I)$)aW8flZqTeLG z;$_zunJI4ne2&nAtaQ)JuG3H2ICG+5+IR;St)Y`ssgR5O<31$qth;H3=FpvYFAwZ$ zb59)>{5~GHE7m{W0d-$h9)pgUFXujy^;Z!^iIK^it&4EQseh_mfw^959)BLA{V&cG za-3Q|qqjjgI>>}D|JpK^E3`NC!54b|NxP!%@v6MXq7kd^vD?>{Xv_+~B_~PY=MAk( z!b&dg=Pl|H!|G?lc*jnvx;a>|*wGvRM0^JQmesliPpo^A%iJ#RL(%_J@VSxVpq^T4 z3b^MeyJ{4vocvm2L)Yu-e}DPhc;y-B!A7&UTe*J>^0j6mXzYc|Zc3^j*!rmuIi; zk#{9}+vn`bIrw@4&(`SoL?0tG8`ULwmQUG|_g#Gvyjo&_HqlYjxTjvdj?wAiENlB$ zaCo$eld-$_YPPB)iGM|(V#V)Qe!RmuQ+_hTnSZK3TJhuLwe%O=I8~nTb&5R|J%rMxF`q(e761PJy)^x=V4FIf z*8}^dFV+qE9^kQ34|mVB;+H<>Yuewx`3<-a^kq=v2E3H-Ls_mFPxw+INxj|J_Ir_o z6Jw5d(m`U1vn^+t-%)MBI}Elv^#0o{U)FMprlsh>MS6>TM$cJK){xoIG9xvq4DR>($#{0g zR{S>0SO@!#J`8X-0X^M~3-n+3z{+#>xIUOkwX$9}&fsT)*YW*2bMLxKJ-KVISqHc| zhAJh=jG{QbtAFMz0PADh8B@OXvk2}etjg=!NVHknf(PaqByff=OuS*P5_`CFnc=T^ zi`F~1uA^|>k*048e^bWrMGxlB+6OpOIrBk?ol5KM##a*mdhL1mnQvWnrc_*}W(5`J zx!_O>zWF_m^{Tnl+l%yWC{rNKvDiGz*0NNjE&I#|YKV#`xr`qdJzn&bpY3N|x zzoI$fw14O8<4)}dd~MIC|AKI#k9?@;j)7aK`JGCq|C%Qi_TiRy{nmf_xgTh`sr*-F zC`mu1)Gz#T_V7e67xpe^zJlMb;$M2!tLVqg2{Fn1l6E>d=Y0<7WdBBeiJ7d%>anS1 z&r)V3e5cyO)IMl_?YEb`Lwy9TTy;jxcQ9lG>wi)Ccg;FQ2@72*usKKgGNaSS$_VSE zqts)xc<_37n+yMkT7dpC`lz^*cMp?rqU1c&rNMco4?$)qOMW185RAp4T#M=>v9nqWcn9elHHjVIoAL#V zEPp@x>2mPNxMwSwZQ3JQUuTT&8QNI>d344sZfgT%{f+nJrE*SeIP%yJzt4|!!Oca- zE$(X1J~QgQmYc{d#@Ozo*hjmt&ogZ?9e6zmdF*?sU#EKhIn|qt888wFBKd4~ETI z+qWwT=S+A388f;>gITmR^uR*5G`AM$Oo~o)e5q9i@go{z;6eVlzh@8k?WX>fBYy#{ z`eON8)ToeP;_S@(m=wXLxqIk2Q?wfU*3&?GVSHxoA|tNyT&Xxu#ZouzwQv1= z#dW^vkO+?Vd$zayKE=^d^+NVR^ncUKnbC;_R|AdiSDB6Abq7&KjV2=wx7sa}H4YFa zF%yMDANJm(30DO&-M_I9(cL|qEY;g4+CigT1+H5kyEw`u&Ro!PF>Sir89W4dT-eT$ z-*{MYCTHkRuvgT6tmgA3Wc(V8_J6q-m<8=MBan*8PrK+&HHdGRr?q-o1b>LJO=W&P zd>1E|{DHh)V+6~;AD>F^$_;N8oq%s_<7N|o>ablg0fpK9`!SH)ADPtHPoC$1ZnQS^ zUY_@{6=n^=IL*4%#NRVlTs!yDgqi8+3l2pyyGssO!;V_Jb=0W)v0(BMeX46{k`b7W zXpgU&{^{rr?`q(sp3_SA)PFh~fU0$m+$R~1o6MX;HzRDq%V#DeCrjZ#gXW9i3N&|3 zG)wgZsydYnVlUP$vekS(@fXASmbv2p!&?0tF%J0gf4N3*vi~`@8UM#?BUPOXb%O3v zd&@Mc6he%F=b-%HmD|BRyVY66){XUo&rz)5HC_~(dF7C<;UF-tqJN!qnPHQd)d_lA 
z3Ay!Nax&&gz!IH&oE~<*^#H}STaD;vCLCF#Rd3|0=LMRb=w9g4B^;UQedW}E=ky-6 z%q4ozn&@V~d=l3RFOuA(7j%ao7Z1%DY;5m|kb~UWdJ-%djhFUzy6)e@-0}tg#!i97 zedL{k>X#8aRcblho_~X(7T36!9uL7ZWS?#RYnAqWjiAJrPYt4 zZTZJcTfRRxefODuXQUbRKB{H&erBES+c?;s5N%C0hRE zv0m-tzCy0oqp8o*o*Rw6(`YdV%~daeck`r+teZsaSk{_qg zk-5t!*Xc7nnVAD_`@D>qb!ooKjZA%yuDa(i=uZGUvrBMuCw8`#MMmqrk8EaOp_)$J^VheN1OUr zomqJ@Pm_DyYu!yrQq>xKc9J`0Ev43CWg!X~#mLz9oPX8FTaUSV^fI5}2fhq0SWY#E zAC8~Y>Q1%$9c#xqY1!d6LHl@8ti`L|S=HOR-kSvoz>(+Sqs1>-o!MrJCN76r`jAdX z>N{P?0*R{c6gm#yvn18NDn9H@A791B#eA$=ZlbH+Xw|1AxlP#@^R3%gaC?5&IBwbp zoJD#i=zse&7NaJkLE317&_}ZKYxnsm#5N}QxFq=spPm=YVR}WEuvMM@mww|Noy1go z?%vJOtA{7etS>dY+nIk2d~DunG=Q+ZV?G4}c&86%@DMq9I(VT653FwKq{lDxQyeqO zzudFld&zShzx`3#&Ms3}|5(>O@sg^WU1LXytAC4HO?iH9HJ@AE{&vsK#3}!iAKr@= zc<%42pVOz9yqj2phP3DISH0#q>vqFMqnx-IACT-uE9Ti~`*C#^|Cj!zHNXx-iJ;y6 za=?)7C&NoRZ=cv(zxnjCcF~FVZ0l0{nw|vrX+5LA^&k2mc$kCcxyV~&4iCD~68jar z-G4j$GP-?g&cCu@iZAtp`y<@tk`M7C)-3KUarjMRR`KTy(T^2n{tjt%7iJxvVp_rgU=%(kJwb&`Sl`GN;Z{+kr}@9_7zl$oE`Sh~FFkpMiQQn)C7YUVj6p z*ZZM=ic>YyP5K)0`F`MfQ^SKFslnSto@Efh#b7wINx!gY*lNtBIL~Qt%HdMd)l5&p zl)Z5z(Mv+Ej5Y|vYp`$gHv>F)J`eH{!+*LbL7GM@ zW)ca_dLs5vTT_n|-tnd(ni?`AP-bgDhv^TA$M9#&4YYM$%RasGTH-`M-3`-&-sO(Q1He=-%Rx~6x?y8ZFbe1 zCVv;M6S-vR(~7Te{r>CBn}2$GLx-nFU$qdW;IeL$rIANLYR6MF4`?PDGPmNse@TDm zfat5j?P=WAx{I0F1-D|;)t2*Z?%ZqJhlLilW4})S7_k(y3yq0R|AGeh;GI}O zu*26jvvTOO9Q*6;nl|bdvvU(0JO|!`a66;#JWzTcnAytuOU)(IpMM}b3@|~|zThGp zt;Vx(Q`{Yc(r1Bwhi+o_wn_lp0UqXX`_=XFj|x z^t&mj4>tFW53%JdED}QIQU*`jXA*#C2AAQFP zb1e2n^;q8Km?=(Elvk!Zf3baKE~_5&XAG{JzxvJIPR;%OJB_p3*ZxW0>~FqizuWv5 zMB<*AjI6_<%RVA79CmhS8xrGg1RKGYv)=BawX@GAQDw8P=6~d`ci?C8}oRBf@tdbgI8+~R;6YS8>`xpJ=wuulsyqXH~svw{@9=M z5xyN7qU$-j4uu|lRIN_rd^==u1uwYl)1W1~=x>X6$W&7=#9dM!uWQVA)~e5+=*`M} zBe*F_i<0=7`V#%5fx<=^5y|g8?ytXB`l_VwReGojo`1h+t4F}fujG;hL)EFrppJG} zouPLi+UoiHb6vEwMGMkzNrMbU-5q>g=|_sCspM1e1}2D?eE7fbyZfO@Jmk)n^F)93 zNL;zlKl_{&H@oy%6JncUM!dx6%$ev8(Lr>k{ZWd4)@^6#FkjB7F?caS%gzsi&GKyLGG)oh5LT&I7~Cx26O7QeN~=;15p*^jkrZm9YhWxe4} z$(jnr&+Ly|{C(|Pz~+@RpRQbC`MI)RSoIN#zQFy@oU-7y7JJGa`ske#cbYvg=SBvc zPQBSF&bnc+T`VI8bv}D4>ChnBsru7f~wVljTaEFWU_XU1S z@LaDhiM#yq?4ybi$44=q)wGS{3hq2|wtn!E#$GBk1lkjSyl>O^svo3Rc7N?pD$O^i zQb{+@cl3JE$44D)?AqaxS^eN3dxq?s;_ezP2<3r_xfMV6SswB)&x0DG>?;+nAN>{9 zT;_6f_1jg|r(1nu&cX2cLvE?zW=Kr`0-h;-=^vO!0*U-=FuP=C6sa zohh71cUR6H87X});y_z^9DjK~F8#~GO*Rs^D7gdXY5CR6*S~1Xryq*BwTcHi79J=X z$@t^9S@Bw_XUHScE`i<_@t5>Yi9MB`NwLin<{P%Y`>jfU|MTxP-5-D3R=t$l1P&$K znXT%_suNGJk_X{rKUK~u2DqME+VhIPJ7faspU+9X`9F9cPhi}C_kVG2@8mrZzx~sD zq6hkWeX2***Dq(gO8*~Y3WcY^$zX{7_5H~_!kboqp>uM5UrNX1R}a~DoGEdu#Ofp4 zF5Y)LZNRmWdFu#0ZaCc3rLz5!*TL&$t}|(nPOvQfr#t7aHrK}WPqmj;?Y&I_tB9v< z2ud!FO5XeW<<-x~;eVMcn5@IBjI&$!wPbK#Lsv~Rt1vLgS(LsS{M@RPsMS0Znd4c! 
zSJ5bu{;mJxStFiR&2BGeHYm-VG2AjZe|(QTQuWg%#DZSX=|7HNS@VAN;O^=77Jk%g zi#V1Vq0BR&t|+|W)$1gU=!55d`{dPmZ#>%CA@#Srcx*lUuYWO|8f?i+t!LQG`cpqc zZqZ^EQ=s=cXS7><=@A2(H*;q1vDY7qqhlTy#5wQ^7x02DW~Mo_VN2?*{4StRwa0QqrGigQZ`)9w~H^(MsZj(BjtQA;9@v+RwB1iktV<_`;$eCV?_l&)= zWrn_Jb#(RVIDgzu(fzAFege{PHX6+EkKyeK56fb}8!@w5qt2_P-B*Z%^(ht?ZYsqG znX`?41z65T=1WTqf8wD&IPd=R^J?s#rgCQV=8D)!>YUU(qRZ`ktLH`vMd_(t&w=RA zsB<8=G`@LW7tML~@dXWo?;eordHA(n=mY)22+J9R<$w9$%m^-X*!xe$Hi9gv?827X z6Z6gS%!1419WA7AgWd{$qs|&(Go7ir&v*5^Iq`SVGr}A|{bL16*{d=x%}r}eHpBig z+buF&b)V{@+jn1ojyKo=?mTz`dIRc=7|bFzS|{3lzwUZlC~Zsrdd^9=^Usq`tiO`)AcYHLd%4=FOv_mb$|aqZ|~Z(s@81_ z{+IezF%=O(^i(P-?)UqhFBJs^1Vu#!tki$6hR>X9&9&cm?^7oyskQQCsvyF+^wCG} zt+#fQRtHh`3IGQEbbY+$z@MVnIkc-nPl#?i(HzDt@qZPpc6X#bdgpp2NyQthaW8{Jhc-yIxSeHX z10MUvgFSZDtE#}8P4XzWxIy}G-y*ZaYdP2(OxmtOdb6h553SkOL zMEs9=pX!mz5r6QAmN`j+j08Bj4N-yE001B`Y(%_xdLQZgx$0g|5H#IF!4#Ln!M;5E zkPB0N-|^O@fVS)&c(>!@guhB{L0XNNpG6ffG2#>PJT$$j9>bVO#JF&*+m@QLvjF7! z@)~5WcR>n3s~=BqD=m)K>xYiplS0>}y;_l`s}fz;-{L%t@}?xsC4KwAT1Y3z`PV=1u1 zZ2Lo(yMOB_qj&Doar*#yJOVHvZQ*3Z_&7G>_>pLlD#YC6PY3F3`0e^)d+DJe%%Gcw z7vjx*h;g5%V`PUEYKVmggL!#37Zk;OuqrH1HfL`sXJ@fPWxSBJUo*SaOCWPxy_}4F zR0J~Tqikrm^RK8>SWEBA#lrem8y|^Dp2x?he}A8Lkl-1Zrr;U!DJ+ZSc)i%xO~3BN zUh&tGVJ0p&1Z{Kd8zR1ti(7mGxX;}&$d5pIM%pea%`TZC^xSBOcZi`uX^;T%3zRi` z`gnIP3sfJN*(0Q$?eTr|LjlONZW}(hyyyger;qp+|qM}D!iefS7mgj zcvGO<#l>B6oS{_e6;-5{qr;6w8^ostxqqTR`7`#cV264ZZr`#IHsbxurj9@aZI1G^ z*=`q$>U=ypx%hd+<_jCCp`iLKl)`NVbo%Jze&B}|%8J_W0N-MQG=Sim!N zVtE%I8)w-t@ktt+A>1FgkALR63GK+>5`VJ!O7c?I$SqFtC# zm5leH&!b0)(u>R9w1YNN=mpt7efK#v+4AtY3PtkniXE3w{iX%zf1egFry`DyQ*$VtqAFzcDP2~#KxJaF31i*(G&kW`x>uLOs zR3^mDkNn5q5PxIOn8rDU7WU5bj@EK|nvYijSlJW&U-5VW6!|muKoUR3!+J?2QO?EV zvbkbka|&;dXLIqaRf{SfE`G|^9Za&QPa8ZP)?w8wCgb3{k1**Q)t=0y@~ zj;S`q^Ufe_7SqER?fW#UpO$5OKp%2Cf3n_xB;)gDt+>5(q09scqi05MPUKPsPwMD3Xu1ror4#K6j*Cz8XC=)S7#aD_2anxgf zeQ&LmzkCRwFdMqvQStYRKdP7sa_SSIeD4;dlxpn8cvb%1*SN~Rp06t;*Nt0B#;+RU z#P4*FCc4)-`MeQAE!O#VBZ_m@_+ldg!F5f4KW5&Cp`Gak{kh;P1f3e z1q9ilgiwgpElbpXStN5M*Z199+zE0&#iyucQmmS-03>Xk!P2y}1I*J~%xQ|=f< z7uuj{a8FKL(xv;*D;8(tP-L6*u^OBY%C$Z>a#>g(pJMLI`%s*?OnFkn_Lvus=(@jx z`hR9eXUhF*=6j=wgT~XX_qFf_?r=ZmGNYgtkVF_Bl-Uqr|d-Sr=uNF2OVJM$%5%q8Rlqwax?BOzv6&(`c z@_LFVxQr|5O38wiXCve9bWdXMv1TdyX@6yin-@K_XEWnse5F~Z_1tYW{dN5z?ee)n4$*J-&GN*pf(yJ{1n(8smTd4P&UXLrJsgabK zc3ajqx2fL0_Ht7L=Cjuph7^yjc`LMA{Z5~?b5xZ0ei7FP>a*2fRB>T?y7!)zt{&IU z#xhRVnrTpw_t|PZ^lJ8wS2!ay@qegKz1&(%vIe{9TrR<1?-jrT)xLLb3Ov9SIax=q z37+d(#T2X=uykkcwNXc!v70q5ABtqC9zrF)Bhr!D7@cTu;ITg~H3O|1 z*m)Hj4}H@}J4p7)9<6`OUc7VFZ>{TZzP+R?Q#XJ zeiy&@lgjv2;{KyxhuZRQ@&jw}kD?ul=|FeIqrSOgFCcYdtu1mZd@`^tJ5e)XG z0z^wf5VkLA$ZE4fD4_KEhC+JKu|+iqtA+b%*|Ai|I)iz~_l5g_(tqI{h3la@muq+= zUoLF_UhYbx!h5j+DHHnvtsaYTC+~t*2XtWb{ruJ50=eLpf@sEjNW9(N)T_Uu&Nbq} z9H(t2uIpIjQ_kp*INLp--u8g-r7PX=)nm;P6lBs}BE4|6P^RF|<;uOh8&vbKqb4Y& zvzJS+mjeh5pzII{+Aq@)X#wxvW-D2jMibyY%Pxz&v%)K;)Qm zO*fX8E&;Pwg+P*|1|7SF+M7?6oc!3Qcdsw5S=18n#>#@62ODE6at_DL#nY8AwZgy5BgtN&XD_^=ZGJ*~tSU)6=TEhWeZwukscEmu|v0*sFu(48er}#tnL7H(DFxP>8#EF{+ zWG5`itpQQbEkkVg>aOjM?fG*WuEUOWE{9-CUl`Ux5pSvt^m;;{jO@f<7?(Ac^Kn|xup-J+guP< zoo|RDHW%#<`~pW(gSeOXNLu_ zzXCvEj(_$gqAvVtD5YmXxUc=%Tmr-KVAF_Dw^&_8|LnP!;jo3qjvM8Sf9CYA6ey6p z8SW?5ic!6ftMHnF+980N)w$bWd1-aL@p8X1O~C(;oq9qozs?u8q6cl4Ac|vdPr4b` zuk+deK*&C+7EsOL#Hf%2{~k8{$)hr9eK-5t!++;x?@^ms(&W{K>>x2bj;ODG@#(22 z6hc)um2GdyjZz$~`o z&J#scR;y>y;;P>tn@SAq+s>gc`3))w21=jU(9u`LsyQ1=c>`tFO9{f`S9lG$WmO!_ z)PJ_u)R~9-YRpp4FNxq!5vTf%e+t)r^4i2m{i1%UHq_>Qj`cPjD-_J2Osl)9$E05b zO_w;_1Yea_)BY}Pows$N_Ngc*)vKmHAuk_9KaNh-d%33p1=Vp7xh3IAyLGNi+8c?1 zTc&U8@qOO}EYW&ea}0|%^U`%2Y0CD==zp*FdW^QPv*S}s-dmtMn_C?cP>fa9kQrXN 
z?U~8X6tn$wS5!|b-gViGEysJ)$Jio0V9=M4yI%bSZl7!4H-l2)8o2fg5s@8m5&U@N zwXNao#D!GZ;%5_|rfh4-xr%e-62Qll+urv2b_-C59pR(b8ywTw@+87Ut zUORP)I_PNXB_@2A?=2ag}OBBy$mExc-c7dl5I4gU3c#VJ*eSu8uJVPI125UuqA!3<#4YCg<#kyCjKBe zQJaOc|z_#4IdQ*MGi3c;pr zcJ~PxA_q%Z-aSs{HRqKZgnuAETo;jBWfktyB4511)qxmx_fCBz0{5;TRn=>fy*cbL zwbYd!zg8A2e_CA}X(3o-AN`{4hS=z`0`gjX_;wq`!9Ew6^|tO_V=%?UezORXcS1&h zw@Cjco}xOrU;*6}h{USg;jK)_Wk1R4(DyInDm<;h$M#3N6$^?f<$o3ZqS)7l-f!Q; zVNp@ks3S?@FYasp=o#;e7(37>v|=&cJ)hJ=xtzDB?QYe0z>dRRURY^Ru&sIVq_{+| z!Mbd$*9Sx_*qaKGe$_7lxn2)frgZdfU%KwSsAFC#HU6fuI;1rbEKbtfD5I>4oieat zdZEC}1ei&pv;l|CNq@STQ#XDJRlD3xA!Gksh}ZbU;c?HdKp{UB!UMimZ#wW(Z!z9VaXn2A~*2(ALDcKI2rg7bwd)f(Je05HsT3%b%da0)+ zxj9D_S$GN@l9khavSuZof%v#uLjQCP50cf~yUW|1fck`$4S$rVAds7qV@`XkJ{T|3 zX%@knShCV)d9M%z5QCk~W=9m09wK{sOND%W>^RhMRP?yrWbpCbamv_2AmNXs8h=~lqKwoGbWt_sl9$Yvc#@IvjpxRIZ) z(~{Z|?^X*}sdBV7r&rj4GeoQTg==TXO8T?YJo|qjmVchdDGsK>j*1Fh^-XRKTz|X3%c!7d zF~uIBR1#SpgtW<{%|-2zQPp0~fFc)e!$o}y%KmQRHl|z!YsE`;2S`E^QgVE}mhX%y zt@}oN8h@rVrJr`=_2J$V;~mm5krDDvWwa=X_4B=jw2*7YqI}@&Yuyg|`%X8v7ZE;? z?q!riC=UO?oo~ZdJKR*|%slT3{ z-hPe})F>RV&w}*9TXIrWD$Wsc*mO^`p*7uWN%vGvafs9|**h}GqCdzhuz zXVJmpOXj@oloOa~Av)jZ%OY>RD72=NB<--ww|-ae`})VJaRg`=#_c0miY=Ug{6g(A*nd`ts|>F@ClZ0`H)S#2-WeX%P~q)5P{OD3LtZtD2? z(RAu8pYOMBy^~M?M&ItcAYF{N>u1~nXz#iw)~Kj?yc|qyNK$}s!RP!wKZN!)h29cRZ57=kESqD#^J9PO{kG_DEa@i9vRY$Es=sFt9XO27#TB#owq*$*>2&N6@n4@^x$7ZTUyJqp{c+xd+JgKG8Tf-$ z9KL^3jIFK*rM)~}PPd#tMZGvE&0-LQR+;R1?hJC|9y2nK1?R$y1pFfZ@b&=WGc=&r z0ccFcRwICn02IAn)!^+x20|qQ#$y(8=kDL{L#^hE{F{+}Gn#RR2eLdc-th`f8VN#q z-2Yc#*hh$J_d($}*wpao{PMep-h&V|OSpe8vTv3e_#zc`y!<6tKe1{w%SM9^n>B$Y z6a5^2=G0!?Sr#Z-9{Y(se2J&8%k5TJyV&%D8yYbO^%>FvIIwxM?i006S0FZBTObyc zM)2``K>m5oN$Zn;tiZ+VbPW>Hk6GohimDK z;Wbz0GRUAA_8eZckBh%h;X0~w#s;dRjl;R_73|L_Z;$JgUmWcjQy2lq0>DNYQc)yO z#UxOBsda5crz{S%jKUc95EFkHK>jI<<@SPF0*L)JI>ZAT$&axi!&U??I$No#B(A#q zy-x)$^c9W*Q#B0D-2PkuL2@mbsrey%Afx(7o7-ReY5#8sd3OI%Kx~9|XwAR-^gE}1Y!v!+O0U1$Xr7Bmug~@@`Hqjj=FHsp`7HSTyYJ_D z_%nun&9!g)K!*;W&)4DAu`ev}oAQ(TR1#B^krRNpykwsTY}3%$P%Ci8@WujI=<=n^ zk0s=Hx~uIqh>w3*wqB(o5G#8DXikHN0gh5y55l9t^q$BOMBqY(bKGSj>g*%dL_4XC zO+cg$WWNwv(nP+ocpgOP=nu}@&P%x>c_rA{=KF^{9K<$xH{>}%y%?^m83C3Kj32Ce z?sB0DU5W7o#6I^hCx`+}An>oX$^5Y`Kkagb-M;*^n|gobIM}ei*MRePe0|r5-}cFj z|F&)Oy!-aSgN4n*^(xrFZ~w5j#^(-=ZceCt`nVD(`fbZsbqMr0>JWlmANM>!bO>@@ z3Mxo7*H4jbO?Y@0Sj#rSY^*ucl1f#DtLXFY4Mjx|Y8*}2{t%)DjSD+uid^xw`5m4` z>JPp+qDjdTcmaDR6A;!f)i%4KoQK}&fG|b8bxhROc z2Um8GKS20*wmV?P1pfQPKDKN$+kFC60bjfCzF63szih~__3*n^&g1h}+y0B)|5;;` z+1{XSd-)qF@m%RvF(8K@j`&g*Y#lQ1+ z3CxM_nt{1%Tl=wRtNdoVkc=Kn<$yNyyv~2}xjyBmrzQ;B8((f^&XWYWN(H;+Xw}_5 zST~>7300^-YYkZ^c_5%Dz`itqCq#%`A^}AUpT6O}evdY^F&BC~`#}^N4r{CixF+nV z*r-)=^`#*P`CY7Y;Y}=YVf*HW6UL~kD$=Beptjy{Y?+{+OklsRvWprWS8S7pE%|>@ zG*yq4^+@u3R)rtb1Sn$SaomKmIF#LbULS{>Jk4&lf(#7)d3F4wgeC~=uE`R!-3|Wm zL1sGva%vFf!;ksP4ov26`>7zE`QuOiK8C+PZ8g64V@sy*9GbsJ$*igfQ>4Nnbi#wY#y-$J6KUZ zdon$=ODqZEQFTqwp_K}l(eazJpOf`{1RWN)L8eI+cpk&`>r1L4`YD12o)SD3uHz26 zRuW`0OTSFaG+`g+%+hrm}HkS zq%uG2e0~?f6nbn6$L1xdsOP%KZ$Hv{D?#cQa^|VydOmXTQN@?JBY(CYU*)yvhQRJs zGJIqx9(I6~5Wb4-7sigFtX6-h!`#BQ4|ky=Xo>4!hqiN-7SHlm_{$b#oVO58gB&wk zubin#;1~SsJpZSkJkR03+X)1RzkKW;TRFGM&pzYtb^kA)2X+%{<9ANi>GjvR!dmIV z55%!we%_DYqr#tsU+kF?Um}ZJrMWvoYnVtePl1o&z;avn;Nj~9k>-DLxkM2scH%k# zR#>b@idZ|bFz{j-w%BV>&>Zw(p92AbPQN!j4 zt_cJ%xEM><^lI~j*GGSof}D%nL0CQw)e2DUIlp>ZzS-9nQce&+!hA*sd7jV7Kj-tW ze}DYu-{bPvzGU8y0QL3v7*(+Eo7*%&AEMp=>5qMVi}N;@H6+R`0)V|$@C z2qBaNT?89*^qVM4+WvIoI#HS^O?3E+*QWJp3t=bhXEc9K3|xmP{BwF&z(NX9DGTEz zm+%a1N!4cXWbUpju0NCpm~sgwcAg!}g&&i)==Z&T2^iv>nsePyKWM z@ZB#MlX`!{9P-ova~UxTd)z`%DdpiOqCg+%edw650T`_&DCP4_2fzmQN-JqzO9ftI 
zEsVF>E@CduQopuUcgx6iu>%(o@a%217NL})nmpCl0#|iC)1`tJp4l>mqJjUKqWo!8 z@LS=#BB8`UjOHYwywEFQFZ{;fys`i65DS~z6C8i*CJXri0#XUcMv1!qXaUwk8x-;f zzSd9F3&cZPAusjiEX)W$3k14f|Nn14>ZhHcZuQ6bJ%_*I5`U~of7|45TYcw8_1pjd zw(XxjE{WP*pnXe02)d>I=`}dd8#tx=jCuw^yw#TmROZX5lySt zu^NA^QMOoV>CqEq5h1g^FY<2}K4_VvbckYt3-+ibwggD{NjkmaqHN&HE>UU%HnztkL0XXszfRvoY4tNguI}jWcbCx$k#)ND+ zdsZLtt>$NfFa3?v{M-FD=KC+(@P{27e))g@rHH+ziMIav?jPg%yAQorxqC#FC+lav zSf=RwouG_4FSH>MCjjem^tuSb1+!RzSpLsyx8SzzSnr z*zO1j!T@nMKlH^>4FwnZ_u0QWbDr#@z}gc_jX*GFH92y*&CE4nN_EN~d>BQyG&z5i zC~}aJh7Q5W8d6Uf^80KrkIwGJQeS(CQ0D$)SNXr)kN**a!FWoic^>~`jsGKl_}wr4 z5kvgvJpDVy^;f$E5cZz;?(?{t$83e)C{+sU$1$9PhF<7;ZIhW)Mb|*U!qz!(`qD_w zTO(Q>PCzYXjk?Dw%A?-yEU;TniaUQmSjt)*Zu&`T-Ui=P9NUnJTOOC|*5O@=NX`Ul zg@ewv*v5X6qgb&qZbE@=u+p$!*=wjD%&`nN4NGa~`E6_Y!8{+)-b1;^-a&<*Aq3}q zk3BfY^Lfs@9Vfb}Do(kIencihx^u(obc7$Z7Y0{oJj6-A^EjO6@oJ6>{p^32PUsK# zQTf-85-W2x?^o2n{l|aA2QfZL2;cm*e*Nuh<-h%+-~I7>E&mzgf3Hv9fB!M&fBFK) zAPM$e(|&&U1!UQg!`{PVrHC`Ne~l%Xtpx(4prs(OgvnPDcc63;Cme7N9L!nQpU+d0XEKT5zz4yc7~=@WY7!!O9Rc1Trp! zUtb2i=UQBqU&YeYO-Y1^amn$O>MNdDNN&GDq33Ph?>h(<`$NwwQ;PUY>VjW~Vh5s? zVP9n~h3^mBGo+!0~8Hpg4W zdHnxs^B+6=$M}bD^>-Vu{<;7C!>0TZyTW-t`-=uzDX+t0xT%qjbOJJG20zs>|$eGc1kq5 z8KrS*kayZZwGo$dYK4DX3uGWUHn)I<1CTKV!jMWOj>_~YOcW0V1Q-j#nBD?zc=Enk za!6i^V+#12D8h$EJbqpS5ya-PkEu|uRIzuHPmv#EJFKgSdk`qWTxm#X&h30zGQH3P zi!`T^Fdv=}xBO#Y@!flos*wNMXZ;l$kX86Y-?>slpntZ}zvq7pWF~&?|1qZ0^qn_7~$ zl5m87g{i=w^P|KnHs-w+1lzMvLbTmY1NgDNJelJh8)b9H;3ZC;S$;Lw40sL{oIshu zcZJdy)>Y5!VA6kPK_POvfV(@)@ja+ZKOht~%DS_`+con_W4(Q7>`w1OH*sK80;CO_FcDIoxn*)58;4y4C~uQr~>CMjI#HxS5Jc0xO^VXU`N+ebb#1Kzb z)d4acv^m=>WWee*C~o-dc2#EX&KX$)`2%QI5D~^;#Ri};Jx6so0B_-%rm`AdPEdgI zbsK?pA3Cgae&M?8f#{s~^U(^)um4!b zFplQ=%42^%PI3T^b}sTvA_c${-p?%xbe9s?k6UAq!C&iDJ9Gj0+g*bS+e_F@jJcF; zfO&*t&z3+s_2ME4R6Ccuj|Q8I@gk@ru@%dw>wujFA!KNy_oR+8U1Q!aRCA6M+TEwc z$QkJz&jEP}BC*KX%<-H*_Br!?M^5=Gc7Zka$0vXOiNXEYp*be}SAJ^Vf6wFMnW zKYdC0w>_KJY1o_}zu;%gti+$#`$Z0k&h*=(=t6vcfNCkCI(k{?2qYr52pd#-g^FXs zIwi4x7FdA5B}jT=op@utH_UL+dBEc$&M6A#{t7?Kz82vd^G zu29&KVI6>vR=@Qzg{_D7fjQ)&=3?#GsGLHWK6IyN`vj=ADAeEhCcw_{yK#Ttxcl$D z2G-->c6(lrf9B&)yZ_#6{@tcy2wxk2f3JVh|G`)K_4oCQzH!mt>&CDB=by2AFcbW& zA>Xwd=K$@OvHFxUh>atF=N4#x+m9NLJ0o5u60PF*k?3pUg`+U1(p)VY>|LSIT$LZJ znOH-xz7D+#k{tMtBCu~E{Ksu1@nX-7rY28e1gTYMMCEt3Mz@eB-z@Kn?^Y`6BjS&}vzU zfe!dMB*~8WZupJuk8Xv$=vn6SqwL)vsGdu3pE=(P8`15-P&wyygJr^+#XRqkd_z1B znUcTe{nvi}y}k-)|G#|q-#L-LV%2|t#;|_+=GWfx_ceaS2IpMyZ@Ykff2PSCXuK*c zfV7iv20Xe2Z5psW*bD#Ff-69_7 z@)*1Dk8Z;O-$OhW`)n34Z8dd-5_!^JXcv%E!6W@$neqKsX8AAK7UCFRCaRx0jk%KK zr=kQnn**DO5QNqowl5Knan77biElUZxI+hhqJzVI2nC704J7=ul)b{A+3Jnkqr78) z^3MktMC_ZtkQ}T%4oZI@%;+S1J}BGXLZIN^{Um*dkNvb0r8iLm}I4A*_x%CCupijE~v%b7cq*78?P9&lTcjs3b8% zmT#mK^%d;qx`beY&I(&WpY!gUo;|Y8ajr~xfhCMZIfYh&CSZS=@)Z1fPQyCtnj9_|9YhPd5JUZ2)E~ARIr>F56|=O8PaCbu+mJ}2lfZ^@vZ~*5dWzIkM&~4 zJ)x|H9cO{}zuC-wzY^Xe(q_yb*A3-Ng5dx6@LYSd{6Gu6L}iXX2iJHfne%P)f3R;q zjr+x2{ap47UBZ9GepLX$9^&%fBlz)O{eUy$k}sFbFC+`X{36?dw{*F1mctT~VSlPc zg8Izh(b||XpyVi35rA5j_Iw61|EpWt3ocN_RhG(WS+`NzvuX?OgYR!4x_3^izETmH zjq&F~O1Ynz=R~V?Yt#n$(}0ZJ#W)~51L_eKF*pz^^{RjDe^4a^2)QQo9CRnJy13xH zmPCv1k-2ag)c&kI3dka;Q{XjWRT-S4P^l+uHF-$e@;xuL3J>{92}?1$Yv9}o z*Ql^U5%WC&C?;`f7G0)!zd3&o_aYjoRni$Ya#!V9^$|7VGGWf9{G1B{CD|=|lLIK% zy~Ga{`MQ6V;Kddc>A|}(|K=2BVf#$y?*bg3ss=2rR93x+GK7gAY+MIzLd9s$-W4sE zh|V5}T+og#zK?P8bG;ck_5Ggk^SxpI-rUaJt->)KPFy*Ejtu5}JrrH`WiMp&H3sX# z9#W~zVh@w5jt4=s&OPY)V)Q5a4&TLf*-?;**PwrZbr|1=l!%Ok5QpPn)vl1N}U5q{ib})cIVliBceLI3cv8^!qQ`Jm&D4mGoLHk+2n1wJYlf?gz*m zJPm(-GxK6NI5G*;QBSufz7+?P~7hr$TFt|jEA{>OPRs5Ma|r=ZY)O`+O$`1SfONX6aNIF~=3|?9$Ye>xdl%1#-$$b&?&-se!1;f8 
zy*-0m7YH*;|BgdkQ^Ymr@gw3sftIN=^lLit2*`8iy0$RyBq8N_9e+5-;?)V~)}s+g zV78HgQ1<;nh{N*{&nix%5k{4Pe*Zb<^|(JDQSqPS5ofFg+-+M!Fu`% z&nx2D>xl2M7HiPj5Mje`jFZ0O!EAp+{2p9bA%%^>alNj2T_t7Qe^;Shxzzvr{XBl> zKAr6#uEA^Z&*vEbj!OV|CRz%v!~g5pnSYC(=N%R$6|d($M#e#EzvD19=AZ~9uFb;J$wiANIw7=XIIe)CNHvhw%bC4&@IvQ4cEY1Aena7YxGm z0&B7+VLZ*dI9?TDC-HBOg?}FVKop=Kr0VG*FoW8A^gdr-_z@e9M-O!+dD7-HJRe)**$!-jVo%lY5qR~(mrj?aJXi_8v=>u6sd znRXlpv5~0#9H)9!Ky7c#;`J8mB*@F=O!2%n#T>@;y!N6E4fX|l`RoyZz~o!GVOPa2~G;5)Rcd)cDD1ma3~Gdferuvc)pA0j(?t8_>T2o=Sx@{{{3^zHt)ZB z&Y$CKJOdl%|2%;@+lELW#(Y;Xj;wkp9iCJE>*vQD7U%ZD`CxjIJ_GKr#JyrY#8~~W z)(PB?F!vR9k`(%e{@PG3MKK3edYIqy+=Wfju-0Q8&1ZX^&c1)typH17WFy{$z3JSq zn|VEv9vI#88Va#0tiwOwIfy1JVhtwDN0xRk%#{)%J@fA||N0$lKAZduKCIcdkn#B_ zu<)bUr=a6m5cr+_7&e4FAjh+k%DwP!D#>lj|E!z0d2OD*uc)MW_Ac-P8jQ=!zZ+v{ z55NEI$B0-L8`ytQ$t9&`ZeI>{7yn%!%=QHLfw7RywjDkNE6w(L_ACC~eb4?LZ2jE6Wya&#Cj2|Mi`Snq z2mWzCvn_J?xek9nNbr+TOpfb?7nP@f^(SyX7&i`l;J<&)floKvZXiU^{>SgVVSZsu z!ER!#f*0V7HAT?iFKgjXU*R5MD=Cas_?mdmKab&hC|jP_tl#HC&yNjTe?BLcX>0>j zR-O@Gzs6SlpU-mxeGJ>9!!~^FtTX-dzFpYiUwvPqEs<448~>X&{Cf;?*jK>DH#o13 zWT-C*0Q!G`zoJc{7?~LlNFRDG>>rrA{^dON5K8}Y-n@tSPp>zx6W+WYhqz|~>AmXd z!e1;Msd@DQ*817Lie}#n-Q!=*<({)|`5(@8ac=*pA(iF)%$K=s{=3gJk2S1o!))Wd zUw!{G|I%OmUZ9@__@ckY3H(Lm!_Tu#K_5_m|9*dpwW6E-kw32SKR1VAcmDIaGS80* zuIy#@!#yFDp-IU`LK=U4NX%jJ@O>}M{^P%&KX?|jY4%-TFkUfdMfj(fFYuvs+%qt2 z+279xKiy?F`m`Bg#KO6~{Mm;?n`8elk45YyF!nwG1kA_hd!IIg`TcG)&zDFQF^6XV zX7+!xy%0pM8`$*@$GiW|@!5~TeQ-AH`uu)CUpKI|ob|PvO^CS;JKe#L4u1VU?k}h( z`2Dz^NX+9AelhkKVmiNuPmI^$_qB)D@DabS{qO6a&x4=y^Z9l7bN=^r^mG3AHTV$b z^S`g*m(6hi4t+}^3o?DadyEOskF8`sU~PXD&G|g|XMlskfBl{}&F7(GzUSe&<~Tw4 zJr8~w&SRzTc?ksC5v%%j-nK^@vEJ;%$?z%AUO%tVP*`4nUc>i64*cge<`K^K=f03J zPjLRvYxqAn|K~NvFV6pYjlQ1a7UFjvci-o7IdQ&^y+7Xve*1eK`VAQGU*|o3o?n0Z zo_F|pe(vWy;^+Cp?|HAE=l^xy?3W3{;}5$r%>J9hzXo^yu7YQB*{#lS7?b1xq564# zeZSL->U{oZ_J{vIAJ6BCf1O|a%;^#JJHO8-zt{Kszio%|9osv62QhkbIDW@9f%C=r z{Mi@4`M7>PpNnU9#lO#;??3qK-0gq&Uj8~4&+W2{Kj+M8_TvQ#=g+<*xBOna#`$wU zsp!l3d)d+2=isk%v0t87qHpy_WD?Nt2lpZdCYw^ zJ78TzV(<^&@rjZziZv_5JTPWb8otC|YuY{;;$P()Vg+T-G{0~0eh#gD-_vug9xM8z26GF$Iw!T$my3uS_Mp8FkStu(7h++x zRR22H%dkhhvT5a^@?FI=;W&RJ$W!cPzTd+J4bPOt(FHhZOxhYjzS9D+CEvo{i}g^X zo3Sb2KL4du1A>`6kjx>Q0k;603385QD3Ao*x1w#GkauX{zY&LKmdS?z^cC2i|(ZHP`evFnJSfQEgEvLEFaBHyTuoEoT zvWm&>UGB=t6uyT#BYY3;PC*Xa`N~i<-9v^=06aA1PJBA?s{Qf-Ga~@FNJ%8UT zabLL4V#fI*?+9j7JXbj1*95_z`F?O;-TXXQ@c}BGpI<83WZ*CcaX%~vGMjw&?$EeDg^2T0P$vF}Gv({rzx$4m{8IIbVOZuaeH~2qKGe{vKG7n*Ygh@aKaUbkrZcjX?K7SAGPR!p!`}n`w z=i7h9+>iJ^KL3t0v-E`Z-1j-F+cwr&p-WvcD&5( zgy+Ke!2Qm@gYW0V{O@;ueESLa^ZDAd`Fj{gc;9^Qe{O#-d>`W)^BPP3fO(B23D4oo z{ekvC-Q|DyT^#?9*ZqzI2>^h(|9F;KKa+p3B-L`U%G|-Y{=a7hAILxo#Bwbe`J>-yi1l@cWoQ z-}~(U9FKp-fagXUAFjrC{NXy7A00l&b#Ok$$#;CBpKyKHf$x1|{G&SPFZ+Vu3kHk> zw9}vIswGm4znI|u6M>*_J_$>~ciq4e6e8b-ydD4)82@(`I}x<=bG~2P5AF+D+IhZX zK3FbGvO+%B0n_vSVt2iTPXyd_u{vCu!4&74flq&T0veCq{vUs{5Gzj?epXR{p;1GZm0jIgB&aQzxaJymkimEI9_^n~d;D+L`7-U#C&RHW ztLJ~C2;zSSdG&w$R}CfdPZ1^D`Y#bB)WiHKqQu>=|45q`dIm6YE z4Q>;c#eM_nPZYB}sCXP(rnk(+`fxF=)hX#FXs)hy(3C6~LX$v6Lbu%DFHlCM)Jw_i zkNx&p4nwtQSGQ`;!!hpXv;!SjRJd?k;lRNj}~TV#iiX{$U`Ho~%Qr z;t%&bxob(HkuGbIHlhj9alPixp=j*Zm%Ot&-F;*k2v(lEmw$CKONQFpM$=kQ*RVSb z705m#gZ?h!ygv`8L9uUUk{_Aqu+O=503AUtesDVC9X*q+*PvAas3l9Z@Uo*9#(96g zSP|=x^{Re%YajX9Zfe&jlsL7zD~a6L!B7M74W(=7+PALWULQX9TD-|i>MTA;C-*&n!-bkG%MRO6T8g9M8tCSL}4COPMYvxN?eSMRz=yz-|@Xea72ANkE%I2G1$ zlaKm)v-Z3Vtretwd$X!vijF!f+GKz2e(5W^$Fw7{W8SvsZ+6|`GlaJ%lO`#9&`_-e zri`&!xu9%p`hyg58ya+;oD16J!? 
zSu*;)VP4l@FdBS+xn{25PE?9}d(>%`CzrHyJagwepgfgfZf~o-c+#{J!&86c=t}3J zNH?Ha1<`T-u&l+cwD{z{%Z|mGnL=@xZuVxQh4OY-3j5`us`Ph3e|7_WLh1OD?z)8H z#p}UUy3uivh$o^n~-k@LHc`tG1)c13eiZG^^?Y|knavAR_T%l0hoU$ z^%bLT4`s8Q)^*n14Nq%9gmHg(Q31~`)GV&Kt4*aRaajU@x-J5ifLkk>iL6B=?U zt$N3g*=*W4$5!0bgV0xHuI>@O816OE`-fHG`ky<|b_UUri? zz(oiVSus&P&)UW1L)0CuzxKhTYU2ndF{(Y=`$$aoDmW>8UM-7JtovJtI%&>AwRp!T z{>y&yI}WtcR+EB2$1aaSE}G$+xZfj9GLCO*!zRL~SSttLm!U=!-d$*`KF#)(BFXXm zoZ46^60!Hn+UAL4P5FOU8dfaZu)H9S>vGGc{b$L?Jg9TAW_C6#P)&&Mr{4*Z6|cZ7R+uWzCDUWr_#WR=|I zK{-hvs_CRq8(Bb!e-;SuL>>}bPia>=B-m2fbSG7WRJCF zRIif(IoSt$490)q+`K8IyC3h9Y3zB@i0Y$ND*O&4c<3majUuNY2IK>|>sGa% z52)s+AnIPyA%IoQGSLvvh)^(yq))W>JAPFM)wEQ{R`d}u0T*1$Hgc_M>|Hj%*m>3@ zmOyU39!MgTT;Ajxm-mZ=63LSVb?heo_Hx^>0N6xLLgERyK z;yv*gBNcpJac)Pmgc1xB`|)bH*slCukb2jMY`j)ZzCFbc*eVq%+IpX9%#k+wD_ve6 zS@HC@>W+W+IU69&wB8*a?e>_0mSn7+?4ms7P#0eenR)XyNPN?-8$x8;0->&+AMYf=z1EK_w{|8FhBiDRp z*VV(iaL?1OuQyV$T-DD-{x0q6Fg8R5QjxWfy1VCceBSE=r|;W_gl36&wFV1b(rdx@ z8w)FUZ)D)C*6%w9iB7Oeh2U9JlyU$$?e zzJXtB4W;K!y*~ZRK@OqRo%-}3s*uyOqJFy_-ntC^hhEaaytYj9qPN(GwD^#Xr5e6R z+penDCW^Mo;R!u!+wo>-o`;gmy}E-WsnO1{&(@qPw7EWO+-@%17+ix+1s)-TX31s5Gs z#gC(=2-{~3?)8Ogb(#pyQeKCy!KLBE;&G|g{jD_ooM0Gf5Cu@f_VFM_pB3z+)$V8+(_c>c zK6f;qu7lS~{8&_O133lmplx-xBQRo4%&?i) zB0t@)GcBpxPe&B__)27V9X^GC@av+`nC=GgZ*~tK)c%Rw(gIev@x!fU+yU8SsiZ>6F?SZ z-$whOH9n!8vN@1mi|J^S?H;^M!t%E8WUX!nh-%1CIYK=~sPKPF%IfmcJ|5Q^OS`W# z+3o8`r%#NaS!oK*eCx!ovTh9}tozlH$Tg&xsuL+N`JsEDwmse7pKJOJ2mQKz93bWF zPkSTYw=~7LCn!RKwO($h%m%^;yRT&0u9Pbnna5SvmhSV=fuW{k?sUP2R{-ZPH%8f_ zaBzLH{m*7k(Di>&-91*c9j)q|znH59O&pWWDZR0Y-Z!V$^}f9{-w)JLr<0BeR7JRv((H^>cH|Ok+d5 z>$NscdP|-I4fUf0aSoo^(t)_eR5rT|e1&F6C6|4;3Yn~RiePcy0~qE4!OlOSc9_3P zW(SYe;*@^}3|Uxt@>QkJSy2$1WNQQgZG&CZZ-wYRIM0?g4~LU;TG6Bumf;Gr^Tef8 zZfR=TWO_O*Psw-_>c>%B_*;GLxNf_c(s1+giR1vyN=J%sIwbi0ZO}Bxg~DQewj+exK6HgDpb#-J92*^6rY{rRnh23+pSGqJD~{ZM(;O| z&3^cd@`GCCkR-{I7+VazdcUb{VC3SyB$QRVsbX!teww${`n}s#@wu^$v5?v-+-yWh zcbyIoYYJAYaf2P6FIP`5jKCr%KT z5wCy5rBH(1e#52-2r&iw-BE^fX$h|z>%3xc zGcL9U8JM0f*OOo9^r0sR=maA(txWtylU;v(eX4{tc9l|sa*S!o2v;Q2kL+7Iom=yQRO#5HOp%=AM?@+cZ%Eg&wQR`kgbISy z+ir?CuxDLMQD!e2E3Z%zM=O%eDwe3X8(-GYU@p=r4}%rIDnllD#m%aUE(@h>8qt5c z2Cr^=eU|+xk7V)8u%Mwnm4){zvMqm}%4er9mKh9?MVH;Bq8>abfiqI4V_S=_GKsFX zQ%F`9}2zP+N+S@dru1%?08=U8?`O6P-$(X(}Wi{ zDe*p54vQD|gVAl|+MTf+3anxrl`em8kWaG_XhRkHGF?db!5gZ9zI((Y*m%b$BkIm) zqgGVcg7o-RJhf+9@1X~+r7!XxRJm1+n?t`p+PcIp{gFbmPGaX@gHn9$&l~U_yd&Cq zEVvpHCXM%mEY#6v%-d!Jy-IrnbtPm@vw9oQi_^8>GyS=whi+*@Ee|Ri6iI)!#;q~h z@VWGEmkl#81$kBU!2!U0{#Hc4t)sSr$OJa^>*G!<-$Sz^oiR|w1gdpBKBzs zI5fFU7X>3oAsJkz>AIm58e;z|ExJDCzFx10>#^I_(HS@}u>#8cyjtb^gj0_3!s-XF(;RCKWfT~}F)J3xY$n%=!j8R~y#uUEU0^Ch2T zfETI5L?>%vVs@f{t#zEX_XIJN5VhM(N^UbkVNMX^I%oT|=>kjC1%B1X`QqrT+gy9B zM)vuF85|)lWF3lfw`e(mD#)#!-55KRACp2<<{gDnaW38D(dCWGVLrzUYa=uRsLOh@ zGPd{6v(P0(CK5UG&BuQ-4yZk0zE9$Dxs&uNStwo6u2p)$>y2^;{v-uo%-ulE4p_0) z>E-}FH}RMUR9dXIr+55hI)0~J`7=n5?2FT!f&=PCr{vV%P|jqp>;invkj#lH2=5+i z{%%g<1tKbYtzB1F^3xF~K6}AoeJ-N0^41wuY+vDaDxBM!GnRj!tGhnD>Kv6aaPC#g zEDjKaeweCqYX?wp2iv{AEw-nl(}&M`wQCbn1ani%*U$BGA2if%{!Rig(BK=B-Lap$8kH*W0JVpw_}G{uF1Q0Tz^PIa%$eLL-!c%+dqHt)dq@(f$fV z_{>o`E%K&(?-tN#e#wBM^Ic&ASy7yEX5{zMC#S!~b)iNw{E13djs{*$B=nTNe`hIb?ZQO__TbGqr+Zqh!{dAOzS?&tOY_) z%&76Te7b*NYMVCvOGc39;Mh=Le3$FXT`kwW&p9w957s)Qx6{3cC`{utUR$r1)>%4H zCq!$HbaOX{<^AM>0sb;R zrM!RmdP@%JeHS06E!Fe8_hb3&Xu-?9+fGQLy?84UOL;TZ!RvAIlI$|93WVH#ywyrM z!4$pr(h0h{p&3wTQQwl?P1>2l8r{E7ls)+-kOj!wam{FDdV~7Ko!dapY1>-6f)5TSya+RI2gOn zcBuQ!iec2IzETaMg~q!l=3Lc%?5i=vT6+-|_-^v&6e+yNcXU+B@4o+H3%$ z#pUUb;`L~pdb=2_ym{1vzFW#?e{nIf9n}M?8d|~j9=SvZ((Tn;ul;sr~i?%;KK6qVveJb$G4neLGq<6`mjIUhqH&eKutAa 
zUbz0SrXlGCPqX>NDV-ScQ3A(|tkQ3keNSe-tQilkC;w8lsjkQsv@W*FC*#cJC%qbv zd&WqMadljg0Q;ZwZOxx1X0UBTIkl{()T<7JYc(%kU&VH?TONXCMMIB=upfV|cXg0V z4~uqgRN-e=)Qe$?AfDBx_?KIG4vS}GSM$%yyFEyy3>+XUpnCwAIcQZUS%bmf%hc?5MhuO*VSGE$w8%s2` zMenA&9RW!P+Rffs6hpev*LiG z&Lj{cjWP5BFRy9AUh7_80vV>!`T80?vV0*N6F4=ibvf4|?+|@Ig#s04BlMQI~Mh9N@0=`^9_!$*FZJBOk`SH132golD+p4Ib zka4LL_kGHps(->q$>nCRr`-^^=KifR(mAHr$>wt+jgQ?s`@o`@sW^97>Sbn;YcJl+ z!=u|qg265^1A1t-ES7&;)4PEU&M3U#7#c#q7sc)U;LxI<7W*W6XJ=qPM-Ra1{3hmfWYP&0)w&3}Di5Xu_Xyh#}VLY7)|I&ZF?ntTH<(@RO^%zwR ztL@BXAFNxg=V_`vQRgt2G~<{xf+xV;ktTa3UnZ+9DV=}Ct$4k{{!kb=;t#YH)!U?I#6 z=Y&B(WNM~sDJXvt3?*ao%MO|_@fk8Mi+*n{q38VCB%Ap7gg!Q7Ew&e5Tl&uh)fA&G zLVn*fizmna$yzuoF@sP_g$ zbqIG+H#8A<$-T3vj%q>DkxJ4f4TgUxX=({YJ<3x=PhWC=RJ=!D4o5pK~Fr2 z<#ek03j}13OGCcwbCucnF8VP}Hp}3#pC~8k(o=UG&xEWWuEE*pKb5Uar(jCs z=PF1Zu7ah4Qq!2CP!S(@k3e#*1z5#4PECQId=a>-(6~&m&w64^)hcbjJ}uOs-Xg%$ z3j6W8dP#p5TE~`H={UL&_Bn48o2Kd#BhC6{m)DUlbfRu#VtOj-@f?Kd$Nf|QK|sF0 z>L$?Z^ULZgo{!59K;xbOu?xoC2s>|4$|-AipzD1`{Dp2dE8_HVy_E=2WLMlYXVcm( z6;}G}b&SCEC4F>iu<+h8V$Hx$u+;kS_&hc*>(r0Xel_EN>U~KG!N+X1OBv9p=V^H& zw&zO$snfgjv9$Ap#Tu{^PmqvD`AzQfHSbytcFPb`+`hf7;BOtnK9j2}1X*<9ZXDT@ zD$(&V;1^peZ?obPjt!mH+xzjYE1Rxe$8Y~oR(Auru8jk=ZIl-iRZhGUPycA|^A0LY zs6Z;fBZT6AEP>5B4ye!Sxg}hcT*MGt860iW`WwB1!u@BPfDo>Rb&NLCHa_gj3UZZ9 z(3zX3-X)!RlvF5$ol&d?c=If6`-r1k-M*d&Y}6*YIg(lAp-d0l!49j={@5ls1hPAgLAcbfsoNyUXh}y_Cu4W&=ND zf3&3D1xsXc{Hd~0hzSX-rCS%y!rdb&bLiVKVw~x4W1e&g0xkWzLX5}>OW|;6k$Kxr z57!facbXp;9)JrC@q%c*)B4xb+v-!Ae`e#7k_zV@rJ@0rz1K&UU(?D$>6_kEhy4Um zvFd)8gBxL8kHc}(Re}Y*?6+jGF;gtvv(#;uhp{pq4@(Ed3D%^bevSFv&w@2iWgguG zJ%3qUp7AG+)|zBJp(}K3@8Bk^4_R6#_v*=iF)ck6`hD6&hn=_dpz3XSFi$0>E2s<8 z3tbtTXN)bL+r;Y4gT(^rWD^#iy?Dy?dY!DE;Z(k;_lvSiTi7Cdv_%N!?34m1(5d|v z0%S{~Lumr;=+Jow=fWwm13nqlwv;+_%*+&F=SNW-Ud3a1en+frZ=?!v{$#~g;Z0>b_gDNuR`}gX2RNT$wwJ+9(q-mo=0F2s7O_upk9CBgx28L_=OV!)bZa^3H*%^a>8jlOM-y*+mjgZ!h`PfQ)T`A985L>94F})f7rwVb;`fC;`IT6iAK@#R zqRc2V-c?4|toQPYz_esVRR7}yaz1_%$%K6s{eiS*H5GsNi)ms;lEvZk)LedlBv*iN zouw3hUGFQwk{>S86VN(V{_4KS^P`Eq9A7I|8xCu0=9ehG5-;Gxzy&%xzki(dEWt9r zopoL2dhc0+rT9v+K&+{V!Q~G#xQ9%+{krs9@=ys9-@0*Khp*?ffvX!RH}EfQ7R>99 z|NUiq6-S*q#pz@J;om2F2dz4PG2M3=--SEYrab-t92C($H{CwEUYaelCP#k3ES85( zg|t!bBNHbWr`m!vVuvgPv)M9I?B%4+tjOmsB9#3J!sf|sgWSV9+%T(6m>?%MDOrAW z5GzDjYiVAoYCSHBK$eDUzx2fA?Zg$`b8+ozD2b`4!amaLbg>}z2xcmOO?=-#ATEn# z(IIBegS+Rig#4M9`qt$+W{b`0`TD9_h;y6u;oK&7NO2KYD&>>dIuhwhXP8yF+fA}& z`{czr71F2J?#!D1c#?vgYUfd-w!2?YTh^27{JFH(jJG)S-jakc`5G)YAhkk{1+>-I zL0rm4_w+0XW4nQcWGwuD9mm@o+?#OQLZ{AVi4~$h7S#NYm({X-6HhLN5R4Lc@6+aN zwefOX$HnQfOpt&1(4#?mpT$!7$QsGzLH&r-NB+1V#~JLIX#cuyVhJ*3)Gd!bjm;|& zD0Djz3j#ak2iKURG_a{xr{#T-IN-;cgf|q$ba+tQPNJt(_jw+F_|6TOJ0d}w?Q4ZP zKAQ6lS;*|ncgh{`z7m9U<+#%F2 zMWXAN_EH)f2edeU_sNQ}6A&NB6dVwlFE5w%YID5s%H|et=_}mcj@jjg_UX-pcNfJq z10=?fhq3F@`o{2;Zd+teztSbx#+UOKwI({C^QnC8|t3D|xxMb;Jos6DilivFPjjZ6jW|}*#DM)R{ z{Z_4u&)o!s=J4FY<}skM!dkrucG>4R#`*PlJVy7NEtVG^H8Vy_gWKYgzw`d>oE|lo zAqL1T(4;fNnz1w)xd3U!1U?!&IQD5}Ui2cD?KVVsh zax(vr~Ja<}E1@OF)tk4M6_PYAlO7zUjQz^WHQ-Vqg;)AiKA3xLwMAQBOL1q@QIz@M zHOZQS+N}y1Dzk5Z6PQK#IW|-noOd63vl^v7lC%o!7+x)vD?sw3mked0)w4J}AH)uS zWkV~(Q2XO^wD~*t0CkpXfpOa$MxS}VJE_&BJ4Dg*`w-oU?d8=*+oi4^E=>GHu+2LA zYQrDmr@DfDE{tv$-;PSr;i?AYn3flPF~0#q|s zRejXkbWx)t(pQ!D$qR!N^$>Y~X1&>ulcO?q1vX?Y+OeIIT@Kf0_?Fx=aPIs4un_B2 zKwCZF4O>xpway7zU5l$`ojw=R2@hL8`1?Ey6!{eC z>%zXQvnx~yHuc_>sQo5OWc9_}5`W0o{LtMP6nL^@!^~TX&r~m@2jy;my(d_kE|%jY z{_-K5lD)qi?;7LWZa4T7V#eR5&psDl%A`Eo_3nHwPom>@-r{Di943OlaVEtjVh)D% z=?aT*g8K#)=g_Yt?#sT5Zw6m2Nui^2P`vfRD_b{+R-{3?A=Vw(F{d)UHrz{PpS}t$ z98+_V+C)tfe_!d*4Fj)#2xSK3wvQSLKfc~AUWgj3vmjgRAXi{6e%=wia$UXy?QT&w 
zM$Y*ATPYOFYaI=8{#}MRj3SK4u4Tdr9DJ0Jmh^7*UONnu!kJa=*W1HzzZ3j6>nD^U zeEfx%9|BYxyrzc|EC!zJ87&~FTDYV-ljfHKTza0Th1)S`>9-tzUg%Z0W!l~ubJ-4% zCup9Z;Yc1%$iuGGoquZ%ur*7;DtN%nCjPy!j)8m$)vHt#tJ2?i#FB`(=G?t6L-`0F zPqKNH%{ynj1L#qo`~6uvV4a9|5K9=3;O@!$P0R1P{l!JFE4Ki+gh_ewix>b%$0-@( zECUX8JAfP!WN-I>$zaivM?X#-1^|TSblt1ohL9vuM*+YqRHt#3FROTg)Y7g9&Q88x z7~Q-3yh5THt8^nhL072IplDF?&ai^Qu$~#4EGs^jqjLyI=gZs-6j)--;UdU~#q%zt zLP5XRO9+vH0&oNg#2Hp57gthG=2r=EzIrN4-YAKTdb96;HBBtW3rMEp;nv-v)(+m8 zemwZK`i2e%_^VAjiIEtiy{#>2YQ~{AIV0Bjdz1n&Oc|MROgpth~Ecc^73NI5f>aP3TLj`^} zS?`l%>89L&+?TiDl|D8m<1qW!r4a!nmYwZ^4oP~lBosUqud9=d`mm4u!AtgozfD6c zTZ3r%YW}~py;-xW?AHGGU2>MPQlO&ZKPRE@ksjeBC8%^FC@M`V-#r)3wbt`Id++y6 zQmL$}RdYINgu5O0xW~ADo5KmBr3dDDow)ja-&6j7uxTE3(UxGSV@p2PHkZzP$&_-c zo>i2r-N{d(^X@lD_ZeCB5%|yH`n=fGxINv#U7kqmWYqkLX*O#tKpgjuH01F}T*G#l zZu|YTC)Jc5qz@op%mebsG1n1m`NQ3G8tHCKkx7Lmjj#K2x0@2SC^wHbC7Mg@$mHrp zWY6J$B}Spfz+umb4K}PWd8>oHmC0Q{?ajj^K9~LV{o(s6JX{DWP9O_;Kdr4KGo9#q z_uyf2OH zIf(LpSQDhR&7frAE9*nC*o|6xF_0xP92UEO?#cFhCRU%@0rD5X0h!kd(y)g^$60VV zyC0Jd_JT34%%Wszcq&%)%E5N_$yk;9K9065^)!~y6nu};!R~gKW3z>~6p-P;r(0k6 zNGM;TK8Tymew|a;P0fa@#jT($cD{sm3WAC6Xky)MRl4fwKC8><6B6Ar6V^wkz28oM z>nTmTebE<4qDKmuBy4ZPM)#myNHAb;#{_;hr8j9 z%X}s7v7ssN>6;?#l-4dn1Q7#c#JMLWw9X7Iy>G@2!<)U4qOT$PIlD%nh?r#T78g+;rHSr2Q_=Rdqs4**il#Lw!CXfLZok0~~F>J6d3W+GnGQh{1De zi<~uARe$ICy(h(K>Ql>t%O1nFuATRC5sWH$-~zUt!}#{jiQ{XrwcF>!t zkzPloQ_F0kqvHLD&0*hv>zmb9-5BFR(_ZPaQN3y?N7|&;SQLvZ0QJ*VLtes{MeqUr zO6lkA4UA65>xlSRZy2&Ox+mz$o1?YBI)E2F@QOk&hsyEd^bGeIM8}~FO*Gy@NT}_H zdMv|aJH5A<-ojmaHTH=ZxA}>V*TIvZ$|S0u7ybeBez9yml6hKxd-eL^m>>jlZPh)t z1aXKq4z($pa5vOkm@Zqy9UV?cu4o2zT=Sfl+V4xa9sHX~Cb!P^JfNhT&!eG&ynnkl zvR)edw{{umTFromy=FN@wUs zm(F_t8TXV|4+{GBIbxL8T-gp9^A;*zxsIJYy=lA5m?zR8DKYq zx1Ls>(-*RTW{F(#Df2Zwb~^ENPUo8*Z=Opm7C*{C+P6B~3k#^cJtjgwe4Yb4F5>r* z7<%zpKS0snFHdjX?-P-E9-{`d1V3B7x#!k_Ui*dLMtjCzuhR`w^N%|{)FfiNrV9D7 zfh=-yH!E+BvV5{6Wmx9rQzHPXlM+4?Niw^~FRg}u#ul9+DOW~ZyE`bFuR4t?M+Pw0^oo6o3US?>l7((xAGqE)ynYs$IMxpy7cm$&SEcE)zw zEKf^&L!WmwbCARs*{}R0C{Prit^%UbLrfeAxQB`EI!Y(*kgRSPtMbW zd^f``u(cjc_2Na`9|=h2i%^7!ki(|OtI24WtOuEz0GC^}@x+(V&YY?XK@z@&*gZc&S&UxU63#@5%Z)4!0vnHtlJ(i=MU784su$w>ZF4kQkTRywp&Y7Ub`^ARPHdcH-h}lR|ddmvf4cE-2IiPdy z&(}n93bWug{B8x;ZFbi_#a<3n-Y$xI=a`VR?Yz3_3?G||3C9Y_4pi-d$w%gebblUO z!s(md19-qSVX1ri!OcxkMuvvUike4%u)EYAW0P9dW){!F@lx7Es~Oj(^jaz-N5;Lu zJuq)U95)c)`VW%UBvKXGMW$YuSIP&r^UE7htzLuVWF#gOwx;dDoiq~E zoZe)vLqEE4(15sX<1u6cF2Jw0z|Z$V90o=)1|YU-h718XP~yoU7a=Nt9zY>QEwco% z&u+y**kYK~MRLoxjfVJ)hB7v)yKE0} ze#B}deIkP5v6DK*p3bUxb&{KIo#?6k_zR}rDB+<-nOt6;y1D8Ag*5VcJA`HzKb_}W zK5@4*|GG6~zbYUGX7&BnjEuePf+v7&!KJsm_i$4k?bKT}Jh7sGb#5urJCZ&r`&)Cl zmD9Dq_xqJqCmy}2cZ~T?V)0QX!%*JNlWnu=rXm9`K0Mz2A&swym_BQ*1v&cEwyF0a zl%Z>};;;IHJ?$?!a3(CsWXVD5lZPD90sOGxMc==p7l&eSD57{P8E3YD z6GvxOB%yU;tL55%<{{doJrtF*T#!re4@kQUWgEHg3V{pUdC#t>UN}jo zRdS=J>*nH;-E_;VCCl=jUc0MMJ4@zs)sN(~%5Jm-KM-X;-rfZLheI7uHFxbZ@pUHc z_i1eq$;p2*Q9vqmf4&MZ1Z;#@(a1hM9u%zV{X5{_4b%{Z zvO$!dxV*F+qOlDGI%}6kxn_f>ap(5j0nWw?!jK+D%?OKkS-O)ra>slD1zA=I&LZzV zK&iZ9SuL&~M%O-FPI8_FbPTFA->#Y-JAgyDVcuMH?MRtKzD|BP&`2#rX#3;C`Qh`% zVkvNY_>y~nCOKJ94uV7;yrNydr;Q84UhpQ^YSsoU{t@wguh0H&-n3X;V0-Af%?0n! 
z?yXIowon(>t6jwSmbN@Et5PSVt^GFZ3}R|;|L(Cwe!dvd0eOri=eJLNMrgc6B#;HH z;#Tg}U%SWmHbe}C!Aga$R3D@U-=ZOxR*%&~Xzt2?<=8E}sySLn_0pRS#K9gSZGFs| zI)S@>%-;ny`J9vp5L&$EAL6v>tU|-V6A}$y8@otlI8|k&LB<+g44((m2FV-Z7%u+# z7CX@z^ii39?XV+uzXbG+8Hbm8{Ls-@Rc+yuq?O6*A;O$+D|cy0?dYkw^Xx%(?nE`* zfJ+&FC!G$|M&Hyvi_FwxA7Y!}vF+s?_rSg1AieHy2r3xvoJ#A~URa3vL72ACg@|cg zN)?=gq^E$4Sh{r)o4^|6?S#qF4X1uGsK-_EN@e%3N`xE>r41>9%hLBDblZMLVrxqReK=N zj;Ay0#o0-HJs4UOAZ&;AgBz6rM|nOz?nzho06{A&uq=Tb>ITGwQ+2gI0T2`U_0|f1 z38qqMNE&<=n_ltzEB2T;EQa6?0H*O#Nd8Ij&b*GT+2!O5O>x~lm!2I3pzizUmyw^z z*i^O~?s{Iuxx(CU*66Pv9Rfyc%Jm$q<#Oc>S}IkHj^+-MvTwS5tWFQ7*yfXPyl;2q z2IGn$^2KfmO(_hvm+jHWZcSZpASz&gJdR7xEGq=`NVk|oAe=CkE6V`JzI*6;kw?A>?KsO@<{?J{Y(fj-`QyE)MxGQrB58Z;>&+{I(fA>!{- z3!$}D5;GK{{R{gsa$V9_dcUlHoM;CYpuI37@5~@i$PvR#fA7qCi_p*H{b`F8A82=k z#;?}v{=UmBtvO}3{WuqZb!95o0%c)nkUS@kQ9DQ_B#@8^)r$_e9uoOD#X(fHqH`5n zV{8WaQPlcjOw;)gi|^bhw&P^)q)Gso21$X+H!f2#ewHn+l<{^PiB$m@0C?8yfq0{;Ffs68PZ`{ zcVObnhbxlcFn+|dbbs!D*=fJX5p}SQhk>@U#iC)ZSN|ltpL&?oAQZ1_VsU=wNMEER ze@iakiAo|CZi?~K|B(E|4OLvsUCq3sJfb0D@ zAD}lRNQJ$IQ^ztX>K-Am0_-n@GVFU!F3u$wrHATL=2c7x>2y+m`+SKhv<++bRc!X8 zM0ZktI3DRKDlR0nd!$K~o8|KDF)9Yc1aXz_Jt6l*?z5|y&-`w zShIB`c{!^qN5+$XgefgD_TaE9p$9*6kQJ~5rPS%Uw5(sZ;lV- z@z_ENz%6}*g~q;h9!sf!{0gs~F+7cRJc;0)H?-RFe*sq|A>zbS>--A6da zwjlwNw-=D~LSo#1JScDPa8oXr-TTa#(35NF%^6vp`3DiC-4%FpDA&5j_tqZ=3 z(%Dne_c1*zu+Y7&D?UF5SxYQGj#m!WYE zX;1JZjb!8f6;5SI@BAF|DHDCYuDz};SH*)VAsW*Q=*2pM-K&MHL4&}uux51^-=Z8d7z zWFGsC#GCO(bNw+Iex2b@_%|}Mc&RZ<+A+o;OoE%@zcJow8=6o`0B<=mOi`X0b0KJd ziZ^;an#B}RxXQ~PL@Q4b!uB^9{9iC~1U~}+{~OeorGQM%fVdw(;vd-Pzru_s9}-2s zq2mCr7RceoIRJnL0F?oPOkDwc2k;BD0_2l>>gx@)HvkkB#TZgRX-NQ5DG`#|1rOW? z%k;>g^j(ot4iH5>03eEC&oGsKpJEq(6zXtrF=Q_;0MMPc1!!FhPnn;gc|9jcpA+Rm zLL3sbGfKQXY1oPd0UOT;r5n&pp$-LEb_wp<15#-b!Osel?Owg(%1xua7@ZVuRD3}D zzM-ZJ*4*1nix{W@pCKjNP2F{d=m9VCH}K#LIQ)U#{sS!j2R4jz{(=-P0eAm@8$9j{ z+4}`Ejb!oXyq|jc2h8{vMD7dHpFyQFx2x zdXGJ!tvCSB9>g&^@JP6U7C*#&smNfv8O-iVJ?+YUT(Et`<5HXN6%{G7iWFj&!SCX@ znZbp~is__m82f8$SpmKaV12KIVePT+EVZ!g#CMY{&niCap$2=!VCxK^=@y`4@xbMR#$inW zFonb-_xs?`xhpFsLPrkp!BsqrLan3`=|+=Mjy5-QzbxX;?r>knXG z6!ULw;SZR29wUE$wW~ErXKK`{%wUMYfQUW@((Dc+bD{~gJMxL3*N7jePR$1!(k!MG z2rI5se3a;Pua;tz`(2trPf)N&orfjc3oXXX1b|kQX{zx(^>Xkq21}lp9uCMHf-g3C z4FGD^%l06!l+Iy|7YHe(x`-+GnH^nXf$lMbMU@pOi{{vEQu1>Od+Ctpym z{|o**kNYKn%D=(N7+Y|B{{1^I7=HlMfA3qrA??3lp(tkx*Z~8O{|s{YzU>3bDis|P z{^TRaHN1r>kg*%*Bte9nH7PQI-DViJ`FkY?y6>hPdZ2l z?<;|2{Ts4>2hbj3iK%Ct1LQqNd=&)!Ikq`zHpCV(e{O@Bhpj|(FL0eJM{qhFvciO1 zkTNDs1>>O(Wl(_|Z4KjB5$N@n8yn!0W+dZ86t;SVtT#lU=lP+6>4*L{*nfM^zqN^< zx$_ID{h1R0%bfwszu>FC+SAW?^LJqS4AcD^2>q{rV?H^lj#B%n1MocRQ!gj)H6FfRl@^sJKFvb_bh0KjidfaE+`1h5_?607B40 z8#|+~X6_`c(1%&;#@@s+M$CcXsCK6vH#Z~AQBF`q0r>nNBkT|^qI&=)6Vu!1(@JNq znwPPEK7Hh@XW5|w5j0D}!M}j>WMM}Vc)#eQ6o)vNAq=cckc?46j(!$xMtPF~6`6$I-W%3Wm_W@%+j{T{Nzr$DorH|G)NB#?#{slV!1puGn!*lz^ zXRB9MFxlQ_3D-P!yA-qwNOXp}8YhNL$(ZYZ((^i#MQpBsdE)_iOKC_woQG}I&M=O4 ziR$>exE5!#yum!i3*r9F4qb)MkW*9;TVU7hNL4&ws4{DNa*n+!25`VJeezj#VfsoI z9GdU}y5}8OXMxQ^`gPAU7~AZQNAkhBlx$VBdtPOHj~hC0x*(dxH6)Wm%I(g<-oiD1 z;wv-z)CSxY`aCqeQs^%DPA=~>4*f#_#|k3Dv;E6u+3)ee1V|I(!-zQqZS_~1{aw~S zbLHRrJL>0GnSX7|8BULK|I0r58{ih|kLV+S?EGl2(^Wg<6_mC)M}f}Af*XIO(J5ZMXLWO64!1~|j zn!nkOf5y;xy!c&K-}Z}g`p5VJ7%G|22+{Xi#GxArq01+a%DwBykBfjC}rHrci|Q zZMI*&^YW{o?KEt-)3f~&y9`)NuwNMOUts<}Wi{J0zsmr?X|$WaAj#s_?gn`L_xOna zKkJ#FIq09`^51~t0AT-~`(``v+a}SsH1r<{!DUYauyBr_p9Qu?X*^MXsU|v4BCyJp zt+C&0O8|{GP~OKFIF6XrK=MSi1;$H5D?1f?ydEI<-gSwjwaNZKL>*h>oUXO%U_LrPY2OtYe_%2bbqhP%rde)PfsN`Ls&K{z zq9Ov|>0xr+?cy^E<=K{VqX8thWuTBb~AW=IggjW%Jir0{(`RKIR5{mpZ^&fe#6-RtVyScz2`Ic-WHY8 
zsqP_x=S5grxX_n*NdYuC&Wmhy%K_RD4&z6O6{X9PeE@3XLhN0kpl=HrFmG~m|~MkWrkjy0aZhgXjdy8k`W?Ze@{UNr5h@6%f(_c z_&y>Bn(+B72L}x@g;(%F2N@18VrdgO!6beNP-p!(Qm`Lvp+9QQk=Ig-OOe4o@6oJ( zER4$!fMsAl-2PBT5Fe)13KFJ%0}X)_$=x+=lV8OS%}%9hLzXH~sdM=`m(SC!qhKm$ zBY`UYcB@F@RYY92f5J;5IWfze-EL@KU_lIx^H(l?1>zDUuwpCDsd#w!j)?f0%^#{* z+%&NJAGY#>Oek@{!?HdIgM;)J*l?!nDz}(gXP`}N!UL^{Z`pP@XXp&sr})FLEcY2iztADhwe^^*@Hxv%RbDJaOF6dF0l2>`6ED@r`RJKA5)n0 zPB`*VL92nvnk~!DR3V?nxyVIQ{q|{&=hqVp79twV#+T?gJD(gX9+%~?)ufi<@GOoj z0UQsIjL)15e^u%6IE1!1sMpS!%*ahnVc8`!cB}z#{Hb>nsqnZJpQvK7jGHcFuF2El z$vNR;g>xH7C%d+&7yNFLn1AiYj0;x0hq@JGTQ$)Q|BAi{geY&~U<{thZ%!u>NoZ3fxnp=U#<9?Ue} z_SoHae^atvY-aG2ZwdSh$J%xO(>WwTO==}N@j>LCGE4sQoM~$fhr9|7+E0FD&2wmF zAzQ)x%%JF@hmlr+jB9oejdUuEKC*@6mua!aoEW7OVph-Vt-_}`@3JIv2uo|oa-hoY zM!*i@O&6j+G5b60R)jYzfo9 zxVneWNqZqKVxEP+5!oW6?jX8_C7@hdH|j}OIC~TO;&~med(v#E^7T^Y!U#{vYJU&) zCFtuMIv9G^ie6%zAW^(4LWzZu{! ze~(Z2;CB&GP-+5Pm`M}x=v2(X8ei4*rdhE;Ai?ak69x9!*B@Q&$%yxWian}8?pyCS zHLPVnJ`!!K!s?GKGn&Q1a`65GH2UircuSVv6O18}`WBG(vjd0z`MBw=I;;#YwC;V{ zpca?X>&P)%jo%B6VQ9%S$A)5AE8ckif0vRYfTTO)VEle!gh;8KO$b0}O3bht${HXn zS0z3CJ^eWg=CI8nKtNn6Q#QD30|TY%D1}*jxC)(kMS>qCK}{6rBYyz~7?t~9sKq{~ z5uQ0|Km9W4%3N3rp%9%FsOE46QYUxC;Ub-aQ>$YL#(Vz~E9QN_Kb@{BWN1$Yf1uLW zry>8cQZ8<0&wEyiZ@TnDcnok!76B!69Q2}Zy}=-cpdlO7HykD&u@F&YEX+M5Ef!yE ztJn8xOp=KH!OfuYX|zZho)R0bUXFT&3_H~EeEuwMW@VDVZkSpMaYmV(JSRdk!w0Uv z;rf;QK~3b->L1F315?5-jA;9Yf3eXG14UEP%F4s{RQZ)Q;5Q`oi!(?wqdW5x72!*m z$_Z*^R&WP4YkH?`!iP0VjG1PT zo}#}V$8CumZYjmY!SsiLuEo6$67_I3Ve+1hC0-9n_2LU~j2p#|F)Mw(s$h6NC%SF8^e#x7{fi|s@5B8dtc?#xzG;t7_dsARG) zX_-wWvd`D@)Y3FL2v~i@f5`Ny50Or2x>SqD>SP_NY9Xk`%GErV?|FE6g+s63N??+d zh=2TyTc`a2&JAP8pE$Zk(_uxW7S>?XxHtL=e{j9%o`7K3yNcez%xgeUwh1SF(D!li zp=7-CRC@sba`Zmso}?vE>-8_P{le^TQVKQyP$>@)Z~3UG<4(xWe{LHK1oB>%GSN3A zz$hVoBfQs$L9lsit*oj;m`_vw78Ce!G|URRO8=(l=)Wyv7Wav?#5~WRq%+mdWfUgV z+Wz8}k=x3B9-P{jP|~kxS2C%fw|pMbDaUf<^QW!(*XZe5WjrGko?ck5ADDnVRu}ia zw-;(-KpT$#d_(cye;C3Ye#%shAP;WseS|O7F>`RyR=x>$-eQjSD}dI_;143OGn_y~ z3C)Oc@D|r?^YTl{p(N=A`1eofi;9-4iE5iQ`)S3NfP60m&e?mlY#`1C$g zYH!Smf!sZYhV*CU++Z{O6Zl*%^O;6ORGAb6cu;`ow%nqh|4p7G&6kLsmlwFth0}Sv zxj5rhCRF2*f9iB>;n`bqiy|3hY8v3RId#V?j8S^M*34xK(#bUY_3C^WE==8d9((@p zY}M2YRk7xjy2`<#rPJnQQ(;?TH$F!1t@DH!Np1((cJHl?yhE#ziDSS^>X z#QHt}%)|}UMyIQpHR&WJ%=SDZ4hf4p0Nb9II&QJ_nhQhFHw?qs5tD(qU!` z&AwCif->~5Fzv;m_a`^gD`YDpnqD7R45<+y);$n8k5BgJqX|xem z2Na-I=bm5_%dJT!nLLm2A^gCVhLO-xL7D)Q&BgmWUx2`WGGy&^+LYvtkHxuXMgC^@Bx_K>LkR z7#rS&q%v(?-`RZjDl&PK3ebtt)hiW(&pO ze}ei+p`So}L1zgAf&fyC$VPhvZPr2-K>|1VL=z?+JIEuVY;v6p0Y>6g_OBFa+`9Mm z;9+sMDBd-ZHdNt#XA4gUjVrV`hh~gt*x~(+KYXKWv(!u|#&s{VBX8W!NWOUgF~Rym zmdgV1xG62}L}G&C{R+wxC{BHxX$eLre^~D@CC0LtlSorlUMKz>nYB_hvgg{2aWHOv zQ2Fg*)co(2fJghBsvlB>O+myU1q~SE@+DQ$Mr?FL*+8Kbq1$QiFQ2{P(<;(N)*tej z+7T1GkH*WMiPJtMxBY-b4`2x&5=3zDt;p|cTnwuMte}eRy;G8(73l32d8!Ide`e6o z==I3A4Z)sjOlGC*alT#XEQv5E&$lW~wh@9*X%&UG@3X0J~sBJn0{c!~FTH-%tqu0l*t9J*- za5T2I+}l`sjFVHZvA)7n&o{Tge+2DdxL&5qD`;c9_i)o;mVHyo>GoR<&e2Q(=2^O1 zUCvgh3-UZI`h-?#&2Am6Z%GSrYRX?>;Qg@TwgK1R-*OT;rPF^54VM9@=JI}_FJo{@ zFm4Cus@BDeo*UYo+IhB*3OnP?K1`V*U>7*enf0q4v1AnYK1&^}&-7-Pe|B6i%x5DX zrV_;H0jxuoT&&GU5U&24p_kK~Y4iKe30Wx~G^=hT)5)g>$83TM%EpZy>Kqrv8|;U8 zNYR7L7mh1$j9o3~mGsg0VIZ9GO4j|QmvJYVLoh4V9N!=H_pAt1sE^nFB%ZV*3OnK9 z#r2WN47k_kE!_TD^1q0rj~a*Xxx9r%iI&Bqd&PparC=FpxZ{b0V86#^y) zTXdzSOilD>y zU~z#d_{3!ZrTiW3e=3!OL&RbjYdrF&MTH3O>18tI>jnuLw>b@hx%0%8V^KoD+$6tp3x0 zkmX>6%NYjie?{VrQ@`%~1*9jT$TabBsvENggm;9;i-Mg@dXKl$_V9;)>13PB^u0T$ zeqa|q$KM29aM-0$4t%+jnLPHN{AgfP0F-kI-1`P<+#(P2S~=$BK0R9MzHW4wwCI_} z19VvSAMW|L3l}(x#L~z26wOm7^Y|==whbtQ1!D*`fAIQ#gVG4_XA61K=0!wMW+Lj- 
z-)wUZ+I*1^+;3Y0rVY)Rt*EAB{coUoSYFt#Ouo!yH3PVtZOCs4Zz3ul+@$DEqZUAL zRym!`-201M<~YC{<^tXz=y=oJ&_6}JX|6;-!U^E0I9(x)BEe@AqT)hMqiK7UqnGF<5hhXdP# z06*RdRUokPeZw_{HGLgcOV6BpcP=!a5Tl!rYAXQt^1=eVm(8c2eTAe7e>D;%bli61 zioNN1=s7GTwI&_U35XDR9pB#^;c{gubjMeTe^Rx{T-8OIhx*$QU|DaR`cD~x{myji~s-4(1J-u>^eId=XB(y4hav=kQ&B3~`GN!-Mx_}#E$@jzv zf4A`=7Pvr>-J08EIq>GU2wrGn8c_@HJb{wmRL=?|ihP@3%wW07+}=7q3nT1Q5xq_c ze^Qz9KzGn6?m23`KScJ$K?CHdX1=vR#5(@nx!2_0A6F~h!ynJ!1WK-7pr@+Dm~Xd| zNmDA!zUQZ@1D;lyQu$Z-Kd`%G~ajJ(Ahh)vM1DY6g zu?ooRdAxk>=3KmXiIW46DIY?On-dSS2dm5Z(f8Xtm zN8%Bh)He_kEDQ@tL-^YKvd>lxXD@>t4g6l42Uq{k~*X0fA4 zEBH9hInqYuBK#t6{rTAgVpAx1`(O@7aG+FnAzP}ugV8d*bT;?h(NWx*n$>AvxWFn1 z@{DY?ODUCH?1X3r7vrKAxS)LKe?JSeaErJmWUOGfne2syeqe(E!H;)ma2!F}am&2} zI7)Qe+EMWAq}g$8`z<|MPNMRcjH2q1rXZKzKmEAb%;9I=jbP?-7VaGd_PwlO<$Nom z?`02Z6@&N_yt<=H;_z!9eI_EKw#%sV^Y7sm@9zdZXw2M@IRi+*Q`O5Bf2!-ibaMob zC{xj@5LwvMd(g#fYYK#vP1M#ru+kqu5kqPECQPk~d!I+>ufDT!4W+X8p54&emU#6t zU6wSi6u~H_YdztiPMQ}x-)<&P9U^Bl0lv29D?M(wQdv+vdPpfd%EqZ5<)LEJ;xCUh zTRG9$NT>Wd;foswTi?^ee=|>-K@;4deBThjY{IP%)axa^?Cp>mo4!7H^icJgqFEsH z5G{i?dHh7`QDl+2@&nM&W)bwSJpp36c-0ple4+Q^&tGt5b2RdnXT|wwVOv?vao~Fl z8ykqwAfvho4qW~HV6AU~ae+n4;|~Qx&G<-{vwl|t&nNARB1AJmSz-#pblX#N>Az&CdNaBf|akuk|D?2E0%hn8ho((ivS81PADI%nULQE@i6Y1 zj<@Jd#K>Q-e*s$FqTfS{pN&yp-)QbVu-6IYYtA(xjOH!vYbiAUwv+y0_vB^H@pm8? z{uX}26KP)MPF-)%p4zIFFSe-rhG2idjV2aeJldk@UqI zDv8PFqEM<_-WDh&O~G^YSdte|3r|}v%FWQQ`hNLAe+U*wVpd;{3$a_U5Xb>`e)~)9 z#Y4=Ev2^+d+5gVV$dfgAAbxKxK&%fqq`Tj*N2LbY@qJl2+{}BzPPJ!Peh$7kTp-)W z?c1me<_z^~XhV7HdKeT_(i>kN7`h*w1S^~Qv*M_1Tg`&Np#t|YKkidb>Y{f99isE3 z)dOZ~e}f$G2RwT2ET*L_eW1r!wh*HhjN*rk=0au{DBoYfX{E0+1267lQ4;$lQKF&% z+ToCgvM86jXW^_rUc1^INBF9W*df6qR4$7EfX8?{cY@3b&`J!(_FNLgVGsp3Ukp1# zJ{&W7j@4)U1oSz=Md5SAeS~aovL*ps;pXasf5R$z?Gw0h@uk(pg-GLw6txc1_GtUg zZ&YD^bNol%C$*A>*Mo?n`Z6%gca-_tI{E`!+dp#m>OZ5a%Y!uq==6U{H4WruH|F0j zF8^au3!GSJW3y_@2H=UqVM;M!4+M6|`i$t~m4NCUlDAA1qF-3@Az5EiZ8j4WQ3(($w+4g=XHu=SBUHln7|$RD~<}f@YQHe<;U!5XH=gEVbe&f-LZlVksa-(c#C)CA*OQWEUHdM7 zgfYYXuFZ`K6SSO5KG-N zxQU6Y9wxxPG9fJIpn9%ODB1l2Dw?aO>CvP^#VC|X`T;ie*CZ=9@%-gt3#D;j!vY47 zKrG@vGmjmVMexI`VKa}`ye;Os7o-Ss9E?nm6X1wvpwivmi|uYEQ~7=9V4>SW0L_E* z94XRdXByoBgd{~lN@9qKkAcxifBcx~P>ILXW&ff%wepRQsOR)Pc377zcn|Py}gC$x(AsJm$(6{~SShDGHXw_J6jRFeH_s-A=aZPe|lqM(ShLEG80QOg{q5xSr-}1)^0)n)O8C-L;2({;8~l!RhiygklQl&yKgIr!brh4_<2Xq|%LNw`pq@isCJ(1~~uxaRF->pQOrP$v{#VTHFOtapcP)U2A-H5L_1WGjRoDpWD*Iq#J#7|!@ z9_sjTDNOR{#S(MF!)-`3<{7e<|C-DY_=(qrSVX@84&3|67U;@Ve=Gg;t9N~6YP3ub zqm0|66dx`md!tB=2syJ2XL;`9w}Fj7?$>pJNcTcR^AZmqy8sirbx2E=Lp9k8|I_0d zFow)~*`}gUy?v8>30xA9mO5e;-q2YPb2*XV+5iC~*M zExIZF1ht2Hs4w%~YkZpjTX!ezwvl8vs=BNsW41Cqp zqi2LZVfz`zc-K+N2C{QQ{0aj5@*8MOk|gOL*dN~Yf7VDKFN7ovn(wC0SSh5d z6n32KS(+5ayk;=%?JiDjcvARYx$%bNh)eF_f3nq$Pxdntev!U4&|X|YV8XE}-qK%SVGCtC*OXMfG_o(LPI{e z7fTt_XNdx}YH4_O#EO&P#^Qn}d9-UOGQbTY6HqR~L8+2?5nHZ!p4Q z*DIIgG!Zrde-L6b`y@WDDOX^Nz)g1$eST%^f8jCAA2w&^e`7U6cUyS00kFwU?4$W2 zHP!44Fd75-{@HS3kwc1$LWSpJHY5a`V4M-L9@57jG8v)pFcQ&?6r6#{QnA&SkN>f= zV#T0TTrw5q-R^gQV*>xxw(@QKh>)#xm%SSYC$#VPLN2)+@=onsB-Srq%;ZYxMClAfy&o!Oz{JE)2AWAKn~!v_}vtD zy4rBE5Auw!=AQvkmS>h;UltN+!3C}&E!J#sj!fcUfETRgUgAr)aRfvzkoTCo61p#N zf@J>zG09d}UJ-?$&t8FF$NSlvpy0;oe<~3siFk27I}v)b(*{;b8u1RL1f@i)AJEwM zMl+6zl?lNtZ4e6a1zD7Y@q84&(FaHu+oz?#t^&AWT8pt}rgQ8_Tr6Ti_-z`gkP_iH z)9cV56fq}GF&aw(m;&Lz2Hm5Ds6Unw1>ahzgsD}+3h9qxU)6F_AFg^nm!G#JbVSs zV(GOfKum7sy|?}8Owe|60(~=2+lSnciN+tGZ7$;yf7rh54Clel(0~_bt3?2DNFLfz znXujKL#6)_I9*)|kGK}4Bk#%qe2enHS91;)C^hdsnNNQq-E{)RrHz%?KX@vAypZJ{V6rTzf170_yibzud&H!SPo&e-0BPRc0Cv zk$FrdNpnS6b`9V^m=|L&Gf<@SNqx1kF%Jy*la5@b=qI#Cd&6)AbM=&%I_>?^U3H`n 
zYv5^CcVI>B@1Awg$-SjAKIvPC+Sm9QQI^+bSfc$6p#;__^?md^d7eAz0KTc*xMG7Q zd4+kPIFDQ{-g6uk**ub7e=ByhB_6@q@Tey2OCGS&*}UJ^YGCXgsoK;Wfm~o@hus=Y zdY>0>Vw}zyPo>bJ0*NQ>olwuO-GhV7PXz5V`)QhDRWoAe&uvby+5Ex6y68KCzlgB_ z1J^PnTBGdfleKqnmq93HSv%`^s0!FOlxi=#kGQ;AL*BpEMzjdMf4(~q9ADDEf+DCp zFK;Ch0$1tE#{gdL1P&mye@_M)V;hep2oRq3?S;$Sy0X?=&J*!bmc?GkLld2ca8_=g z3W-Ci9qlbX%1#|^j`4y!*UO?fCK0BPAZ}1<+24%X&e5vrV?*8jE*5Pdm!}yP&j3ne ztd0af`N#Ky&!;V*f3o-cRC^_JzTh@5K&Xjx3@ndu5hm4jy&L}UN9XIMbPp1Yo!W}w z%XW+X6q2}yU6VZQ&3v9Nywl>82FMLkUH4Df#yx~A=JgKV+mXNC%+IsKoSm9hzTZ|| zz#1nvGRDRi+l@~rB{+T^;?}IHI+3CQYG^@M7B0@RS;}Szf7n3v*TJyb%;B(SW>NvH zS2o50Vn#R^1gOyRWog2msUi!T?m;|}`T|C=cOMhXqU0jy3&aiS*I$FBbS-b!n~g00 z@6{)o-b{vIm6Csw2#Hx>U9jKCnBbcBsO><9&0Y#+08c=$zq>9A$7+E%^vjJp2-A?L zK!@qW;D$B0U<2jCihmLM82FV7B0Ijr_aJ4S$71^Lm?Ju@fr)wxp;iua|FR5n4NQnQ z^+z6Yhhamo3K+V9i`gFZpH4*dVzybtA>SbyFQQf-Z!Zq2&ncx~Jy7OhyA>KiWq4Hl z@Vf|YVpx(=>2_onAlivx&b}|6FBaQT3<1MA-k(_w5@;6m{(nnO4xR)-Ob5wK2Cfk* z*}=?v$7}lr8AGVzS^D&T&HIs2)EzDT4v>06e+ZjqqKy4rys$Bm&^9!#JMf>;0!*o=IaE3S_L?}Mt`kj&WPRm^jox{mSrybQVx?w_m z5#SxiH+$=5k$*_nK0W)xfu~ul>1c+&=qK)KV+RX*uYw!-Lp7edxFx}$Ybv}44)RM4 z$_tjw0RrlqeP+~S!SME?%w=@4N%;!vK3$=rHJVgBn5BEQrH6b3nMC`k{yg{&`3)-US9K!*KaW&kamis67#ba=`9)B8{LNRO{0DKG<G6~7DI7c{= zG(dZjaeoyzwV}j0z*1R%$x;om+c%WB*8}&>@P@7KFn>ED2W+@|6RMf$zO$3Of#~PiRUsxg;rG>x*7m zDT}{X;7QeT8&E)bJxV85=1D7uRFb{X0QGOBH6 zN`IS3R=GIobE;oz1l98~On~_LHyysn@{lyTMJz_;3Q=%-;faNmC5Eb?Dg??^g!N-O zUU8ZZuj(qwF#WM&D_lRfKVdI4(0+e+4+9fsclAnMPGTr>|E^zB^EU#QZnPtt4RfJ9 z!M#x_z=R9O+pag}ZL%NRwI%PVzN|M;I)6U(q-<~Zow#6DtROm~1rqZ(7q?|Jsg>~8 z6NEpxfi&$kEEANKbvg8lohT|k_w_WuEGSYcEU|9$PF5RT4)Wj2O~625|A6FuBe#@K zABE{pV8;Ij=zge?wCjyYgWE8Fc{ISm?xYQVHXyR)m`C^ogt6Ym<%*pJMFJz;`+xl? zN3i$Zo)0&hMOMBjOV{(wgkHql-I6-I# z%Ey5Z;O^T>%dvpv{q;C&B7m>7a%w@2a57orhqcbLfYAu7Icwiragqc%5GU;=6}Ylb zE2S!(wPUeY#q32#0)Nagp9HBB+MePDs8U3d7`^LOCif=bTBonX7Ztl0 zZoS8Ib8^(OQeqYeX9CWFVS^6_QWxltjT0ox2WErbe<$Z2$?sS*c%8CF=UJs%#QKHFe9*K&KEp= zAlb$fdr+wzCoik}LPb|tA~IOBHkukFDn6{(L)A=pX$3!pLo(lHZC6P04E9S4$`4z* z!#SJ+S)>#!6`(?zX6`O%JXC=|_21mngf)$?U#b4;ouW7PXv(Byhkxn(^X>}}j3%On z*UA^TlO=ByY$`ybY%rDzX3jtvjJOEiW~CY1>aL)iGtVu-^g$D%R&1wz&e(Z74AIqa zl)%KV_I%?r26YAB1@!j;OCF1_cD?oEq&k&h+{>HjnbDOH05{9)CwWgm#t*hwtDIHaRFS_;eWcwMBV9HSm9!Ued)j!&(ItdCVX1 zJSKdnmKWx;vL7gfeyge#KBzMO`n!+bH92MA%5D zS1QQ0DGb%+sQ|T(g0|Wb7UQ#@;nf8UrM7f@p+tg4LZ~n(ovVQceY^@wz zc-nK(QlDukx_`xXm&Ek^74V#7tdd?wH-L9Fg^C4OJ41RaiB*iyNKAooh)m*ZqkaT6 zg1Z}YTR>nu>67KVJ{X{F=-Y>kS&*~y?tzOS2IY=7{=bA73E?B?uk%DUNgXVU<&&*X z=5$$9vTP$^;1Y!#L6`ZvWbdm{M-yC(fo$;g@63C{Ykz*tzy+;yU!)2BG4B}_ygLxb zQ3j%VQxhzWfmyRzpQ5XbQ}!bT9DMzg9e&vxl5SUwNG6U&Q=n-zRQr(ER;1+-AxjUU zD?yI?aJ5x|ox)E9MvxTNdC8fYay^8+3zTZ3^f6jE>KsM8MX$)p#FuU9{wE7{R8Hf} zL0_2bwSRH?+2we8ZP=87>+$J4NCem#XbE`8ca1=?%$QA?g?`50PWPqyg! 
zF~;s1MR*k?*n0V!(P!0>iM>EGM({bnID$H-;Cmw4N~;gy0SX8>aje1s&1sI0)g**Q zAm&HHR+n2bdUO9`g{wV0+Nh4`I^3y|EsB`Hdw46TUoN7ZBoRh+hTMY3lT?3cnNN8LM+spKZ9$-~4DZ zfPdj1wYZEyEe^Lv6p-(l?Y2Y0Kabtljvu}E-~ua=CvIi`%Vo_Humb0!!sDB2m{V5c zg+pFdX5XM)^8pFc&sb+Ncd6ePa02{IpzY&*o@Di&5Gxe*Vlc~X2#d{o#7?xTaJB({ z=Q~lvG8C}h1Oa66AX)j@QX`iNhQAE*L4QXj*I^q<{{ZU&X@)n<01)dvu#H6vq8^WS zT=c~z<}O=yyy#GqKopw1`|KSyPjV{cra^SEyr=0qqP_S|RjD<5WswPyS7or_mHmaN zKL7H%4;=wQN0qJ9zzHCT=uMhI>e3cC>IshJ%i`^0tIDWo56t{dC~O(c4Tl?Urhko` zubn?&d%!pN5b%ZG+D2i!tT0pd14-p*74zcp+W;Ar!8Gfzy&wv~1!`3mA(?wd&g~7zPe#%Xy~j z#cUi}&xsvS-L zEl}jYQyr=;icyzwinJPa{6e|pTt{=>A~!9=!_zjndjbQaq%z6%h_D=VC?x;BO-S{VWx%GuFyW^`P#Iiq z&WlU35`8OS%P1_s8 z1e2J0enpaPwjT{Q2Y;0#MxRL!>uzA9ZrG=-2m*|d-fw`o1P{%iJL1s;q~U0+Ki!VJ#!GGVJ_dI|VQv22wHKR!m z-ljoup!Oe2Axn|WSd}A&7Kc>&P9_6DA%D8~@$NzEziPLZa*ygiu7$#%Ad;HKMyk^?aQ&GA!(97HF^(oTm!Bt9>863J1WF<=(}Qrxb0jq~STx~;w*o}Y-t+XnTUyHNLH0ka z&~|^}fD8>Zuas%jIsLbI=Ga)uj^b3z3;~)Ct(odX)RFpv6h%t&_e{fN3f)90CQ(@x zl7BKVtcc3U;BmB#A0p=;+F$efIWI!+nJ#==(x5;=i_%JYy1c^so-+wfzB~N0?v~m5 zKZKg4fII77$G3(3Vt*Ww1C80xg+NE+y%MWgoM$e(C{dct$+p^!VEAREPWS9rv@9*K zR+{ptmkaAkKJSTIOw7UTBK<6$2B~+~+Vmo33Gdc!9ZDT_R3ERh z#v9}l$H+1@Ff^a_tSUC%Q34(^-YC54>;X9o=PTBwOoD5*f(!7);@01h5fcR&_J4wC zj^7YNBwv2D&MQnl5Xv&sDPWh6Ge+45b2O%=i>5O$Kmjb5G3#j*H}iDv+Vq8ZcK_9x z3>oFmK>U57I6+yQW-}aSU8=!0BH?zP6`86cLERp{L9&?VQ?WI%%*mF~?OQF%fF+KBNsFYEv3A7~P`n4@-OV%o(Q)^Z|S8yMN0Ds4e0q=GU zqlz4Ga;BRAn&wDWjhK3(MX?W5*rr26b1kpb09pdS#LZoQCpuTl=4U4I~i-eO_E z?v9$>s7Ac93l(NSpKaPI+V<%-P z*9pwX5Ww z#yeoDI@fG#x@$K-3keO)Ho-Ay_)|L-mcO8HJCiqeGGzUJwK~UUsDC}gCLSLSxDB1` zkdqDkYUyeYb{s>oFc5D&7GT224_c&QY>{jE{dSVArHA&VS46|kM?K>Q^=LK zi?OIsk*I#p-U&uJUi)nA2$uAx`G$=?Iic$)O~)T5Wr&ZP%eRd;Vzv9Y)>|{aJ|s-Z z@>(=7$g9uMW^@~Qiua`xU_H)g$+T50X5+2d@*KIq$)I8(B7eCsQ9%_v9@>v5B$5ga z3WUjuE4vd2u?NUCXCIP~@-2esIc?+h-J#GnH~%74_H8kLSqq&)-*Pn?f70vUsLCI8 zfLH0KL2e8J_+!|buMV4<9YS5i*BgF12+AAR$6#kbp@#~HVn0J=8HBG199>c4Z$il- zBxvdLk7fVAxPP$!VO#&7|M&mq))=>DhWT`Y8g z{Bi&FpaN(9UrDkZbsC)ATLk5_zzF~EBeef`%KGmuqhBl{;>SPIzFq!TV*1}lO5nee zcCOFFe1m^Ppd9?~2|cUuwdhgJaHW`6*+2U2yZzTM{eM5O=KmngpBO)>_}||GT>6h6 z04e?~^D!~9sROl9fU<%=D1iR=wM75^ACDjyg@aiS1||4+aR2_-VEyk?y*`oz*sDau z9>6*Ov!D_KzN!BjpLffKJKDCUxbd%tmjPb$-=FdC=foGrOHaK3tH7lR|G54u9r#Cv z{`W;b#eW|or@gQNFZ<7K{(U_0--mu@9a5Fg7%W!?d_Pa=Kd`U=UzawAr>?sb&ny1- z^LzXMeUtsauR>Fp>r2lCoE62NPl{}RZONQ~J*{b7Cjq7#NXm{U6=K3d@ZRf>SbscsG?+kua3^|XzOq2ePo8`k*i3S845EeT)XP45}X|7{TUuWo-83viS^e@JgqzvA3Kt7|79Bv8&HC$wXKxIC3qx*aEB6~P zh$}TaE1!1CUriw#$N#3*f+t7)0tR1nUVo?-ADI6lkrRv=9Jz3eU@$X`xed)CPl6&d zUO4l?(@4N^}T{#N!FofGhr&bfteP^cmP4f`>Ap-RCr5AH?4>vvQvF(x1o1EdKL z5X!AQxCe<@3u&?;tjH&rzu|%OTrlifo`6zg-p3Q%j??UPpgCTN5#wLC`3=uNCKOdvE&geW+K9cichUb{VfE@N0sDSLO@CFT z)ttTrQVX!hmoJ%z^zMkZ%P-W>gW!0wR@-SMaJf%&&Ht>;yXy)Ir!PA0n)T^fOrwHm zcUdzeU1)s*{)2Ba?*b+(eV@z`1M!N^r-?HzHJQQGhV2n_(&OWo**eDrDt{_rVZxO- zn{Q(8m<>T(9FzC?wL6GDD>c@=%U}Th=Tz$5-38ieyK$uT7Coq8Py?k}Y_j>sA z-tO0BuOpp+l$$p{<_#)GkdPT2-TecBv;7eP5)2ISrZ~8sKEoG9{_*8`av7 zIhvi!Nx1X}#V#77Ab)=YK@($chqx!}2vcNr|D?&GqvMj$d)l*ll8x)4 zX(^(4r>$RQErMU)Osw{i`}j9>?vq)a~ zL?Hlc|6Rck9<0H3hbsTdw8C`5iN5=Wv{+S}Sy3w*Y<1yP@&1T`(3l1k&;mF!yn$o5 zsu^^LGA{{1Gk*XsgKdiMLiHHuL_LGaZyxr*Az-R5)O!nNG0A}@M4HAgJXOh|wcqgf zNS)c&Vz6rA{Ug|H3FE6KKDgH6FI+|V2`=i$t3td%J>?<8VXYQEq>L7byCD6k0RO*t z0EP=p>+HM!?dW&?u)lDqLNpXA+Q~lE6h44_P|XvYNq@(?R|lU&1bp9Qt?JV+w;Io* zkr^QTQ!|pguqH*pLHPvZi;)b=n->`ck4T%GM5;~-!5mQ_AvqODHtPx9o^ZsOT;uOZ z!aUjbv)~^Sm0G%^resE1dzN|QV*MY8!TNj^FQigfK%C<1l~#cO)w+!ezm)d{3=U~6 z-rn#49)Hzi++$_}q3d0aTd>cQLjjkJXiPHpZ;%hs)6W|Ri(lXT1)oAIKJ?JSuCWuL zG3*s@U?%vDT6+|(y5w9AwXy_?<8qw5F!7~<@r9Bc* 
zfPYTI(Oi@ddwWY6`%;4ujH0BxM>}a@|L$*}7COPc!rVRTuaE4>3$WOmrX%Y-@?Q5I zpFn}_0)hI?=i@K4v{HR8-35k8UVhwv62$N0_v@=QqaP*cDM1492ahLf*QrbrPRv+4 zj4D?S6RGzEYRyU2{=)(o@J;J2eHTYr+Osu|0fIT?gje(-^a%g#tg^Sj@)daK`XY#6nGBj*Yau3Y(77M)R(Qhh z-bN^v5?sZmoo_4apu+a~-VE^wbqyn4Jl%J#4Xh4WWgQLef`l|RklfwSmhUTS*^;6)UJnSVF)OhW!h z$oYWFiusoL-0Gd}Li3AFnXEnT{&$$G9W`Gyi~cm{uRc9ST*s{ry$^Y7sm27#uNFXo z_y)}PgkSUv^6;Vq#9;|oVmbQ9^lY0ixy-z1NGUI=3#yEL(fMHz7i$@SOIB%scg&D% zKN&#ca!OqM{NV@93+4;{9Dhq?mwjCz>X$h81~i-qmh?|fc)y@(O~53yT4by|_SUI~ z9vK(!WneBz1Gix;E(uzIOO4h~WAIjR&6}O@ymxpSQ6WQZs4;&1X6Dg_TDyWyc~Nud>jyW%z%xi_^+P27k9sIA1@fdnodae zmxbgW#{xAESnF9_SP0`v+X~eHV;B3bt4e=~4+Qaw!(1 zNzd7i1t1RioM#7EPJeLqC@H{`I{j+TkcRD7ySX626lPYUqA> zhrV}BmFxZug{cjO4VrGdOjcYm^^?aseN6)pve*^-Yoo(S1b+y`ANiJD`$zwS+yCFA z6wz<;X?fO=J@4;^To|0<&L@-iG6qq3R37w4G}7(EJg5850nLD)_Wq08t{FQZsC@)f z&D+$cK>-XNZAlRSLh8yNt-yljw9adLogtjodVGb zz`^uHKFZyAw*a0T3OC?%N`~xR6@`IJUe>!D?hun1gnw-Qmg(t$uB^m${*u@yo!8+@ zMwl2H61>N7kL;F&o~SQi_kku!9EeODiW70fY8F^fz@;)eRkr-516C3tVSg$vdJf|| zL=iLgiJobgBCrsICcuv2M)mi6Z;kG&W@A>GMpKdhG7J%0Vxqa z!&-=boquu-lJFD+&v&~5MqGNn(dRrRb@BtuPI55$J7Ub0jm0s0slt1{*`EdPsPr5E zevxK>$dCnurEH+w!o3hV7pLDdc_v?#VKI)CieMh2&%c0wTfdbz#3Tb~uv3Z8eIlEQ z45kXR$_L{AZ~x&;@@o+Q2OtR+T2CTxCO7%m%71Kn8_?p_U73gY=t@91RcSx$A~2T* zH%VkH3^UkZz|~F`|6xYeLGc7u=u-u!quhk>!&JXDtpgV(_A`P8Yj6h!`In~xk5@vB z_sdy}9SJ<1JaGC5@Cp)Y{%19sIGOL!InG?#B< z%6y>COcOr?2;KF+POn7+E)EppLHk-YzOn)IcS`$%l`3kuOhIoD>D8asZ~+RPlo$KZ zE&(1+H&WsiUjXwm3xS1;f6_o|Um@~_M}JS2xIjIHw30WpoBlf|!LTS;7k+a^3m@({ z6cHn3y_~sE7ji@Vt?i< zBy`^X8i)Q*iIO32D&T^cUm^z7|EK?!ukYz^>XbSHKWctkRxq6z!(f{X+=awSlGT?u z3*fGC*65plEbQ|n0T1dfC|Nl0^ZsjXkS5-*sD#=rg`ty7AU9Fr(%7MSfaSlydJYJY z@{L0A`>j6M_)h_CfA-!pG;hRUn13s}p2&bma5Nt?Fn{l|feB>&v4yy8(2E1=6aJ9k zh1i#Orfcml_T2R%Lh0J_{)A9g+z<8GvLOLiFEMmj4yw3dbVljo-GRlB06w{Rqz$0# zh+4dJS_xr~2hOP(u#yO;c7NRMyvwVMaeD-9yC6~qSk1v83(ScYh_OLUPJhZm4Kcy9 zX7K}j0||?<4@8A(u#X)yP=vFS! zXu;WO@=(xw!CM1ZfXA-*-dQcTvOYoLKSBedf==5$5vO4{yUYP^YoVLc&ZC_|7mkCu%$5mwchvZ)4;$QhzMR? zn?yWF-h8})!GX!y)2n(g6@0VwyFnFMnea=kK;8}-fW|+?gVFYaUw>A9lJE#GzBpJp z0{9)RaC;pM+Lf#Y!a8CRU^A{aH=}jyA|&%P&V_vu5d6PBST>YjI{~^AB$j)r$&vk; zJ-m8C$Q;#=_>uV`Jf@-t>!9>}AOttrN~hAyf{p%P$*2< z8hcg0-KS~xDI96v0)HHrJpZZci_FBIkp>?PySLA}5ASbTKBDl|Hqhn;M~~M-A9Nsq zZI<-v6m424Xais*P)eg5=*V&Gcc6yE;I#P>d_t#K^8Vf}G~ffn)d{+$4X2mP-wgaV z$aG-mr`_w-@6THgs(k4$egq7I`=Fr(!=lgML5|t8pn(9xt$&f>8d*x#OzRV`?FG-p z8NU6g zn2cf>UlZdP?pXa|*p^3At&kTB8kBrVVDlSql`V#hG=Q^Lx7nMI5JJ+ouL6k?PYzL2 z@lypee(Mv21b>jLgMUg55Ogq{G58Oh+7#G7-ZXVx-x0`PhswAFSw`ZmZ?io!V9COk zh1eNRSk8FwRgaw!qj}wLDa$XAq3)@gf*xl`h)!)`J?QD!+Rqw&<+M|(Jm$WJbvX5_ zg>36zFC^--0(fCS;1Pf2U=5K!%WnYQI`k#~YnyDxYJaJqZq@aFK%_gSxMkmq+`ne= zA+E?<;Xlmr0H@pQuLDY*M1G3{FV>2AB%tWGK4k_7q}N0 z@)OHBY0Q4U--iDsd)@j#0)yyn^PaNj1q*?0&mPoBsFN(FkWv~v2Xe@T7?N^yTCeR?p?_ z6|5o=1IBh}Dv*Uvp* zMyD$5CyLR#y#RU(q1zkz_llEuAmBA;0e^nN7tQbEOCKR-C7~WSE}5S?bj_we>Iw0x z;;p%tq$jUO=%aw@=@$5bboK|Bbs|(*$lgHc(DEpGnv0$P+D*iIK{A9A=LZ4mQ(1vk zspN-SCBaX+5YP3i!}IJp%i}!w+>$Tm(#hOM3qi zUBhZ;>_1m2dyzAIGH&zN1WIRE7NY0%AjK9^X)x>Y_YW@0D*AWHHPOq{ja_Ys4zJCv z#!GC5xrpXmXtH4L^9 zKS{2r_ALoLIQsXR{2Qhah055$fgoD*_)a!NJVUrI<|!5V^(DeS8jR=P?$1kKR$0C} z0@4JsI2KU!+61sveI+%xL@7ld&1XC)U}?AE-tzxb_9k13D@(BG2Qff^0Dm!f1NFT? zEl4vEpcVuOQ1kWALXcUtPjm0NXRSI_I~y_yMR>TIo11a;dmWr8y|{FmGA*VFTqEVo zV>CwJz?nYYzS?t;)yLIEifyPD(Pjb$v2|l5?S|!alwciCdn%HH>GZ%)Dm7HCt^kBd^c@Ub`f zMQSS7YjMQ5J!Sm6{py+N=$wGC< zpxRBC8A=B{D%^`3t$prIMaJ@k}j3S1Y%cBxivzmB7K7LN`vVShUV8$&DlJ$@bL zGW$C34z4CbIOl}1iiCqjemXg=-Mt9#)EkduN`H@&P121|s>3X=7aq~c)Abr6b*2v! 
zD6wTh(i(3^ZtpA$nC}~1F~5|s`#+U|a9`wcD)8J>@kC?qU0%YN0T~pd7=4N;``D%p zc?Jt0Q_~_?uivjFoPRpohbCW_?^^{kB&#bT?y~D93R+Nt(9U#MgdU)+8%by|ekq(? zPT=^YpHGA<{iiQ?8q@vcAlA5IpdX2U9tM6A@WiFmX=O93ts+z<&(R{Gf*J^S)f^vt}}S zEp8wfZS}RZpHSO)>u{3q(iapFFGw4uQf+9qZZ71lJ?E**K7vT;vbrSY2$~n%A}bI3 zz3-g2?@_)!ufyURJ(aSq#hhMmnDgpnV>GkpdGk(ZmtKN}S~f?%fSCrUgpb>*=`l@m zKgHACxHxX1;(s)9de~!)Zvtn*tpGA&)JUkSP-LO{C9T#%37`y0yC;4>eTl`RH_k#m zk+kY)zr7jswNBm1%((ZVH1Z41_Ob+{d!$D4>@6LA27IH^k>7r=1wtftz3`r8#}u+j zEwr=8?Gl3S!gD~&e!&q`XM1KLpINdnUZJ=vn*2C9RUs=6L zuWpIzX7{PFcg2nyh3k~b&Y(OIBA$6Rs5kt{U`r)1I<+f`45A~*o4QNtZn*AIy*5x9 zoj&)o3B6FzgLGwE+cHigdW&S0ue*AiV)`>+pMW*=2V_hQV=A@B)dX8gdS;b_K7VB& z95KSE;I+TN8zjM|>zuKi;6Hnh#&wnOGT}9^hSt|JsL`mMEOm#Zq=;!n9 z+P-%*cbFw^7xr29o@?E*t8+Qp&e>r9i1+(kZ+ai%KXsQi6T^>Jn-|n9F#C@e2QK&Y zV26Qz-Bt_Umb=iO`L3G6EY0*wey7*awXl`(EINBfpr&UK%?+!DO}g+ZPorBoksp|T zpMNacJ%&y+;jGO>W|akSty=-x@%0prHmh-?^T+dr#$dDpKGe*wrYU&$&ts&F*Itml zy4UL|OXJDxy=jfDTsO%`OE6zwK9$_}ptzGjB(rp2CEn%`UhVnRxxZDa7F_OpB|CF6BX-L<{wPew|fnY=c8>Q=ov3xBBI zAdScI!A$Sg_fx1ZF1-e0g^4*n`(-NE>U&zR9;iWJ!U}o#>kfGS?6t~T3>j#EW@i24 zu^1csH*Zw_>(ppFaka(>Ff^&JH<0cUG`z~9&g@*=>2ThI83nG6J5-y?tvY?08h3f5 z>I-f*)3yf1@s0WD=X3omHIPCzuYYr!kat+vA$N9PcGIbp6O0SPjX&adYLBW(#NeWD0%>R!ou&z|u8Y*O-=o!lMQQp6=jG=5lJv-e8)81200 zZb3gAGtTuxBtT*aYEiJ|3br^xGU}u`jLS3WRh1!Fx7W+o`!ER^ZFiJn02ask>-u zWX+W++JKFsfWoL4Nv+e}& zg_f_kD0a)b9%D53KDl17?rnCH;1L9L=?>e{&9pmB^NoEI?dw)?gMTG>V-rW5SPKTW zXO!f`O&egq9=^s^vB$JT?y%|FcPi7jL3s+a5|x15)IBUiT-S6f(>v<@ZR5AplxN2l zb!e57P~>c&@hW;vJYyUl%_LvRtadcjZLBQ(2?UY03+ADpMW*Cs@gA};>Y!|=+}C4d z=+>O_#m?xVOv!V3+<&jkm(gFg?{Stv4)e@TXowWFm(e^_HunaPaC)$aH+{)WTDwk(xWmNfOx;1X;gX1y9 zj{NX?bMk$8oWRAvURw24M^9$!XZ$O>omx0lCDBFR%G=9kjei+VPd^^+IweEWEQR@P zXP_DMXl3at@=iKmEacLxf-8h6Cs!z?)r;G5Hr-tnznmYZC1k_)Oqa?6j@Uw*?W@to zj?xWv7mx2WnDQ5-m_+bsE#!*0w|NH*+4}0}{OgrHwVPv0#aQ&!Y&Gp3=DkiwtB7_? zZDwuN*=slzqE*&}qqts!G5=Ki;G8Z^u!48?C8y5J;*Rp? 
z9IM4)tXXa&D+c>sQ-Ecl!s95N!rFw)Ygimf!ZA+qN-&qaF>@qw_A)=s8`=9+z-TR_`uu z1}$0mNPnc3cd*{CoNdM-bjVq&(ZR+@nhCm1VruKKO7Zr)TiqY2>-Emwq66%i=Y4#S z-IX0U>4Sj*?U7=m+2r-;8r-#xYF5Whklz8vzo>&I&QV{^bc558D|%GiTBv$PL>`bn9D>u1EsAx93A^C@iwPR+uokHYp00lyWc0c za!>AQHDA9@ez#QNP3Nvx`^C&@AK|_gP=4Gp*h)U-SD9bN8~Ikn^ZQ2M_YCy93s^3M ztAAB+5)WuLoYzl5bdDhAmM_Yf6!a-HY4@(pdMFMoTq|1|&}>KX4SSW9j}yz?66m?9 zE1KEm2bF@C%Eb)Jwa1LmxrV6^6#J#ZV{Hi6?*y+{fUbQ&Z5%;S#8H>Q8>qn}(o|OE z)%Mcw^?B)Y_z(2gMzDT3OnD9S1ouPa!+#U@al9|)YGGoI3Yg`h8NKA~wC$md150#w z1?jl`ifdlGsLu|RSSn$-5m%Ltyl1_x_TlNqhRXA7jh4FEi!(#L>vz4&@cs9tm5ove z-NW$Wa%Mes0SN%;NC&+lD%>6dFYT+-w&^6fH#0XpuJf(vbo;}|ySGOXQco<`NPkXq zD^cH)%+|or0dxdB;KkJ+KO)FxztN8t9ZFI6$ib%Km(>X;@=~ky$f}Kic$M7)o zp84UnKgmmBVc)Jn9V$1N-0q`|#%Z*+eY&uTd+s09DxyXP445W*H|L(AihlL-rKra^ zIFs|<}^pxkQvglU?;&yfO}n?c!N&- zNCa{j3qd4ePU*Z{@a_Cc1Jru&1>FxSs$?*l<*u#}c%q!R?8mVDeGB5_DD38j_7dkK zdcL$mD!pGUsJDXPu9oH9DACloNI92UPXmdiharWB4TV(y9Z=nAJ6lTQrhg9=p=oQA zo;^pM=8*B%$K=&(QZ(**?s1I!n>X_6L+MY(W)z~OzV30hZUNBp-RYIg!DXo#kA@IU zR`iQRdOl)MY1;|BLb>WW>CQ~OUcA(s+a}EBX{+vjYr`(Zs;p>4*I;|X>vHr^yiQoK zPl(XMhinVa)lv7N^c3c?4uAK^oQeS}{PCVfHx$##2BfsSK!M;>?E)&MPw=wWf~BQS zKZ=cuQ%3jA4z@qZdupPKw4W}=tAn+_FXIchEBMEZf!cYXS+OGu_t^|)5}p|1GU`H} z^UhKOLYyb-Qwt6{T2o}_dzLQ>=yOg}b6mCemE+$djD#Koh;DgKtbb7b25yYKwLhzQ z0fEAD&aSx=^_;*c(mqCr=X@31N7|%#=!eU8F|i#m*AMq{Y3Rb`Zi3`hDM!|ZPvdoR zZqhdJyUC4zO(5KNg{MfO>4jHZ$B*_R>p;XqXbMNdE|+dGbJV_vEoVP7KDr~;os&)7Uo_p9`9 z4I)eiEt&pQ*66!JLo1FW-lz6Xe7$47$x2pb)Y1XpTfzpjOn@TVL!*2Czks0|!WA8z>e0F=|^ng`fwQ_5S8Z8Rs z+Uw_J0%lYuHh-(fXv8gx{iEVsZz}16Z)otRYeqZda}*@N-H2m%eU>F0hV)02qnDy~ zjn}92suxwBfh<(cd;NsDp=c2}M{WA$8s3GM`#e}+xV6x~Itw-#1&nLXbR zh=^LG>8aX>7Yvq+)1LRiHDAj1TWMjloviFt06%T}(0>AGKKURDKJzoOkYjzI^rx3| zRpuMw^)X&_bdt%>4cl?HjzM&9%ED$7I_^6uQKHhk+jprhwZN$M!G?kMuC$!q4n34F zrR`uzl5kU4uxT+6Q4O!K-PP1*5kyHkYG1P)V2bj%+Q1*%1iJ}KRid^2Zmle|p$ zFVM!FwQ(Z%0aUz4`(sQ|dIzsqL4}03b%Hr{wUjwiY@{QG^96@x*lM|1vP&ZW6<-#n zb$>Ue=Mh{_f^eErkeBHjx_b%nj@mTikqg$f0#Rm2dS4Nu{XSWQuy?<#yX%^hrZ;BB zf>^SQWXq=~>uP~B0B%8ll_`3m1uvLjRO{N0E|>^AF7r(?tD`GO<#m+{OQH29V-BMj zv+1S*5C$WXwB@$oGvwn@F!5Zqp;LcS8h_a>IX$XVd&|Y_Y%QURqXxy&XSYsuDc9|& zyy)nA#es?m1sIo3bvOiX-dchv5I_ZG7!Do98^+NbN$?ScV%Wz6k zsP}WWV>z@-xhQRt)l@yRp?XRU49xJ*602PZ!E>xM-d=b!xhHyLKl7BmR^_%**MH-R za-5{xt*ZykywEn@t_-u@<&ddwR0d|k&+YhL!`bTq4&?1s$-r(X@BE%^8KA1O7DkoE zc^SAZ15?9JnKwHZ!`d-d+x0kjN>72Jc2{F!L2$7i)3-TjoPn`i+-5odq967)xjI0_ z-Wk6*1@t_A7gyB|fMH;R@7NG?KflGn&;o zeG_$9;M4iIF@2r2j1x>RYD^P8t=UxUxigIdl@r;jDjPw}?m{GA4CCym&VNij6oguM znHhO(?ytFCQLD2n0RtDSF$P=B_%s!F&*-Wirgvcxor;*v&4_yM9T-U$el$6}F--ij zo7A|+lYYLh@^bYur`(*%sMo5YC;5q4Y<5uCv?R9T;_IO<{7Z^1tL-=1B&`qTE%Y%n zb~(Jpr(iR(j<#q_)}vK@Z-27=`BFCp$KP-4WAz9}Zy@Vt5m0uKH{kv_8n-u&FfHBQ zMknRI-)$*Oj?2n=E($Q$06##$zX}#kPiR>b6!(|YR+!9;>Pokd7XUo>Fgm@UV8O%Z zG)FsIZoy=t_`M17R97izFFd>YIp&i?2|Z7G4W0#8bY3}+*Ij$QVdx8FWOje?D!})7 z9Orbhby97tH)6HE%pMKVd;|TJRV|5rgg3TU^`+KaVc}o>5nOu*{2dhTY+<1cIt>WXW^MTkdm=3RlV6LF*uF^J!tR%pEFRv5V`<5+D$&m~jFNDF#uozI3mJ zHNR%Cr$f4OkMC*9Tl56RQ&bzDOhbYR74N^nQN{ve4>RbDD`|V?>$fU{Zhg(`oeB?M+Zmb5OR$ zn0oZhZgMZK!8E?)nDK;Y-E^hfwTwxHw?BF@*W%rdPEvx|do$D_E5gf~WmN|@QH)#V zNBM@aq^qh-L{`ST)nnrd6|`HlO1GFIq85fCNF%s;#w~Sw*c6gEGCSE!-1l-72(qI~8W=#&icz@&bojIyRd_ z|1?1>&;^WSLVIkF4&CimnUnECpy#J$ksf&U;8fIJfwq4IAok7H$b8j~+2D12o#&}P zolm^&hH;ifLE#n9L3$(saFFJz+e#gMhfvQPn)h>_a2*kE6$LPt#3RkL1_# zEns+JZPcR+6{Q|EZzwoPE#Lcd3iGaKcTZX`J8OTvccDAys5H-&n7%W#vKi;7k8A#7 za3VLV%KTxKyAIX&%i5&EHGhVC*GSh@!0d$66L{Kkbt^;=LjC>aay5>V=(ys+&Qr6a zI%03``*b7S#_@Hdz&rA(Xk+7sygqo_Vm7B@@e(XFkZIs-e?N@f-P>&QhPmBqa5879 zwn~3G`*4P9FqS;WP6e()w6ykvUob%D`204x)WuCD-wL{={&?-Yp|ffvs8ml8<%IdMy~3`gF;K 
zN0-t<4!h_LE7*r&F%h@I#Kn5aENA*oKR$m>ehV*|SG1^DUpUX9BF*o{m`Y6-TGZ}U zkOot8Xia2Vz?(rEAwMRC(nmnl74Ot1OgH5{!8& z8zxu7;)_HR{+_ldu76XQ_Xx za3&GkhHdXr6K3e$+R|=6TIcON+%EOI_w4oC1ri<)2(5OG1A;?+Uj`0#5JVN`P+Wi~ zHflZWoCDluMoP4o9tUuNisfqqWmt1>t&DYKZs_Xy+T=U83k&z%PN1ZU%)1L7t#b%r zb&r{{%R-5=sntZ|V~yT}zuMV%Sl@qzlgVX&g;B}AzaV5yz`|^lb$#6yS4MZj%oU*J z`}F#HqD)3tc4rmrHKY9tbqnvunQO48H+q%EYO;O4@!LJ%>ohxF*)w&vxltfi=y5Kg z^Oh?Z``GO$dcRy=pJ9AnjdvM!tL0|CT1**fwz2vMPP#H%@M~pLPGDa;Ke&IdeEA%$ z*X(kP+1_+|%9o+Ng)H!NrH+@aq`*A>eU_gMZ7tZ?45p8rE19som5KW}{ZR zkxJU#d{dMcJCoyBVkydp0M>pAG67QA?NbX@ZB6bs)@hzab;XZWcpwI|gL95BJ}C6n z0UDZ4YN!jh^Q%OJJzhS~AD5>aJN@+*UG}WFJn z9+f71$pMFr8u)scGvEgp_{12`?6C5~NtP_6*C^N?{n|X#u#<>ef!R540d5&I@=DP91ao$LDDXbl2f0xQkLHZXTzX>)@g z)6vC>q=FRx6f}R$_0jg0YkTWC=aK4;)N^w*uCWnd4hyJw={1M5!^E|~smoofG#T;@ zBVQGu+{?4LP7-S)+FmWjk+5E^<5zQpzFCo|3mE=pQ}v}3b#!{datuakEVY>IjP>!s zK0;1iq@gpdjO@jww`eERTb;!KSI{m&De}kdD#RRPnYMpYB^+qn<>@7%^IK2J=NLre z+#FvhA-f46lli_6TkYgB3KIb$t|`M$=KX}P*shh9P>G`L^?sYQ#9$z3CH=9`kF{O; zUb4g;wV7?p=Jv|A^E!O&)I}L_fq|=i^q7uaDAAhPp%4M-LvyK+Ajf^x^s%lGDlq5SFb7~F22zFv%+Dl~yt@WRNw_itk< z-`B}mxS+b2TlUn!REct^{b1Ti64fTGzjJ7v@kMw|N7d`uhviLvWm)ap-BhcDG0-a} zwa|YatS?OfUl`>0*Lfbf$N3C4w5P3}&6dv+IYIy$2kwqQs8uqzAj>6L$$Owk5Tjl_QM1*>D+;-Vbn$>hW|vKQ9$%i9!cH z7awzH-ox2M$iVL3Fx<=nCucU34Oy2~Gk<>*pg%==FLmeop=ATdYOM7Nw!I!EBOzQ@ z?8hQqm5!moaB101v)ztu*`4FJVze9u5}0l88yFd;ygPRk+FFUxIM){=^%Q27=SwPD z`5+cbtG#-Cyk}FvIz2WkdHpiO+51*@t8jcs)a@SskQU*te(i3ot>?4Rir&3WwAOz{ zlpW^N#TZ?``E(|ovIpl*>=O|g@6%Yvt_dFMX8dJZ_w-OutRgEa!jel*w$7_10TnIv4n`P&WH>D!z`!3BlOzlnd%b3y9y->{2vkI6ENv*sv!t3m^@KERQUFBsP{&XCj&KLxN zI}wb>q*HndvvNhH zAqq|d=J6h)4gKVJG*TRWywNVmncJc~0CCv)1xEO4)*exFZH*;+oCK=bCMLDnosDZ| zoRThFY8NHYlUabunBVh;e2N0o?Kn99xG^)m&&-iNuh|N=!~Uzs#lVbvicYK&hWg9L zildeb$6|~1A%APa4qAU(N+?B>%WLsYFZXu(nxx`I%dGHy^y!U{AL2j`Hs~xHP;)1Guij;BKz6u+Bbj`OOG3WD&gB-pBlawtK~ZMoYKSWcj+g$4 zgNe#zYVvf1gFS!SgeU0J3#(9}sqAL0&#rQLB8hoo=ze1?`sw4eU5|k4_pk@29eaQ8 z=8%=TMQduB@7-QlP@4>c$jA4pImZYuRLVq_f|_DizLPl(A3+L=OQyJSpEL6?#th;! 
zK5FaTnP#r5d2djfi53z7%@)6i-q+3Zy7Axac{ZN(Z9ISLpcJFb%PZXXUYD}oQP3i_ zeSvz}D|sm`m(#~M^`oRm41J~6zTLdxh{7$DTEuC9*gkyxZ%f!OjCCa`V#wG$ET(~_ z45z)W>U0c5wOAgvBWS`;lgnd%n$%KWK|}k2oDtGbuk!fJKsk{b$OMV+<~E&B7$aRC z;7N7OZ%Kc+=qrg`&oCbCL&K8->_k0b2yj1*?obEbrzH>cY8qeF#YTYFRulAzIxi~e z_|in2?g>@fhm%2IavO#4=5jWnS@pm(J;xK3ZlK7d@ez zY?Qll>nO^`w75>WSD@q1XRuTnTg2D^{Ma1$;lzv@4CKux^Aqng-Kww-igp)QZ%VmD z<14FPwZ2M^H|q_l3M<#Bd!ZVY2MZq7ZX$m$O&T6}HoHosHycyDY3m2})d-QjaX5ci zCr=ZMSR8EGPYx9~3sv}4&w0Dh`sQZxZ?sFBH-yz~b7sRtE>+jpggFUwWc)yw#1LwN z4h$Y^-;>)l3?o86GZ@&V>JXcWXfI>mC)0dFU_8ApPbCp;Jf!% zWs3$sHv>wWoGviKQ5H-Bv9Y)6iRpi=(^a*z^D47#O{R*O>#wv!b&>g~(V#JG@JBcl zo0XDf6MX}nXQ7`x~7GU){jhArk3WmB2#fu}KMVZg$A>H!g2-Gl?kk2mH8ZdeHl z=+&Dl!h6Bn5I$GV)OdNO?4@r)Q{B29md9#-z>o>LV(0mNN_&^1Jw2D4-FL~%HhU-N zkH(2py@hbQB8~AD$L1uvmKuLqYW!0l0tQ8jnWFyx=N z1AjCyqJLq^IR%y~1eXKL2yg4M+;nJKRdcvLc}+Jl;J9ZV?;F@(qR(+Vyw^9-%fs_o z+t6CCC)bTA@4RkbZDkB4`Y>z=r{_Y8F_7-x9|8-V7le`NN`CAx#4vvoC&$|hSi4(j zXSgwuq(`R67Zn)+dcFG#z?)^nnrWcIR&CD1?}mHC6=6UQvt7#K!~-Jn2}zUx)|jR9ZJPyZaIL znxC?xwIheDnH0KMUW69gD*^SKnkVd$d8_OtU1CD>z|77wb^?EyQ?yoiG@kA&R;<*` z^nAa*sRmNlYVLX7O2`FcEun#1oAoW|`=-vN+Jjp~ET1Q}sjt`4WPDJgx%FDsOnp2Z zS7+(COP{;B<(4#NR(WpTVR9ApROhkGi)USpqqDJ%FA*!XqQy$hW0N6X;e0^b07cpp z+5*%2D>wxb{49SjF+l3@q*E)(XqWbMcHS*JeOhS9h*#ZX(?Gw)UzfAdG=Zq0wj4#z zz*Nny5rCVBddiyH5ghi|!)&2a7|q#Q52)~tH=fq4n~JfQ<5zg6keX53@!r@K$+nDi zR`s`-!AyCH!l|fq)oUu+oNMmnwvJL@G>iB#aiu#nW>kN>KWft1!9dGq&-6F+aoA?A zZr4Cv!_(8#=H%j$Ks{QtTdR&94jP?%zB~%gvZ9)42K$js(=4peT#!LSjPCb-bxP$BaEqiJkj)?NO31*q3kr9} z>J=VfvHCboN0~BOXRSkzLAp~tkfS?aS7M2(aKNz8VZKVGF++=$f>u*gwpoBeW=jNq*v8bPwb`kY{87-Fg&Z* z>?310iMV5H;g}RK+yb1jJtl#iZ>|SpFLth<1+xR+02Q@)~3fnbKv>?G-d zcJZx5PJVC`;U&_`SyWkb7^YB$qi4R9spd9{LrwA3m@R1gV9(e0{D=Zts}7C5iBFCV z#!>UI=3v9DrkJ>eaKIv7Wc6K)ti_CCE*gKv1&Rd)lU8oASruev>q+50(v zw3L%Y(Of|!v6dPd(lddax-O8QW}%n6%bTCpsl{yUz&4g#IwlX5*xmM4E- zy@K3)X#2Cib>YQwK%>5~G~bPsQi-OOB)J-s4=qZ|`&ZgVYzst+?g3DSD!JZ`C?CgTR^YV5d zdNG_oihi=(K`HwLrcgvI=KT2x1x+=%78}=}-Q$yMUK-a+wOg68 zFIaEQv(7PGPB&IzGKwGvNF3#p9gCl?`u+@dmOc=svx?n-m0g(N;;CWH6&^4^nar^%wB)pi=~;h0kxQRcerE2jrB0HA5UpI*N!{TWA^s#B-%R+(A;L6 zHE!a>I#c{)d=9T8!I|J^KIq~Zz0s2LPG($VkOb;HAq4D|$?h$&^mw(zOd~bbMToTN zv+JERLi29ctPw+Px81M0NenFeDMOjD^iy=zFYB3j_E`ivhh2Y2v8l7J<=D0w26hrw zqteZ-HOo6yby3yIrzYTfdODl~y!q3~wwJBP&Sl+_KRz z)PmIJgUe+hZEH#uDUh(P5S^}#CG1H`RDD}*b=!8^i{jn5*UY_ayoa~B&2CTzZO=QE z#|(?VpMt8yE?@ibl;ULyx%Zeq4XK@9RU04e4I`; z8$O#w``v#=x~Iaid*l9n1{-Um%K4*PGW-0n?~LOzad+FXCLi9QoIpYse3*-no^~7M zmA#3!_mKZKEYu+-Ub4g84T4y2kBy_rcsw4JBf*W($AgUmck*-JS{ud<#j_A#{kUk*0~qOf86 zg9@>_MeH=ZdAo!Gmqn`FZ5>FfTifIyghd=IE|M9Vy7t;Mp*I(FE~pExwd{;%)T7|# zO2v%x{0NAGHftG* z8vOYL`OfrpL*)m~g&VJC4pm4a>b#m>$9rK^j%SKSU$2sAAD4^6C6-`oHyJmO-WL6C z1+o%0p#28rBnis#Q?i)Tc6)|16N6|Ig=>FUB3!J|X0kh~3w5`Nu36U{*K>67g~OUU z%($1fW3DQkwU^DK9v$Ak*Mtj(2Gz~pPdhO^o@D3lyZW7-o#rPBCSViWmv4F+qG54T z94%r@}-}F-tVP98N21PQz(y^j@mxPFgIc+gaS0pdVMkI6^@@S&x5b zr&MjLOTXkEvk}B1J^vg?(EEJ7Z06~OD|(;7!hOC_Aged3rn9|Cv*vpGnxpO}=P8uI zvKWso0Ye8ZnIodqtUbE+X z-rc%#(4{cO#MJg=^bA6}_o^+&3Dn+WuJTFRub%C_=-<>lJ-->;5Z2p(yf=TQdz9Nn z0%}dk;{LIZC~)lcT0FI)bWe6P`?k4N%Gl4d9F|MV%+C**-BEWnu}n)kzfC?<;SIY# zW!roIXzW=WQPAfRR`tdZIZ59w3yhi`po)A`u1l^t?pJU-Z$=4!Ke|x7g2u*?w;4XT zE7w)jL9yWGE+2RA*0?S_dLVya6q*`8pP=asNUy2S1{|5)F!!dXYUjbAu>asM+<45e zLi7LqfB*D!_%}<3=kwRm;e*()@IEK}H!kre=G!c`8roA3upMj*AmK7NGz^Xm0MGI% z`5=;rownesGVj-1PwF7|ccR{Vsvq*20e?!+;TS3uk}$XL>y-0S=Nmos-g3%8XOa7 zsJG{#XXi4Qf2<73GyXTb42}zf!vb-gh=%XyULhO-b_pO%PA0VK5wHUNoMX zy}HBi0txIG^785y|As)adVwWtu4dt6cpvyWtH;0XP*J<%788B&EJ}0^B7Y!t$)e8! 
zs<90b^DB6DgM)t;gALbXwh?!3S}AN~iYzDtTmowpR#n)L$PS<_hC0s}L7e>0c2Q)fuv4j3_ zC#AlReWZV2EM+I*8MFn%mV>~)GFvd zIMMK7>BpNy*+DInUE2Uk*=(}z@q0EZS;ran3f4)5a(_ItTyj2RtdBn%)~(0BZ)3-7 zu`YjP-?zSx^&;!tW1Rx7GcPzNI1lj*EnS6UL|L(Ii)$*0YitS6SU6w8Bi^&=P$xWW z(-YCs!`}iY#n?kN{61m1)WpWRh%%U1%2BQ|2bgyb@5xrNPh9-l=f&1^!i#|ep~~Zn zEwY))bD|_bvxLLz;h@F%+QS`ExMHA@1CM_JsW(%xG0U|L1l@4%H@r{I{SJE`?`1ga zKj+~(#HEIuck=#i@~5{x@nA;&mK>kN1k|BWX8DO3WIr{?H}G++8#U~w8w1_`817bV zx53{+Ywy(wd*vDi&r`+o^c6=eVG9ZObEp>>E#kPN7Ft=%uy3G8(aa6Xm%)M#m8^f+ z;W=rJP`{K6=uPWKHqH{KLjjJhr+B?sPC|)o8f)96L6G{Y=*M6tgNP+yA4>5N@=7F5 z-$|VASUq9|NM}R&_vtZHg&X2D;vk9Bg9hWPhAE1$UdjU_X3-pKPG z;(1H{Z6Xd0d*rijIlfnAc5Aopbsm4J9IPq{g8Hx=HpJ9lj2RBm%2VJfO(?b0nok%> z#O#&m6pYdW4CIgmWmGS)#YA)w9yjg4vDt#r`g__b~J zszd1eG-9(T@JX$RJp;r|3b<*Y1etx-+bV!H%5WF5!g;K-O3B6y3uhy_ZW(`##90gP zQ$wt^dU8jQ{XV=eoCk6ihWiVWcEehI_Y7Gh#8)rO)bILnO=Jil5UYtOa<-4n8kgEU zU2rTg95i5fL<(q@+Y19*)|O@1FUTOM7@gHEa(90wH_pI(oC3jR z5U>yo{N37Nr65rnXB)_CJ9KsMk~KTgWKh$w9%#czERuGlLlJ9aam?A3BY2)y`yXv7 zN<&vuOzKvF-HDLkZWhI5!p54zswN2IyA4krt%eQ|bM+*kD|3IErFsVm5~nz^(2|wY z8{Cj$bKO>VgC(qLHKAO^*Rq18{~wzuVHoJYY;2M z{Ukyuw?6nCL}w{yv;bnhkAM$1t=9fi)=8n&5wTh?J3-*wKh3pEEeb7PDjj znV%ree*XQ9)j95<;Y?zMKe?XE;VvWlamZyqaq06p{9&P|GvrGNY(T!b)bP0k9D&3Z zd3g45O@l09^Od+K$00z3L zEu{pGT?>CUgxYrmLaSbsN zO1GVv>Y`|{O{rT{GKM1t@(0_+?_*=%3dk{$6F8EZV|hs|v_oNxJlpJm0ltcQogl}u z^|ez#u@1QfH-#gf&33pK`4NkBT|}brfLuL`{1Sib|2>z(+Wx6iKI=Bzg@5OWpPb?E zoN|Xe7GKZsIM3L(ByPwXhf>qb**mb|hSNyi zXhW1zw2?RdbAOV3H^jM5{``|qhZ~lxO^@2+Q%Vf%{1I078CiA5@Z0(G^#Tke^H; zLz|fpc%m@lO7HAI)QPEzf%p4of5n1;STgJd2pff-+QdOyN%$Q0G2$lHwL$Fz+vY;N zAx4osm5Adw!96Sjf`OE!M&JbT zEzuWTN^0&jY6Y;Z$^IgF9!KtUNVwe-c2DxWX?XbNc|U)#b2!&S z{7h9mHxscDf5^u>@z1?IbupDUpD12FtQt>@v-dSX3zI)J<#7B1mgS4^zFkyV=sNF6`KYZyCmJtVKm z*Mkd+JIS^4I8oWqbXroA<28R%@rHGjz)H7q_Hn;Kua3197S4Dn^=Uv)5Ti7Qiq7D`clKtu?G_i_a6qXR{+OobZS!daO z2$`b>YZqe!9bNE%`|y9_s4Hzpfc7wAlz=^i^F;1Dn(K#q+s{#B2yDv$Z~Vrq8MKxf z9*(YvSDgBZS3E7i`uL*$+-aX&VyKgcdY7y-S+_s&Opyc=xD)Cmg71C5_w)Sl?DBv6 zzTeu2#C%dq{n`KIIpPn=oBu-|GhjH#`+lA=?iunr`EXBvekOnU9X@$d|9g%(1xM-k zF5wb_!ytb9ywH_rS@J!B+?|b&J?n2EyoiTn3hPF83R_%ae=DdxH=GM>uA$wafaMm8 zXGzwS)N|x)bTIVAYYyjhz+%WbWlH!qf4vv*IT55$f_3;7WFdAejH7Gj?we{tl$$5N#5{z&i$}YNR0jb zjhGV;c++1ovX_I}|Galo7LpJSwB%lBz!8k_o~jk@JlquoYn`iZeuOnZrQ!UDY-^uY z^1i7Ato2xjRoLfQrEE#o0(T3(k0yBm+>q{X!>Irok=h&gVnc0+f*Y_VTWGMx4el10 zwC14dlIMSeJLhxu20Y=4xTeiX-eeNow#EKHTm%076SMw`ML)IA5Q|8R{lQm0=Z3_j z6#JdrOGEA?!{-fg8@V}&+k5A^7TNlf12v3h;Ju%q)CYyjl?T^N17vMENfbF3FRsIi z$3|;`M?jHmc>dp*PfO#pmN!ANGF`JbII7~XpSXW^A^^DMhBk-Io(H>UR)4T+O>o$q zNO0KymIn^M?@x}5yYF-61vA0u<51h;e*M9eK6maRk2&UWEGCnaowyMsN z_zRqh&owBY;+bL(nHZ=;&TWW6oT`#Z>de3I{(qc*>@kwlkbh&Z7HA5PIG{@7S_PNe zUXykk-=Ho>&OmTiu1#=n_^$zDhcr3sgxh~@mbLgE9(Td_Y@^;K^(|`)GWNX$K`EFE zAqL@hO$t;1Lyq-VZea5$1jv2>n#}TPgZ%*KbLv8*qm=V5E2AnzPqBdmVCP5?>K>5r2mqcF28@(_o!Y z8-2ANl;9|YIgU6Aa{0a|O+CcX6ze#| z!Vd;FoR>d$VMg-PpEdr$@sJ-0va;KJ@}F{Gm-M=D z2a@vx%muGKJS$n$@kY=HF)HADLMvm9a?iFv@sfU$q#<@ed3hn6E3PXEqj zLsCNvL!2W}P3j|G`#ocqvURXO0lSiWDoHelv!ygqC&0kSy@5EVoguQwvnyc6sA+6K z?)W%a6a0;{1gfW8n3j;lyd=hMX{(-)Aiw9hjw?u9BWEzQP_vUfL&twJHAt?a;h14y z5U)Y<7Q7b8gWfdYT`!;-dXL&bK)fRljr=^GcSyW4OHj&zr)m53n4a=HzNcYG4l>}e z=6ejDolQEUJN&l58~wXNTDrg+S=7>`@9^BFQesQp$+~QM<|LGmhEC}t0Zm5UX zXtxfe+x`Nr0u~>E4%&aA5g(36twqPTH+c-s85AfzbB`h2<*|>C_!T$-0bP{vYqZzc9fe9|s0c@_6#OVXat))Z4%JuTDjv8+`w}0a@QP$izs(?aR$O))1@qR!|wN)ERFd|%*{@^lXPgb}* zf&1l5kNhX%g`#&*qp-Uph1&MN-%Ub&Q5#^a2nRg7U#FxFMn0^iPgy6Nj0SjLPvhws z$YyNTXFNao8;yV57_ujUf;thKOWiB1A0rSN zCaGK4=Rfu---zo%UdRg%4}qN3#=I&SlL$#6OS1yTT{WlXcEfvH=EP#s%~LTe)H_L1 z!1&dQo{WFmvfF>lOUPNZRm*fCYmqvU19@D 
zd0@R2hI0m!Rm6VeL59g4!SBGmhkhBs)nNf9vyw{kLq+=53qwpI&*pP>{_(8QM@64} zsDXcmbN6$PNyuHl{p7!%F={DtR)3x&7+KbEHh*eQdkR-u$@Y$=(iQ?{?>183#G48M z=3>r}yvEo+)*9w8h)1NphRF>8Wja80MY9TT`{`BL&b>&}o zU`l$n5;w%JZ{M2c5i5o@#XUpr1AH&?yx|@hauD1NVa7(x;q0zCwfW+ZX~Rek!4H2# ziO)=jr^2H`negff58z?Qe1l2al->$BckIz6{FEGd7V`pCWmB-} zl?q4kq1G8NV1m*7^xl9;kmm#26o;=lalomIy;3$?^rv{Pyh*xaeqI-xqA9sYhBJU1 zv$dyCPMlh?*v#|-MFz%p^S;Msqy2_Aw6@X6QUHU&I#MDF3OP54$7h98ixwL{z) za0|ropSu+C2CIuT`eI|BT>5*L4tsEjX=Lqbo?yQJh;cvlG1ix0SR^ieo*8&l%|6_C zse5)X5Ad(K7*!z`vq>%{h!o;zM>Wu(JK+Sq5WjgyfwpVsf5NyRaF3j?YG!|%?G0fo z)@ATgSmfIzkdW`k9UGE3N z2=FPvhHSP+9Kgp>CyLm_5Aer6{k3o1Kb-G1?tyQ8GOPy}pQM)`PPBx?_L~3M?|*&I zP(PCIkvR45d-jVxhCD>246%Rk!9^rk*086rZ^9{VCB$J<9AgX){eIxvw&0?MH|*N^ z=CX#Kuci|E?!#Vpz^Q^$-x``0ZiGGQQozx$mtY5ooCvwK3639$jTEV?Wo|Ql;PaoF z!qxyN)!!WUTWe!|2-Y-CDDJr1y^!ljQUhGwF4h2$un#xbhxVTALvVlFfgb_;9q^-J zug(7Imy@&l6E`nxj+}+8<@Zjs$V58!+`nN?|6YR+YbfR)e#dXE(SLK}&s{tGoB?BR zZCCP0?~2^Rq?h->dpEeJoK`d73gY43AhA#4JtmXy3mfa?0yh&>yV>s3m7%dBa#fQz za1KR|K@(xP|4covbi{u|OLJ^e*Ae_gk{rszen{D<1;&GW9j*dDb-uzwDAH$a^!ZVL zB7oqm5ZeXbrUN-M18D7CrG#Mb$9p@Ebkw|(T9rCpZ|EiBcmCY(u_%9&IxL~^p8CQ3<9pA$$N1{5)Ws!0r+6%Wu5@i8-I#CyO!c@t^t{ z-+G>$Bji1QxH;gg{g?Ce)8GE!F9X*0$*caocKG~kHy*gV1cM)H&@cZDYvTEwvzC&A z>~Y)=+}5-okfwj8um?iiv%~XUt7W_mppcH&Kx_)v(9uj_IeAc8!g(r4(1x&Alm!hE zis5^F+w)M$ljki(yA9@p_?-04kj_xeSt-#c`Cw-FJ{1=@&Aq@0R*>_9G>dh|z94rA z035;v!|w!jBykHK2;t$8yJff+fB~WAXt+aZFHLZa`4)fpSdt)a;_ktHGd%Y$A#sD} zq*cO3BVas`L1t^b9fCgtyBqo~vC6*Q*r$n(dkQ&ItRuG|aS+onkQYt}UdE6(cm<~3 z=^c6?fANTr3;$pPAFO9MhXbee?{oRD&oRUILnAsKu>|+|XB_-u1#s~F)GgmQi~24R zuaJ2_9RYt_v2;l0f%qq)E{ky|n|s+UUEVPvAA?vna5??xCI#`80ehSRQiOe`Ya;(lv3q|bkg*O!5tk!Shjqx)_z6DGSzc&^|j zk-CFLA9CO8cow9;MfU4Ye8gG;OR-sSjJ`c7a(A1-H=Yt6!sLrL4$lXS`fq>tdlqt& z-?dQgu{Whw<&PgeumuV(pTOI4$VF#GwQ<-ZIBi@8z0#=zXKx#P5RUK$5G*7N3G`4E z7-F#c#9cfyz|7_fZUPgDR!QL^z>`gx;Ntdh*EU<7vTMkHB%OCWRsSEy?aei^$3;o9 zv*)#<&9{YYGRp3j&B3*@Q`sY_C|Su0_g-7MTuMg9HSbl#<>KD!u3wMG@2_+IJO6z? 
z@ALV*p06iso{}Nowe(;z3(fMbJ#Y!4S{B5FU2j7^r}uJxEsfXu0#;i=|B|20s@=uPTjCyV?ODb&cF-4Gm#tr z=)JFgY>+#-d+F|6MFXh+j*(-V^A*>H{uqY+CjRqXNbaVpL_Eq>gG)>Zo!o>_hpN*F zS@pET6+o*EVze7td7#-Fzq_2;EX}iz~W`p{%GHSu}~}~tUm4f zCsn&w`uBsiIQdWGzLV_rJHh6E<}R-GpM9h@<1|{!wot%umeQnvD5xf4QDo zWh|}EJCpuqO;@n=mERjf^^@O;{cM(|(lqUr=PU%xBZ1)cUu(e5I!CAS)1j!9GO%3N ze4Ti|fB0qEoDM5uE8XB+L9aDd6)wS;^e4kSsKW(b!3p7iNcIU@cmrT;eu7wfDsE0f$%gZ7X+lrQ16~ zEHX4WVIHU5?*J;>O_A2{f%o|)rz7>tZLV-gij#f`+_CIsX-L7pc5M!HNXM&RI~_r6 z`Pbz7(*CBB>TaoZbFk>`$M5VoY>ur*GFb(<R36fx!q|<~J@yXs?2@YMwG(e&r@5zgsm88Mm)yBt-o-aa$u_fJ`VZ7P=xnpN z&*%^4YRUt8JPKOlPo9YR(`BzF#9F@TAo}OPsTBLBtuo#Oqxvn*z}^o&rnfDoQ}T1( z*Gcm3s`y-iqAs+Zib{JETFBQjdVTHf4{*4znC7A89eA*oDhE9kcGVSWSNezlLUf!} ztj(W5clq75&kw)xi+o~NNrO<&w7gdkX*CrN;qG$)-f$Reb#=0*$NTE*`N^y-Pbp%+ z*h2lzJZ7oIxa|J$tz*xSuS$!{tLAjnFX|%Va<<+H{>fC(N}rdaYZj#UGw81A;7f?a zK~a$noRMC?_xFLiy6gN!R_*Kg=5-1Oq8o%WXsd~`GX>?=3h%A=L$d7gsxk`i10`F; zOra|PccJ%z*Vx(b%x%EACkpPI|9IylAgg{=M(OL>(P^eu?!tgT!gG;7pZGN8#f>!2u1hi(%(4 zbSIPFf_G>3Vj0e3_Uh!X@qNy--+8VWJXGia9*)+&R0z}a`;~vds@i{V^oqc+_z6e3 zN9;%C;}vTsTi$y?d6FW0H|v=`c+I!l!Y}w<4XEybpN`bFE$zE}{K4d*=O?+sv2-U* zAqBO^HiT>z`cqx5UX|LeODaLSQsGH&I7^359faLxl3vurCr6)^Aq5q0TghI>I@66g z0U(NPz?r1#L9XW{)y^hD`Vlxah--OOm)K5ARU55G&R}t#6m@Y8ZGc-4`6bV3HDvki z8V5g&#dYJ_gYiq(eGY$=qArcsGuH|y7q8~pF|wWt{&4k7pR+!4izp~c{UU|D&9gJH zk>mlr(I(Vc^RBBn6)NbBJnH`lDv=(#3J64g(T8hA89r?HDuGSF_I6U{#){o8mGVVx zKcBwut^B3ndXic}(ETcf*7}Kd*gHGBK&i@9By;+oE#=a!!a1$82U}k<@Y&M0gt_yc zg>eQSCwHcvh=pI}y1dAe3N0R^?$pkdKkTBhL{j!#NwFT)vzKP1jqsSZk>>Pt9PnwD z50o$17vS6j8C_%1w45?J+)wS%z8;X_v&&v2Gi%|^?ASw) z>ycovI+0I*%o<(JnzL9Wj8r{)RFjjy+n1@J#~iP=@`#l#&}ApKc;J}4%XciUzocc5 z;V*!Gu+xAxRr$J<=#@_lpzC0yfi)BnQ&mc32tQ}EdC)?&!G8+ad`|2AhCm`cA7EOq z9|lyPv%{ZBMp&q_@`D5e>st6e!uFISGT`iJO2hXS9TROm7@je2tRH+B8Z6~WzJBV* znqtRSsnnA&O+zE>EjK=AW8FkyodM+AK5yZ2@qt1ZADjOz3G@d;5!)&tfJbGGEll^f zb@kGl=$|*RDtVmh!YD41c(Me$A5|IKT5Nstl)Nb?4eF#c?TgfV^)DJYs% z^|8TmJT4oqQ_c7Rl>}zw0yUsvRV=aw3-6HDjd2!&+D00G~x|5+B_ylz5i{7H&7`*iI7NjzY|^D>VsM5ijNUk`pW(ry!*B1kC?6I)>6J9 zBM(P1@&Lji33!0#f-06$+$3*$|L`F^Y97>19)r*Vn5aGoYWa4;s3g1<0wGjv1j1=j z8T{M=%@;zvUwPo!JRa3yhT2gx54gfTR?;+_#rwt2_WDp@vAPQXIN;eN%q%DF_S7Y7 zh;Lfw=BfLM3e>Mg%QyJ@62o}^`=)YM$5h*{s_$%%AaEEUBSSvh)%b(w^!*`ODrEGO zv)Uig`wHg`LFVbhyvN|AyIz#5QV;+=QV^`5TV1RigZ| zYYl#HbOZO7ZHVV(i983l>Re|maVS!0Iy_fO+ zMIY6@9Gs@h3olk=(EV>JWa(-bCi_=cq(E^yM-qRT9l?i9Xs?E;_Lxoa%=LAgAYsrMfgwUxmR=cnav-7w! z6^kW~BGB_IFE1t6%ALuBD^xm3scQ1SDuQ#ld3(rYsZ?^I&R=X49OV7|Hi%|Uc2ae&t!xF;R*8gHkB5c{+Y3! 
zp@`FR#_0a{mP7VwA{o4qeE7U>JzLFF!2Kt_61{j1OEZBr{3R7tLHeW z-#g56U5#`HrMV9PYLi6ghS$Yuo)?T!n~$(b_7<;2}%A(P8t0ur+G$ zbl)0R3sVb!TP-k|HHVys$Y1YdEi$rZ1=(iKjUdOSj&8*QC;{$ZVH3COF?5-#ADZJn zl}_7bTp` z&0(c|B8@v|rEFfXzZtZmNXXu5JzBUQ`bpQ-;(A@q$;70%g0amOPQLT)fXr1FgEU!# zWXi{x;2w?}IoHI91q~lq*!f=8h9Wuc2ahEtrge@f0i)_0D+QkBQIo4uYpVALRrMvJ zyj~sh8M_Y1F`kd{nVKg1P*?B6?p{480hVyU41jzgn3XO;tw-y%no}6v9Y72@UAS}D z_C}rF;_q*q)2@)&#T8g_IZAXt@UI#jH-|TdIau4(}{Wuez&aSpmxMos znw%tiG^dVKO4cs`8wX5`R+VM|J^H`X;|WViRVU9ep5W>`@0B`soQ9!*%QA5qEMT@h zVX1OU-AM)C_z)tfV>se-`~CIL<9E*Vu*mS=xfBp~sKva&0{Q5PIEJW)pPMSa1>rYH&R;9Tl(cb|4u#n7BBK?cyOC9?9|Z(>Pf&U5 z0*Q;FxXt=d(OM_Y^1MC1#l#GDa9mq>ERB-UkSjXO^opAWCG&{ZB<#gqz&BpdJYZ5^V9Zp828(Wd$X^Z{zJB= zgIWp!blQqDZ9&F7W*=6`yGHVV#{kD%`Yy3{Wx(yw17~Q3n(UV!xcg(DTjuolXD2^7 zCDsJA0Tba@U{;CnErMF#JndLE-H5MXU}IoWC8qdcGIce*Y=8AzeDnqI4ri;Sc=_c) zWmH}q;p*+tGr5ex*vgQdd*8gBIO~dUvDp`Ui#K{=7wkHh6!s^Xpv<+rEQ69=x4!N3 z_V~NFx+3&MW|VD%Q{uVY<+JFb4|sjs{=C&T1ZEPqmdAuCIn#u~!P!H7W#bp&A360% zf_erFeDqhoX(n0{OB$!ens#&joup<6-bmYk;~wi&PQj>YkI}%dXK>xMea(=g)wa(Y zHz?z8zh0w++0l1!RfdOUt<-Hf+NuS0<-OVGM-0@9c%13El~Jr*NJ8(T!G_%I{bz(b z!0aLETF+_sS2*RFE1+(;z%lFPdZulOsPfk?$KUkl_gV8gxU~E^H55l_!O7G!`fn=X zL8UygZ?Q`Ks?Z(fFVN7=S6&TC<9^RMdO|9RNb!qi8FV%w8$)UMc23o)63|6c>Ofnn zKKt%xK|OP5+IYD-QxR-p(G@3Q3Lbm{Jf=H|X^EB$1Q-VFEu3kD5TA=GbUg1As3@rF z30=_zKA(oi$XoBy$Tid@5qYR24Er}Rkh&008n}G2)Q#yii<5~rIN)PXUZ4+IDmtCP zIzx!;@2lq;ScNjZti)KaW=UPOT1SmH#DMlq+>$qTfjs2 znbg|e<6E2Qo-&>hP+<#`l0|h-F?on(2#2i6YqDk7g$uY;p#Dl;e3h<2(}TnZEEaZd zV}-NN((%df15mkXP!ogGuj_#zmBU9-eewS)2M+P*0U1syAplXf`!N7^pzzKgK=e1Igv~#w7pLsK^kZZ`i%Bw(z(7aeC>MZ8LC(kbF=(*V@!xqsa4gQg8lUHORBf( z-Io_oJ=K-XB`S}A3pgc;;?!(fp+(W*J!aWL-i~(O50N47-V>l-(K}4PL^F!O2}{Ty z>f0cD@d4P%EYL73yEdrhZ-T0gk2wpo{p%4Wt(%|jyaDtnr*&C=htP(e(ycWm$eNYg zLyw*o&KRH{<*VuofImw-*tgl(hAYhu4c60=R2F?rV}y4^^i+jjJJE?{&EDa>ML(?iib1{x_R_&Qznj{pJS4Ul=!Gi1`fm-}$y**q zElrQofoLlpQ8xcAq%{xkysjgnwd}e5}@`U3aE2Pgow(*CY5< ztf!^C4iqBOozPv=>~#Mvn}ymn;S2L9ekjECSVzuJO9I&1h5kL1)aOez6PGvWU%D-Q@1FwZ)XE#dZI%$e?}|Wr z8~U^Ta3Tt0x_e!)hW08TAI8T~9P0(ze7*a?V)m|fl3iuA>ruShuUGY|LM}Y9sOKYo zPm<}0IPQhjbDj2snky=o%JU-DzG@jh8inR8>eZkhTE(9(4xuNWh z6qR*Hh7e>X0C$}5+l%3rMV4deDFYL~l07sCXbFnj2&)FYHk68;W7pA?+QaZ3Y$VGD2P=(LJU;c{v%wsb|p8_(NbuIaq zzA%ljH>{|dN>}Te?tne{%f610XIBzu+ijN5Nf`ziOnNj|K^@A-SrB~QU&8Zs=W%Lg zlNdHKjyi1(94{fB%m91L&pJpsrx{bl&zb$?Z+l|Tf_9r<-BH_!iQuJlV%x z5t935%JCMsizAHI*PY=o<}jq#eI?`XwfUo=Pf7Bq>}$)49Rm9_?N{v655nnpChiKB zL`aSmHsa&Izv|0+#I4I2l6L;J>(dj%KZ+g#L`QSBz`18&2N8}9?(i_Te*b-W=6_qN z@_^N=vR0(fxk!1v=mW?PJ?lB~1Hy`EDl?6CWNiajJ=io)9L8p_JGk!9%RZQFOkgZu}MU0G+ z)_WU&hEj`RE++;WHI&Cxy{wnNIM* z9vD^i^A-6w#yAi_xJ{qFNfI@oX}gCYTKYW9QoGdii!KUmcf4D(4?@ZMo9?MxswO_x ziq@OyQeX5=+wj!;`8JWJ{^_)BNarxOntgz=bmD>663BvK0Sm^J*S+DD4f|L3mhn}D zx=`O0n$GG9Ao7>%+R(YC-c{p@Dd7cK<5L3@Sg8S)AR;FSRJGMLag?K8j z>IuLKs%_labNll3EXPlE|_49a$er` z|Jq!xq#vtjy%_=L$o-b`gFNw#?>d9@XQ_-GR_d?f3_on5{&fU#n1ejjGrKra+E4lp z7;klqaW-)B8BYs`V``dTR+tc=$NRY(>eYDOgcKaiE$-uqP+>LCH(rsO&jm4?`pPKM! z2j!(9WwBTo-%ItkHX5fodHybaQgvm0_*f_RfZ~c?3H^$iRrr&RIBFmycjGA~sKDm* z_&?~Fg9aV6UIV!NH)K$2rYP?*!&{Qx-#}}NV0^QqO5uRN{B+k2fT|w`ENe1ys-DO! zHbnE|Uh!_A3Qmw1iK-2CODq-pq8t9h7p`)0Q? 
zf;?g+t1cQH1MfJKFyZf$d+l@e;9|y?>vFbR43=1g*WUqkQb}L-hJY?f!9NHueVGh>?-P0I*D(Y@_dFZ753Q^46z)`H$^{6@P6X*4LY^sWkgDR}d!u2Kn=m$?X{g)*!Y)f8YWxOGBHS&- z5beJn%K^Z)HeD7GH`{?}k%x1dTkFt8f8X6wwkQF2(FZ<8XE+I<7lWaKr=>o65fTI_ zUfHMX2ex``GhP7>$^wOAflN}5-`lJlvn8bXVa~DZRI<5#>pgN-8yRCqd*DV?db3Ni ze?_N|AED>Zhs71&vnBrXp+t~mK?sf{s)uJ*JwuJG;@#B9WF+JsG(*#ogm3f{o7Ovcy{9aeZaLZr@F`7iT%jS{(EB`#T=* z!ucOz?V6a9HvJYUnbps^ro=!`MD&%dTMWEq!tktVEnQEXI_L&$jni{Pzv9TTaxfrX z;|6ukd$GZGYiSzVD=G~nK_oS>j*@)(+{XHxyX^6u+=bQ=u?`>o8(9o9V|Yh+kvt&$ zhN6v2<&zqnS@E4FIP0vbP85tbZINEHFc#PlpdER*lprR!<**!a--G zIurU0p)R&H9V(HIf?2M7Oa2UQe?#6xWV+8V{I=tkuy@{r5pT(?71kQBW4sqjyC6-7 z&h+q|Ij*z+K0wZkyJxci>7hal*9ophP^o=_xWJmfDLtef;d9>89RSF)w4Ann_!hbg z8=3IDmxIY4nRz8HB{n@LvEI@$(iaRrIn%z<8p0APnwO3@w9>r%xbIdu;lL+$24V@Y z&y3)cd6B2_OgyXNd`s6*S+y+Wvyun2(WFoI*)shyw5s={Yez|TSoe5EsYjV zZ~|~x2}>T8IN|&N1pidVs|t5Gerso~JqWfE;HH4^zG}-=#On}am)7RR4mmPsBfSGL z4UQ^BK#>TohltKRfY=c#M}l}|Fcc7}K05LoXA3hfFTjG(Uo`Ee`&qSMqUOCtA6S;; zQ9e^WjEg9fyr-@*^QZ04&-UK>u;`UalZYaX%C1n&u)wyU)O!q0dpo{7-S$-0u}Zef zpSaln>M*}_!24pJdLog@C}#T%+jYD8VfonY>(T#im#Xi3Tf^vR>t9mRSG&j8j7R>{C4AdB+kK*Z4Nx-T^&cm%Mw|WDXAp=x5eoY~+Q3-( z&M5zZHJjMerl!yQf&3)`Kj93dB%_fM7hg{oxl8NNurT+zRF3*f zw}yugmPyR6Rt9?Y=eSi6s=}fD7=U2tSi=Unw>-{?@zA|m^5j#B6_l$*>p-1H1LOo^)@Gbcrw{yLb9~pHbYp- zd|T#)v*LBO--XQ+R$U-e|7rmacx`HY^*Fo_LfHW)~iGp5=(>c)Kl zj@g0DLjZae@R~%7uKW#8kEMsqisLb7 zdqUoDOW1B5THRqH;#S?20tVXzSNI=)R7E1*PdYw)(G`*T#Dj0eq$7(Jpzina%l-_+7%rvtD+U3_9p0S1V&Yhoq>X*=!dT$`R$twI2;9{X6Z8&w% z&$Q5%XLqeznLRppBkWW9WG{P>Q+eB#U!}rl6;9ZH@BcF!FTTRojX!duX67N+$1hW+ zkCEnd*(Sz9?@ZAxVy)bfDL`NKrwm=De}8+Io{>k-*zPz3SUiLZ={=)qVwJT7nj3kw z1YB_!2%}o<7fD(Cz0niMyh^=eM`WP|KspfMyPbn{wokdX zc46WPpBz^J(WEjO(fAG!yG&mmQ)n{B#UxOe>HVqa{|$+`MbdbQ665un{7E*ymB!8B z2A*N7bP=rsXijD+%bI*t^L&Z7QH`g`sm%-!=TK-ln7M{kL-iL^k2d2ouLGOcWJv4| zzn|$lIi_=CFlDMZ=J(;xsghu4zu*=?){fehKnL9HC^~QotH}CIO;hF)Ry=e#lP;=> zJRP&h@^Ssq7wcCXt>2%7eKw3e-hZ@u=j#l|8WR}Gmu$8nzB*$9?U<_o2J`>sV?AJ}=vSir~x^HF!urqPsr8=BAF zF<@t^M@fr2dJfmCz_?ycLL|n%d@J)vdNTKiGtbfxM`VLT3XqA*LWWG89ii?H{H6R9qKH&aG*{?wl*bOZIw4)a*m9b z;#`TvruyUpPWls3czA{K058CnGLZ8b~?gENm*C2&Q=Is%*CZn2ho12Z@zgeEaBrdTC4EUb_K|I zrPnECa$~t&97A6u!e6Gy@aMqQ8dlYNF+n;eIE)1gX6y2;7hlhp%D5VooNIB|ot8Ms zk29>(BsLN**Y0NfRpH)5@X48b82oy2^traSy0^h`tA|xKZQtg`Ow9RXW^}P}PhbtV z;3l~D&0a!@Z$*G$q~j_vPl)`GOH3|R z+{z5^);z)q5>Akuv>w1#j=OB#>SvZ7scOv=P)mEsY+e@CTa3ZT?>%4!fHUW)A&Hbr zfBT)p$TtHltu}Ge@L{GMpD;nKZ`&}%iLTcq;p^wAgPLEmJrVj;PBEKJy zGY%3r)swN32x%4;3x;)r|BL*3rAMr4w23EtI40;NvlSa8NRHU;nucc#9Wm`8yJQ}; zoY_X72#t!Y(zcB>7hbOc1_+LXhtF4^T%gRyEr{65674;GeV_Qj!PAjf+BqS8C+(cN zpA$8QX*pp3MV)PJA4K9N-)x?-a;MHHMc{%tW=-quNRSM&*9&y1j~K%hy&9GZ zf6@TZ4I-J@r+Lo3jQj7&C{e*VwM=jB9do4w!(pH8-J@e+N;&|Y^|hrMAP6I9$kY1Z zjF;$deD!OJ=SBc^ya)eDcVZm-_l+~Uf3TVrM;(CQtG%8>9RWB&SYvHWbMTqb({Xg# zyGcYjnrY6LuJWQ&lO+7$qeF`yve9L=vP7~w>6?|G`CKq}YF`anLx-jRKvTFg_O)cl1gf;-DyNv%5280=6f|14=ps~MC z=6hj$U>7#D!< z!dU%-stXPa`Rx;4T4%fk>%RZiUp{N937%Voz1LuIEF5?kGnyn~`LdGNpI0I^>>atw z<@bWUb8*sLlU0bXMV~|4!O@w%7XS;IaRCXY=%z3ywV!i+hNLidwOPoCbHr|6DEB zv;6dd>a<^tNhhjwiq)s;EY)wC_Tio_6>nIc2lad4fM3BJj!Mle+RO)D(^yGmreSeG z3!ftQ#Jy9un;yko7>FG*YznLJ1W2YA%5O^6s5>^kSh}|=b|L_Z6ua3R--C&q%#~RN zxnhZgMZR2n3{et8WR5@Ui;0*B)Vlc3I0SnM39Up_r4M$PdO@AXtNLy-+ji{TQ8*uA zq2f6TI24O0=^>%W)`8gluH=A00hpv`0;FRGCh>r0eh>q?nAZrteMjP(aAgIFWkMu4 zXZx9%tAS&?YexHRYg0S!vOC<0GJp@@3o?_|nP#J$5NK#G^ckRi!)k#+ks)TAcQMbshxX65NG&(-VB(GS^CcU$k> zNId_@6jQc`Lcip-YO*c+!anVh;K6!nlJV2u7-kFz^fvM_eU4?{aSN?1G$KvW1ciSDeRZb1=26d1%`! 
z67{+#3g#luRy!DT#<^La?ybkCzP7fq(br@DP@%Yp6W!FVNB`QgW1o2PxCNG&aqe}# zG|NU5SH%Di09oB@*NlkTg^b*H1?N^J$VNWV>Ki;*Sv7oELV&V&r&=S1ujm3qou5Db zKHl-|6;#@gJEpv5`~`{ZI*!0a<9$Q^Nx>B}0(lOFc!_xt?A^G%Stw&jXNUr?s=f^Y zswj*b%_U^|^Juvf92~YM{4IJh31aNXhVzG2r0t<;f|wjVu5<6m^J_+*vLcqR*2H7! zs;mF48hRc?;GZabGinaG3lNHpSGv6Us{7x0BB1v@Q=U%wnm-ZQe-6mG`X2k( z{#)YTMZO(Lnh=!Fykmfxd_|}TSa{*J?YXN-D$AV}C$W1aAS1nD|6SnWGL@fKK-U~9#vnX9?Ddk>`~QZ>*-YxB02o8whVYjkrzJJXIg-|g*l=Bh$QaL4Q|EF(v8qq)Vi zEOH2+3O=TTp{UB)vl@co<4&=-me~VURMDw|X)W#280rXBDT6?5W8{u8URV8Fs{0AZ zdjj&Tp9a*s|3U0>&k3eEEqrgR7Q=y`o&Bnz31Je&6+Czl=^ca)yxZC7?SZ89;ltGqPk&j`n0M_^@KC4u7c9o zu@-s#F|ho@8$OJA9QfyhsfX9c=7pYOM5sHA{v%eVeI)icir$H40>oSXPH|Lfx$`&~ zLU)pmTjQ4!Tb`AD%~tqOB27jDq9!~TVB2gDd+NfUyfL+9@ zj3zzHKk;E7(fHb6Fkvybm}QU~H4Am!*KCx=?XUh-o=9pv9T7x88$~1og`i<%)hx6O z_ga0;=1({!1_Z`yL8i9Uuo15^sLUg=DRDuwBVDqxkem(;{3uh z4ukLJ`c!_RTdho)n$Ji4STLF?XV8f)UBPZW?R4WJ>xHn~TKdPUo8tK_Gf(!rLur)o zki+7<)|&60A*6}cF#zb<<%~$*_CU+kUA~W;9A>=c_gAAgnREGIqjNhDG#R3;3{%e^ z88Bhuzw@z!kurA!%G0B_Aytvv9|7~wWozmJdVtutnvm4qr{}UU{_OJLjZQJleD>DX zt|{VP2LHHB)JMQ{w&1H#J9PvajB3D62NGy;BUg1g#u@W8fT_o>8|=5q#W=($td;jOM5zzhz@*^7R6>Vi!fhjOi5JYDgcwa@OA6|D(@vm z6slNK=~l2ySoq339cv(xVFN|JSooRA|g|`Lp(nw*44XhDM=;$DggN)gJ-X z8Z`op*A= z+%pM&dY8HEuRX<1UipJOZ-zqMZh?lt=`w4MGGhnU#w(79m9F@*f-> zn21HkZ;f^UXlbp{i!z^nO5|2xVlA^dU~S^q3NrP#fA+-QZ_X|MwK!AdtJH1k{oJSa zK!rWsxQarFGu3(XvF$go*L9kF9L%yuo?tyEvqBKcbAlq~(Jf39{)P$W1{FTvet6(6 z7#2||uKCZ@_M4zhm_QGjk)Id_JrPbPy5oAKx{+ijmpX;ACRv=%?qHYcfi`IegFC-f zR!Aat5%j9)#*Wm4-9tN*^INrI-P9$G4)PcF!6;0`DiMMM7!O6%BuP7niE-SvIqiwY z@kD1XVqyq7yhB)u^hS;G^`v})>EisbdSj8I*)-`HqngJ=d?-WK#F)(l_u-upj(Nx7 zKZQb4FLr9QyO|r4@ah&5NWhgS7vVZ2bHEJRr#u;1LSo3#4c=*%!cT6n^*4FNtkITC z%NTF8b%49G?w4e0Aiu(X`oN^IPM3zR7~%6dxY#B#%3IfBv|iY(&HP($vwCi9yLP%3 zB%52Sq@-R;eWcpsvFFp0R{=Fl*txsHjyl@*2Yue;hsFz?<~(lGOC06nP zk5A#RkADPnYTiF%gzGU{eoAc&soqVzrY)EV^_4_IwkUu3S5~?xYV2uitmlj{vp=cE z5_pj@FkQfCgshjz2C<$=z7d8yVMobz5 zyToE!rZd4oTltnbNGsuS;Tlh z`YfXrM~M%~fGv51fiF1VRiLxX@?>3Jm2k5G zC^?>SA?Svh06P50F&3ddw+T&hFv`5hC})Ro@We z9@LIvi1>x0Pz%ULz#5H2!O(bSpOjXWU85e>Gd_TmnhZP?PxyCh25}QOfU?W zA63C;NwsL&Z*XG48r`44D#NIR%tGNb`~vVF3ndrm27q`&Fwt`SZ(hVuxM_M2o!qM# zW6tnjMB!+z;u{fuGxPS{+=ee`ha%e|U$znJF%gbKLM?>GZe7QDhOK4(53YVz59{a7i1I+*+I&v+RA>>qDd+_E-g zW&f_5;oot80sXOG_2YlN4q-n0)~ml&FkAwx=H2g4PIUH*yGU1#o-NUQ*jxQz`}5X1A`i#a=TjaLSRN6{^H2K`(>9z;JGi!h>spVvKD7jt_}Aw{W~E+};7C0< zYS%^nVVbYwQs3U({pb1&%fsgylfS<2Fhp`G1Qf2Bp0C+^$G!*ch4AOQ zPf%0E{TNpG^Y@duPyF@65DxH(`2GC>`E4uL9ijO7J#L>AX!8EXpX}#4?D4<3`hk52=3sf|xzKb^pN-#Q$htm;dNr)&G)2UGyLG!R63guhjpDqc+{F{?~-q z$>_X4#aVuV;_|n?3`re~Bz`xGf&SOw9O$4~hFWJhp<6#16qF52+9|8i2fpaFA!E6-x{Axe!5c-6*d07T0LKTG>&TZK_LjD`b9?1VcR(5EYFq?HKaM z`?Nfk%=LY-GPTfOiF4R?C$2Tj!+~0ai_N7-EKg*$qv`JxAWinCtRyB$vC?}>ZL~#G z!C?1Vu9olJvD$>HFCS>XrW|&E6>A^DCs%sATfL~d=5{oS^T}sFb?_x}-0X&lwOGP- zWOrunyZvrgCnbv91i_UMQ)@rqA+Dd#m!LzHx9#8u+XwvkIZ+7ftEz@}Oe2_** z5zi~kwt1JQmZ4BAs>8NcTj#3FU=8}8zB>;UtI0a9HPMYv^C|DsreAG;x&7(5olpef z`cdHO!tJN7E9*54Rk2v5?p1GIvMJtx3jA>v*J0h<&`Lig)6zsWC0n9jTp`_M?~_DIbGjLDyu;nn`|! 
[base85-encoded binary patch payload — not human-readable text; content omitted]
zM-?0HJ(@n%^~)cfig>Lb&xkv0K+m|`%{+BZ#TKwgAjw9D^ zDghmVV8)UjtJ8_gyD%Qd?~^=j+NEN7c)E6@)5%y4&gdo{$msJ2u!>h0U|4#(=wVK` zTFc#7^c$HZ?DjF~w}!9zS}~bj?88!)m6ey=-{56A&Vu>l9cXAKfBiU3%=h9nUEjy2 zSGsML^Q)1o%lg2%#reEI$FuukgMRjlY5m&1^z95)9VU*6O0%k59*Ya`vg#UeEK zhsk@)pGVKrdZBnPF6$Qy#hlHqH#bHFG0`&j)!{IGRS@JOcU{$CWrQes(b4&HAtlzR z)VC;ovQ2*(y}4x!U%^J6k?5q~I+ZE8o7TRm$yOl^(8)gLboV*jNDbRP~KkI^LsUDqS*tz*OKaK=Mhcc?=q-(E^S`r zJHH1=d*x;7L-&ejF}& z)Ihv*l7jNYIWHXwyY>x=?(XKNAum$?4BaZpjc|8E)Va?<5C8+_Rt;=?WMIk+I_uGoG=%kFMn%= z#bIOwf6-RiT_>v%8)`@Yj;87L<4!ArH#>0YVm(VAB9JcO)^oUcwt^57xO$6T*z+ilsm#od?IoH&!t2{v+T%+kw(Ti31xZb6p&pBpoQddR`n@GGl_S?B1PG>FbFA=w( z-S)T)`Ev+bG%DC-`z7$F6Q4Vpt4Uq+h7OLtvY*U^=~YQzW5q!wY3}-X!0osRp4_NZ zrd_dtOcFLI)2aH2H{~w3e5>C#iy#~6f6qwU8uoTwwSM45xsWGSZ=tPwcUbE6Y_<$w zF-^t}eRY}f<8D#s>Gg;XK0<#zXZ%@xE6O^$sjYsw_4OP;prgNFg-QChIA6PUDF>c5 z=P;hAZ8XzsVZE?oYBaa%$Kv{q_SW8(rmTCr2|*_&e!Y*usgsl$+ET{%Jv-*&e_XPi zww%^JVvJlA88x-E)1;myrO^Q^4UU0Jhrr%N*~*$W6V1Eno#Ik@GFRt?R!q;68^fr1 zfP;ZPTitf+l=3~Ffn3R{4F8&_LB=fNr{HZ(&QXjEa*A2``-gN{7IWsdduXO&UsucN z;n`TvskBctns(-|k&CYP*67IOe_8vg%a=Ee#C6XuRCJoZ?oTV-!Ud#>c=vVGO>OG) zd3&D4Yvz5{))8QVvlF{MW%WIjY57qs(iy*RcPJ3153KXN?5`kF_U08jYfX%ae8T%m zw^}no7NOS4^F*;kQ+@V+#@v2L*qecMX{igL+XEG>@uG4GdFU z)SdR&8_!dLX5DwG#FzK6f2kM6gPGaYM4M}hP_JKjp8#2@nLsUp{EhJU@Nyh`ek!zhH)j0fW|uBE+my!N#b%q_f1I&COOoz*a^*fR z*S-IIftf2PuCJM8?Ys7VopTTGS>7irbHdRt9_?+LFpeK%6;Wd}+L^yy;NF#4BkLie zhBIz~DwXkei)LYbZoORqXE)-DqOKhEyoD!8JwL;)JZBZRxam_-OeU6i^1R~#RV6ED z{oKY$k-4STKhQ;Ae?M~#z4EF1{7zNbgdTJKBYfUg26a#$fu==lo2}=?I^_F~qR-Fq z?4TQ0EnG~FC8KTk$D_VZ4>p{vN94P%^}H=M?orq8!F@#U#fiD#A|>X-`$$;7TDqHi ztLx&ul^xyIR?xpsHqqU*L^wBi{p-Z?k11me+f&taLPs7_eMNsG=BFK zLj;MMvM?3R0`W^P-KqUNKZvW5=$gfm^&b})O3%;%r}ic}1&?VFgwH7B@4I#$y*&eRj%wq`S9y0Gl1SxxqLWNr9xW8N(`o;b61njt0Q ztDr8bPV`Nff0*-o_Y}^ET_0Z;;cYS7ql^B#Mtl}pv<142FBi9_!mhDK<90pU2NBS< zwbSMFN%Us5^^!L!47dI|zI&JbX}o@=?R!*STerFTYj-}IPwmZp^y<9DvrV>--86XS z``bZVEswQsA*pcA4ljHE8r|^I_IA3o{lvLl*#m!_f7DZf@}~4|63$+1bi4J2o(A;! 
zbhuToRV1Z_njO%kJlXD-O66{xz6r*r`sjjjIDJvZJ~!yS#>e9ODL*u798z52&EC>Z zxM5-JGBND=vWs;0ay!au%e%fKTbuXGx%epk&uH_cn=bYnF&iisk`auyJas%rmvVs zr@PKLgS1S(p__7@&+eHtz8lg$)nYIf9*_Rxv2>OSEACwoRdRjtoImr)?9HyKbfJZ0G;ApC?407>v|f_ zf6eo*@1M>nO;uja&R`!)Y&BA;@rEuRYS}xh-MSAXo!&TyvzI34vrf+!_ct2Tf?bQY zw-hpOg?KjOn!9M%!`5({JzQO)+BhPl;_t|H#&CQZKiA9 z?6J)eA$>>A%J3{Nxqzc%ayZL=CQQaRe{fs~vv;12p1YZD9q;eMbTUtz^GI;}E5|%H zlJpX2cht@%tsbk*i{lf39X?ymwR?98*GTL{!-t@c&A^?*+%NLa4f64S_ zZ>ek1W8;Il*5|HzyN($n_m>W}2Mbi-oOj1|d%#nU*J1aP7h~?|UoXP$y_*>RL5X(r zOER9a@0%&g%F>;|n<<|sFoCt#K#5bA)`YvNdA!P@9v1bgkKWtz*E*ZPU*+0ho}8KK zoxo2R^=@ej`i`ioo!qozHD#Tke{*0&9{kn({&2>?E}d<+H*8H0(}gJ(_98MDb~;0; zw6uEplef3??Luo-t+!~zH9Jbo*LDZ%d#Wzd^uDDxiLNcDLUY%S*UdDxyVOzL*?D`s zqTX-^K=N2VrOztdYjgh&SOh&;Yd3xL92?T*vD=B~+h|{^KAOwQfK;jbe?pG;^rWqv z``uk#Q)(YB(Z&+)U0BVnS(2F-^(NH%F)zRly{4aaGtn!BHbw8Ac_Z|+2aC8l;a`#* zske)_dmVR%VK~)ww3_%KdbwL8{WiHB*W+RyRk>MF*1GV`y15}v`zq7}4>=}}eLF|3 zdPdvs@|mU4GI#^HXq!4}e>NBNbQ~!1)qA|DQ_tQ&cpARPPdUB4F4qPxDaxy*U)}i$ zQV!>n?MRQ;`)MW@Alha}^aEd`l$p0))ZI6aFJ(VJPjvHE8W)&;n=>2^*&mM3*9^8y{_?M&8}n`EORGdiT=ge}%vHSlgAH&HVjF z42#;QFX+4Xqv^eQG}|1Gqs308z<0ada)&24fOaWoUcDO{KNe2k`P1uG_OwEHU)S_o zc{1;O!*9^2h1(%`!rbQb^5Rvz|2l4W@5($WZ03ntSU*6Usv!1TE~%-Xz%yUz#iU5r zw^=@oHfyz*h`Yn2f7l%!`gy%C)jUG|Z7 zOJ@gJy+tVKwxjt=KFp&h;w<;o(&qX7CcMk4FrTWdyooywWphfDSmR|Pqg6>$J*Xs` zZtW=>99O587iQxuUQ>yb=j-E*w!xFxuGMTw8Anx2)LAc@fAXupq=&f-4VSv>LqT)L zOj#tQucfn#ihcnaV7(jvOvMMI?S81f{t65!+w`&;2e_MNhdk1FEt=}8(Iy%_uY%#Ca zJ5_aH2`&@*W`yk_qSq&VcbOj_8_R7bcF~H6#!c$=;kefNslR__U2L3=RX=;|=B#@T z0?$gbEiFxT)X*>XGnIbtrmuFij$2ne-A88uY*t3GUTTl{zVloAU^~|C@QkU$`aVrG zNAsQFe|ScDdf5<|&oVsys16+>X)bJWb*dqy;IYrIStk9 zINh7}bYHZ-G!u$~-GTYguPol&P4!WkT*5`TxYeEwY+0f$Q-4k<9q;Si_IS)nrl#WZ znFp7pG1^s8rl9{qunb-~3Uk*M96$B?I#;8$e{1yuVp_2@x-YHUIeZlqXhW_?8`-5x zSU#j)6Ha3cV3K-kO*Q3SNKU&LW_t4nOjhaR8#buJzN}rl(`f4Y6*#*_?1o}_?D$SGW>$QvgS@p`7 zf4nzET>s>h-hDex&THZSBJ97GUDdX2QSe{#ErW~zCU|N7d-0`em0gkRamb zBub6@S8WHD_T*umr>~;|=wuu_BPZ(ue;wY3*8-GNzSi4^@VvWChPFPItyUJNCu9Qe z7^EbyguI`n=cij_Lb-)7^o;KnxZ{p@b=#}e$*dIW-6Fd8JmS3jk_Y^m()K&YHJ+lr zvx5EYzN3c&(7H#7m>H27;YQ%4UC@NzkGkg1%{KaIyNA1h#s7mGt%(KS_A+SOe}Dvz zkTg1&@O0;9E6NuAkkTe-N9S}hyHET8dSu>=ZFcBo;#kl?0TP5@XwN6qUZFHQ8CRtv z;ox$Q%l-0T%odhB4EY_u6XP2r)Ge3bypzg0%bTYf?YI z!tne~ripRR#{2LfOZ{b(av)Jsf9bjQ=$?i5(61>|Zg*ZOZw~ne(7YW}bn^2)5l!#1 z`V6ef(|3Q6{5M`}_S6_1?B~OX$cHC@}KPe=x{*YSTPF z3zHMii#`a^7#_M_Q2eqmt+8U`;v3rx!FcXdH^zF3Kbx^>3_SUA=9#PUF^<$@iTZuH zI2@r}12qS2HZQ!lZm@N^6`w-kvbJn?N;f^N^x% z%3ML+cpO~+)Ra3be6(O|f8Uf%E-fLUuHuX4-mjG*tca(h#7^|FH|azG zLqNR0=-jvV))4*qQ#tIrhHo@YiWt%k*aAU zS-+h~Ptj#fg@y@8(tqy74OqHpc%UJZZgp9{yT>PyMPF^>TtJ{Y9Llw@l~M6)?WC^{ z(;z-v06xbCY5|)0}lVSI@Q*LTte2y3mH{zw|c6 zo$4H~)gy40^K_24UhBo7IrtQ+HTl!FXk*?f-B*VLBR}fQtAD;2DiWBtFlIRM`8Vkw zV<(@P{Z^&AfMwbQ+A@r_ueRy>k(Tv(4>Tc<&wfwC$f~(I#pOK1-k9>!P2~C3KIxkR zGArzv<{l6~I~|5tXmqY+hF9439MsfW2cFi2Z0bI7qYSat4HpIwujiL;n|4{tq9c-) z&abq(cD`!StABEG5RaH%A-|tPC2ER-k46&>n=%0^h=JrHr_ObL{{wGo)dye z?w7pm$P6;N>jjO|E0H`gGrccurHA~9mcFFX+l57=v)ZE(!}7}ADw;3!i{Q!GO!Ve{ z<{npT3cNP(0mY9<_qd z!{Bb()XV#Fb&EZfo!7GW3UdP{*?7JirxVU+N`7fy=r-P>-M!a~{&?1f>3qtIPtO?HY3U6e8Js7Z zmQ6$kS3tv#bf2`u?i2B%7P2e7p>;OpA?i&JKnEr8Q~ENu-Z`7plwZd|MN4XED2*PT zkr?-%W4|nlcP+nmb=OpXzqo9kjzXfuhjE>}(|^hN!ro3PHXj&INOm zr6X}k`D)8-x)zFmRQ^#(oW`U>YuobIZnOHr%_2b6Gas!2I|Jb-u1uDht9(O|Z>W3M zJEHNJfm+|gmLTL|d2TktGG6bmJiczJBpXk6jwXuvsJz`h$1h04#N8kz1M@SGyP=PE z9DlM6W2U4|KRi9CGtRwT^M*AqX(>wYhuNO4`SQ65?cO?|RU(I3LhXIld=RIakfc%; zAM|*U=V{DFgQ4T>_!zHo=I!eBA>L{28IJ2?c+%i`ey~oVJQQq?<7CN(L*SA;uSeTs 
zd*}uS$vC}oUy5(XYJgyVX<7N!-y?raZhzuH(#+88nEUEeM{D1`+08C0wqKrpT6ZQ^g9Htchn53tlb%%)zSml_@wV_M>po;2y2|8DhwN48WT-lkcG}hZ zRKoe0hHY)^=bT+g&U&c+=~W(dXzR4FzdKdl)f;XCeQyRMVK1Gc`LvOL;p)5YZ6qj$lC zvdcno|G0nO`MDS}(IdCyp>Ej?Sbw`o->=s5-MGs2*1wJG+j_&ht?~*EPe|_{6GxUv ze@^>bU6~i}P!pFABydy2k%l?zw`8{w&rHKTEN9{(Fr9IRN7FldeT{8){`BEpucjGie{pw;|5YC|F z44&3CSNCQY-;MsVJC^LoPR3DUqT$fK*x;KSK^Wbg)axS3_s3E^=I`~)=l;FhOPIh6 z7c8g3>ffyU%s%IO>^~NrX@8rY6NPfu4gdbo6(eUQiOd2UzT5AyR-dxZ|kg~zx= z>>-ZO;o9xC@AY|f>V9iakE6`osQsqXV!@>Hd-K{o`~5i67`pDgk$<~!N+z)7sYZ9B z?_T=Z6)25~-ZgbwGX2h;*{MApr+{L?of|+Cwq6eP4MX% zWw)LRRggY0G-F-QxPQ~`=RTZ2QK4AdIM(N>%38&5&8^Zw?^P?Ym973I(B2-OU^G{5k>~7fh^UG*%BGWB(+qszi>>>_)k>>pklBGC+U!bZA@Ot(Q3Xz|x z+CzE`ntI)aNKVg)J-4e(W#AReW+&d?^??|3(}doR2LgTAxqsAQe=-v53z$*zt#5C0 z3PnL+j$<;4xq9iQ@q*c`TJrK%o8r>`kgd1MV={<0L(Z5Xk z^)%+dDPaXlhkw2iJh7J{+<$bd(`4fU`n&R2rnjQ1O+wZW+w_zwI1p|SgZ+urkuK)y zyMl8Qd=51+WJ;aV}+9B^xt~=WV{`k(#_c zi0;Pz8hnWeNXNm{6r>1TO|%R^8ZAJgm9Irogdfx{Vdc>`l0N2?rjTEEZNEhu3`EMVZxwu@38=a& zHO1e}Cc2m7LvNnuxG!^o{Ji=?RGROoi{m72wto$XY7h2lyfRuKLYyGJNu9(XikO_u z6J^^^Bsg?$^{fvSP0i0uiTSVPYhp5(4wHPToB0flr&Idp(5pW#j=$v*mNTZ}BsWh- zrRljo4ZcwEeNkfZ9E;-dFl6$1Lrhm`<1kr|UI^~a2IfdqwtaG58)gmZ!Mf1e(@Ye) z7=It`(%{fU<;mjPKqm{SXVW%Vuowbg@p~xk(8*IqjJmJy)IL@cB-!_m;H}b{UPKPF z@Vmqy{KPsmI`u>Epy003aDYj`N{^DO?O*4asi0zA-Mcj+R)knz7+;-3Mi#{BveR$L^+JCeS zto8%6L~5Swg|hX!Iqq%m)u)eD_(~D8H@;sgblr4rO<(a`DRkP8aWW4&-EGm=ie}ZF zPbGh<>D73?Pe}*&{PSp;2i2eruX~U^b!yT_rn{HBlBne9W_w)$3W0VDm$!`z(8K9y z)>8gDW*0B{?hQg16?MLu=@KhGrhlf;ZDkMKWo$l#97_t(J_zHa>u9stuw9Yx%B8$} z497PR(XV*Ny1rbF{&*r|`*bNL*_8a_>3(*OyYI$*K~2&U^ruO$;IzIMCkd(misE+lt271l}@eS&O3jY`}{>;w=V-xH-2rGc$%%;#USHEry^x&Z#qhCb$^SYA+;@L zfl_7z(AjxK!zK#X_ZH0ObW&gGQN0lf8M*!8>P%ykz{!-pcvJ`dv-M!nr_F)5=x;$}&gsqh{=SJ-@$y-~-YNDq(I_lExHp;lb)&Q198f-q^eEz&%=4c;Xd$-VM3Q#h-t4qTiPdm6=IFH)i#Yqa9 zdR`xz%wFF1U^pDLaV=qK^kEZjY$DA9SuL)Q!RTHCPKHM-^+HQiA2<5o%!Qec5`8d% z>ES&kwiKN#-G9A;grd$IULx_e&ex~q*!7uPo#DuK7krb~1{gz2yd}w=JgVZ7y=m!F zB2(QzHlI}|g*8M+Wbf{*civR@4t_OXlR3c=!4)6g5$dddk218=liW2o!jh_avShAU zgPz?#h9G7>;{`0gPE2<8N?v2k6-jsVoOt{CcpPw!hJO-8vXQfU*JC`0WgZ4$5;gqtzDH$BOHruGodn?eLMFJG z2kuN>+JD2-H6|N+^tbCPE%&@c)KBkC3@|`C)79?z|DHQBXH#PZh!i$8>PsiiSZU+e|ScE%zpC{J-u8m zj7EQ7@^#F7IS*nM6r4~l=Xa#Db?v`v?$hk|Lx|&IkGVSmMHa_!YLUCx;Q3j z%oXwU5^}F9KNCYrTG1W8b#i~f1{5ePYPnsrTkHawIdV1WBrAImx8iw!tJFtDr^|D3 zuYYv=g<^AX>*8Q{y2#kJXu)*v^Z6NXsoRtDpU2MQ^Vg7FYw`@;RH484BmspF-W-4w7M20dShEpV1;4wmse|SEGy$c)kkAx1 zJh(sbT}ZLt6J@6U1Edc&^hZpn5RtRjZhu(ACk+&TydNqpEdseVpPk;#w`|)}hbHC; zGgs4GxjxiF*kq{9{de*?e_Jg$xA02k^Ku)e9Ju)@>x}Nx+A#!gzo<7oZ1jn%-|;R_ zHd4-L+r!(I<5zsj?KFy63;6LHcU(jLknNs8%-|T<-+jlzP7A;7Izr=oG8B~{tbfnx zXcKD_J-j{wn5Ay2<6J$RE}I)5+2N9N8MVeT$nN#=);o9eLo`CsEjS-(-GiVyJw7O+ zgR+yo&tIW44XsCB$1t!T<3rkC+4Z;(qD73b1Go}KZ1*>Ffsk~YKh~c4_Gaq7sN3-n z4=zGw{=)8p<;vyGGT>P?a?)jbw0}~*KF^QY2yZS`wpJ}0FugRK*O)(|dtt)}%6eo} zLyMbR+U*GAFcwi@oU-WJ4o-h@HE!P(<)x>*^I)^@{gVi<&>V*L8%e+R_t0d;<^>W6 zDe=uCXnpeA2?vAq%X1-WfQb$eG8du4tZ2IqX3Ix~BHELs+!pN#)K{&)V1JJK$RSyl zy?`g|46Bj#hdY0#(r8b#ICzGFZo2CQ3ZQ<<86=?Kee@|wgNbLOMPYN@NbPP)Yq|)N zh9RK1<*EsK*1~spP*cb={8R_SIF4-{dg`?z>QHX0MA$7Pu2zbgoLriwetadlRG0OA z*i_MN4-;~)fp0(20j4wUCx6yGebzUKJi!BP5j8s&p+!F7AJ!28R6&_3y(D1msP8G& zuixAbsP{J9K+gESfBBosc_*E^LLe)1R}d{B!MpQW?S*o4Y`EUJ$UI?>mHZ;}nl+Wd zm|45M9&e|Ml8w|Pk3VTwlOJhkY0uC_3~g4!I5%(mc7H=L+8nIvw(rLW z(MTKWj;Lut z{{Q^%WU&9v1#|zy1(SX}$A7qB9`R?g@PEewvx6^$^TA4lA^>QLKc1riAu%+;?$-Mf z|F~o@q5KJH{=_OAAb)t2-SM)$!u1ryV(;%o{)ll=bt2WPe$UkWCx+Vn6fuN9@zkAp zxiMU+=5_XG_wW-n{f~n1AEVN_ZkGM^_gDBa@S+5sJZLEf1bm}>xI&cir^WiG2o0Yf z{BLu4apESb@o(U`iQezue>&3myQ6ZD^h{KmDt__+(BlLT?tgbN1m^R)s@U4;#)4@T 
z?DPH=FUE@=POnRu-|4yl(-Qgye{b?f?}Q4dF&~`tuDn62<&E(7dMd>KVP~t_Vbv{j zy0v~HT%W&-@BH3M72>U6E?eP?SdQ2D{=F3Fc6b2<&FZ8U)`KqQC{w8-${Fqfo{t z2J90U>k{=HWp8(`TQI=fY+&sE5nj@s5!g_EqK|Pq_}h5rZQk}2;ut%RY!xOLQhRU9 zPsKySDhKlV%HKygJ6ILxbOm@GpzO%6d#ZYm;+x*4*#7`nOq{qqv1em^<@e@Jc0>XD zccwguZGVn-DhxOdvb`Hn$E{~bBgNY`T_9b$VUICnN~>oF&`rC3f;+N@QMfDNhKDf~ z{x041Y;|`Z0J!|2Pi@m*&@H5&gx2cIJ}i5`I9V|b3g58aCsZ%$$2E_33D+vPXdI6| z3f@}o`di1s=bupe9<#4nAtU1M!<1e*4--tSARFB{$*b|lFXx+jLjTXII%pXyMOyQ#yjL9 zr(crCRZqfQiOX_>#PQ2~edqSX_l-)eeM}cmPi-&s@|{$8TV&G~KvhOf)MoYCeJu>> zqi($&VC)YQ^~j3Wsk|!!afQ?VfDR7rQ?`w7eAkpoG1q73c`r|W;c-VB$eWS* zI;ZT!TOpe{?)zS!&!^2T$Hb$fJ{%d|xAE*}h8AIO+b1-U?hmRet$qzx(l{FV$cM~% zybUm*5x}}uDx5{NwR)c$A&52c5`X<@?}lST%gGkL+w`a2QrhWZ)vf!327FvaGj~*S zR1M@3evT`P^WDeT+cje$w@Q0zi{JUl|D4x>oWR??snsN{L)yBT=G2zPF z@ni|N;aW}WcX=1a&hzA8Y7gIAg^5uLunQyfiv4n=e%S+^y@LtDi@0viVSj~wm(bsA zuh5wC8g^sf10nzRvh#_yJcotoAw5EAJCQce@8P;h{nsn$?{OW4+kRaQ>wVsD*=v(< z@J~h|`8YnQbF&g0*j)a7KtD4uPh^F*%1d1DZ8Y1i#f%YiXH-e;59}U;3oUQAdCbgT z|2$=32;@L<`jEE={Kxh)UVr~9fOcL91$`QO{rd3|6xnGr<6O(O^zB~4)xFdt>u@ic z^^soSWOY7!n-HnI^IcxU3$A++$Z#a7WGuFHee5Zrbcu5Smy25x1d2xz)$1V>woc0rq&7Vqlz(2E;_!T=b`Z8< ze<+D){ahdZDJcY(gsc_aLmQ`$to!WG>9~pK!sdYHGxTEbbyu9B&xHNl`^(wBkl)+| z>RXw7uWZPV;beyRW|VW~K;M!0_L@(^02Y3?QoXr?R1mb0Za*94WB~R7-Ej^)w}%Ob zKkH$iPWgVHY%%+HJa@@0S;2KK%w(y+m(_UYzm3|!mmHL%_%>vpv-!bnQ($E~Gi zu!a`L`0#91?tg4v8*98TIAi?g3q@Pd^o-M(szzrt>EU~h!W40%*y~nW&{b$XGp$HB z`#1Q%fUYita?Cpy$PJ35RqKWsL6gmd8tu#TD);QCxPD_g-?*|aK^_G9#RVW9_fyLm zrAvz@;N_L6nhQsSQj3=aFE6CQs?qJ+hS$ZrO@D^ZYkw4$80!a#N2CcC^;O?K=2&Hq zW|N`I;#9j`X>i2MMq*PI@ZHe3RTGD?bjsug8?R?w9}0qaV(gOM)P0x~`#lA1b??do ziM`9M9T_L^E18?`2{rJ(h6pwsfD$WF75h65y^s2OP{Z9h!bVgY@}^by9{UEyN* zHK8pho`3Z8{I2LdF?dBW!8KQ+_xjcTuG6&_AnU5-QgE`MQ1K#P+X-F{hs_vZK5IVg z4=c$oI3P(hil_nJNfSyY-Kg=Do;D~W=dlOaMxB|c-bddj8g2Uiuojt7IMSij)|6B( z?4XDH;Tr|QkBtAAk#Y-b%K~u_j8k*+UhI8Z?0(VslR>dx}3Y+ zo(`w!1j4EcV(q8hFwOm5ZAw~yqB=3kTz_GwJ0_5;h)ipbwoVJ|*Rs6~EG1hYA_gdD zUlQ#~C-h8>rgY@VqonG~RY5+*;~51))@VHm{bU+r0eeBdHe-qY~F zZOL0`B`zBWCn+L#UFD}IAC5(h6K2(*UD876b0wkaa6=J9wk|v864kJDi8^Mu%zxV1 zofd^{B56B5&!%q&IQ04@JKrczfnUeFH_6xqlNPA6%D%FFA+&F)Kiw$yAQ-Jn&QDe! 
zU+t^jBp0fimc_Lb_H~V3FFHP-*=X#Fuk_%Vhw*Xg39U2cxH+$16+rS`3m?4(<8eFY zQjd_vSAJ@S@R28kyw>J-7f)3PV1H6NeUzc&Qlk3ur+Hm#)a3Ufa3Fx9F0LYdMSBRr zb$6c2#w^y3xtfxtj96$?2NA$ac$`1n&AL`7k+JS>dAV7JkfopqVkjQ zQf1b<8Rv%JhF;p*b@V(pEY#fOmt6X-o93uHc2>hY-O{zD1a*h~;rABV%wT`5HyLhIiyc91 z7AA+=5@vG#R?-7N#Oyho#D76b)gt23ju_828jwts&-D^ z!!5Mxvuhg?FCZIz0OU;X^gy$9={_7xPan`W-&2?YV!5N6G%xB;i30bbEq|pNNZ&mypxXA^f1CYH zy?;IhLf&H0=UQKl%bcN_U-W(?u2CVZhp*CnERw&AuyEt@J(oz&=NPy;t6v74{_y!H zx1YusN%p;9Pe8GknOd1L#%_+@hiTwBw3bo?r3diT-Gmqg$qE+1=bBBGZ^Lk!d7a&Lpv z?gZM4oNk67?-bA*k>~y6pMkz5%>rIJHW5=$LF8D6IqR zMbF$AVSm_j7^678*_M@PYa|!PEliB_^p%PqQX$!!zoIRCT(Yh@m&@(56r%Ti(Rti- z`~73tomj}Lp@f+DK5Ud7L_C)1PJ74lS-cp({(g;}>@wl?*$;pbte4yU7*~?WX!nl{ z^S|{47nyX^T&HS*y&#Q=Up1{7aBEPcQ^Kov8718Wzn#`XkN5^r`Ly5P$jv4Pt@m;Ns@%{0FDr0X#w(@Yw|9PgkWUuQr*}o;L{d6en$ly2AD{Fx(Yf1;am7o4?FD=z0E z*~US-x1Ktw<(H)l$VOR`GPC5H9Uq@xAl}5yWOc?uY=^ChHpHB*Znaq@*;xdFQEtaCdf>p!12Cqw6sn_p4H-ENy5%nnKTLh} z5E&vtYsGxO7gni%K1Qo}#qNdA5vN|CmNiyCdbJeQhAm zKhgJ#-)#57OFZQCM|o`2_)J2!Kzo7+50%3-3g09_?-=+>gH!KCFQXz=a9=-ktTWsJ zX7FS%xeE(leEV~PGH47{cZn7DHGksL9hi_iE4n*A&c4J`q3@H6XJ3VrwH6yYrH5e% zXFXGct(jh-3egN6GwmhIk4}4J$!9+bn0M*VdxPA(`!GpZ)e_9|yGhim7_>ZsBaB*qEMz8lFXsl6)>vqIP?Jrp9YJD}VHf%OdW; zWUDtC%E!#++k+E)TD$E>3iasiruWw^62|j(o2G33adv47MY8sCbmZIN``kR&$Si8e z5$$6purYdkt=DaM`!rM?<&v9n@3`TW(<@18CP4q&8lbb1+*jT281F#)ybUM*FG1)$ zBZF0y1GgH-+qtGck>j>@*?+wVj#9|fP*2HeN|$3hug>vw^!>B%Pw&YWbm@T6(0#{2 zQjfoaoB>l%ND*iZP`fHBf8U_S(g%&}pmJ{N$*Y%|({qPX!D*%=Tu(hpw1O+9X6k>6W%>fJGHS$YnT>?Z`(ghOt{P8G@5)) z_sw(JPxr1>4w=~_ntvatnfsIg?I;UryrW&dIg+re|NQ75Ky+<2OsQ_$by|d)WmwGj z=LwFrb9DFk!9|7->LXyOJ)z6DRYx~qcvC?;gan#jEGmU+shGq1{}%C%Ka$2T&cVcq*Go z1ZYm;)iE51bkSgOu^$rc#b#U2q#tMf3ivCGKHt6I` zH!8dp=$=iCy~xHqz@wI0wzo%Iq@{57xI>tO2NTvjSP(}M)=DxDN=F4`&PRHKivuRL zC&&n|Z+&|_?!@fUds5`}A+6=Kx7&`^TguPWyO0)Tv0kU5{EnM)D=>f^y@EY;uI#42 z+=RDu7k?&tdRWCSm5W0h9{tQc7VXtwEqowXG5yE;XJX1v3Bi6=1=ct|WDg-_u&Z#SYVBS({OMku5a@W&4jP&R9>gT%DrBkKj zBn^+<*}Ld0QyRPpOk%zkO*-v^GS_*(KBWGm zM0#nIS0C9>wB846>k!+stLYzS&maTJOJ&PrdNDmTdY8UHb>y>lfeo)acQY#~>}~5} zZGX5b1o{j(uO1E3qf0_T&xq9?Bfm`rr;FJ$I1~0!xHRA>YkJtN|&2>T=TzEEL zjjv^i^(f}o8-5zelbfefg;|sM>%D%4wCZT)56>kM>T7M#cTXZ`c9?j zPu0Bhi5%qo9Nndo&f3Gqx@gP82c%%EyRRbMKv_BYpYT4q@eq>&bM_89G&iq{G=Cgx zq{_S3>aJ2fNINv_p;OpWVaB9~ZdOnEk?E%XdS}qHVUrU+L{vY_`q`mjZY10C^O{}D zB~P!w2kJA4Rr<5mB43#42)B?`9Q5l8EMZHdRqgia!gOxb+$YN|&hXffQ*z((k7=)A zJr!>*9l%W0&CQ9JtLTBV_0W*rJbwa$wLe=~`GNsHD!y{}U_MAJ4?8%X$ds5j$LI45 zr}4^oGsas32T_zHM#;C&aUz>=i-xbmjL>k=N_R&R7P<}_mN+&p&^5DtM1PtPU%S8+ zr!6sWfaw|?ZmJ%3_-`Y?rYf+A5;cT`Fbag-T_O^u5H76v1a$|$PH~O(i0NVc~pLke>z+jh^JHg4A ziF37-;gEj!SZP1A@23Ytb;CBG3@{d-IYwZGSg!*YKE-`cnNC|gAwA(IB%qtIZy6oi^A>BaDQpl2kCD2alyrS zk1sYoQp-g8f{!+yeH$N8E}*;?84pj(-u~W&DjT!xgBaG`Vlq5%Km8_m9`MB-xse4* z>9TedI5R0l`pq+Y(K_ ze$n#P(Guj|kEnEpoPq;fbbKp0(<1WsXQjFA2V@30pay7YD*G*O+j0X* zo^CsQ-o`i{^ZV9b?@%*vqvRPyb4HpsjE)ZyPm4uq>v|9U9LrkS{&mv6si*b*p?=E~ z?-K$I390v{JJ>6~7Wp0~JK5fPS1}bcY#F^LhhTV3Ab*C9JxFiis|F|6Kg?gj(n99~ zLa}&!p`R%1FI_^FFR->ZuwZ?Ae zuu0A&a(|~y7l}3?dE1>j%trbbrHpNPu={6Vyj$=OLH6CgU7zCUGR(eS&iNlge?Fb# zIJ7eTN6RRg_kPO|Yu4)?!WDR0lrb}Rt*x)@*Xh7G*>km-zJVui!WUqCZk0j;5Wqe5 zkoPxw*!Hr*j&gsrp};J6pU*c{MY(^*+%`JN=oVgb2T!+J$L3NkNv5sY8wCArmLjfjISOfM}pTh-hulk<$ z&wof~xgdxSfWd~|db^Y`(h9%aT zQh2MTL`{`v?ORZ-u+e*eBiU8twinMA*}?C@;@!8@+k;B%i^|a=cv#lf)epSMG1mgp zTS)t_Vui*rEKmuYH-9-j!Q?*RV_r0L+Kn@Pyg+u+tjBW8`SNSr zqhRA-*7gGfeJ>N{{O3U5OgTz)p zC&G#}4b;^OIg{J5w`P&7d7>h|ogF>RVn#pXd*m6Ygp=<&OJ1K~F+@7sR(f`YFn@Vd z%;Go74i|_8_N<||&+r)VGEp7AuK^OXX&ttpX?09zgxKbM0EEm$ICFBB$d{=Hh#1J6P^hU8Kj1ZQ 
z#_OJ8dII7SK~C;Q$OWE|6CB)Nf6VJ0kSeg?EniD~J$^6DI08}XJcPpTn#!pgk8dbriEgDh=l=Lu`; za!R>lOu5-|o#;TAI5KGO>wf~ebHNxM2bOHFcF`GkrgyD9u zlac1r9_2MBK+G)kAi+s*PoE9>ILGTWf6Al&d@Q$~axtEzIU;4B>VG`E{2@4c!;1sQ zZg;ywF#P!ZJVrfi9D_T$R%msgk-G3$6a-wIn#j9-&(n`y)G>_JyvuGlK+0E<^DJ-xfw+ezt>Hfp^-5^>@uJ7i?=l?M?X*Cu zgSZvNrtBR%JrMWz=ku`=>3TIDi^jEYS_z>3(_32nE?&EAFhAe#q26paHw6-dR2re_ zmToy2#L4WyMHBHrRKD`8iK5)UhqI!jcN3guH9aZY6snX{V}COfyC=z&byD>Tfu9xw zFSc4J-_f6q%^n6p1}UL^u$TNePYs$*UL>5M%KTh*)oX#mn+#BxYAjvY*kkG>3UOW) zl;PoehBR3ie?fZkFQaJ-?4{}tR)0?yp2|G)7Ey+050Nq^l)A>g9-PZB^$x#vbve`V z&0#^%@#xJylw~%1JGC^`W>+yfj6sHm=i*`^`Wr$lyWQBe2yzSJQ8 ztJV0!*3coaA%GqcjRVGdecIF9DjeV zg?cYQ0#!m*a z_wi;)MQwq)Z)=w$bx)RUrvhr+o`3ofbC<<1`g1vg?Z^UxqMvd?G>@&>7guiJk$l{l zSM6;-J0F*n*^Y|lsA~n%iV;cbIVQXkO$Jf8Vo+In%?kqJY+2vE`&>7DVscgQp1@jI zmhoUdZbs8@!$OU<75bsp{@{XxyR5=O2j#;%(f4$fworjOBxyQyl~eWCc7IOX$5a+& zk$gRNOMXSqk9*94rN)R+P1O$|_ZWB$Fc5dw$MI!TYf@qP{&*Hf zBuK>X{bKnoIm1N&(u4GY=}gDV^}bt}y$12pz3|=;m&1mq@jq^(;bsuJC2u6BW(QD+)L=_3DV50FA^lBbf!#rtgn$@!SVB2-QZ zf6tny1pq9?QO3eJ8-K8oneS=S>P1d)-zP9y)HOJFTVez6XkBNB>pL_xSg1=qPsyj6 zHzR_XVNSL^XjDOMBkKw41=0pp>O7+%&ydsQL1iRCMPy>cZ508O00olhzNcz1^bgWF zoHTzPcIIC9=<$55W6Fl<5A7)rMty0BZ@N;tDW=p3yN;iL;D6`67M)>ya4FutzAp{R zSB7JLSR(6neY(%-n2232y}N;$H@Cnng5Cj)-vHQG+&fu{6Cbkf1wSCAcepDKsZ-%BR*0yyfY7yrERc7?(6OUkQuOWlWK z80A%{Hg=z?XW^b}y?bbPC==&>(&|K27lvvM7S%=j1~23I^lqmOTzX1G%P=8%dIoOt zBrC2mM}O9%_MQD+soOFeZr?k!dJ-sL&E0hU9vOHD%(0?8(S98}a^+L6k|1D>k^W%q znN320=LV5o2!}@3?4ln7E#uprlREqJqk)%Hrw{4u*jeE^Wq+EV*8W9o(9}4DW1Akb z16&PU>9{x?MUSy~v{!nV+S$?r z?|*vijS z=d<@xT%cuv#|$SJx$)&G7#R~@97!_mM}No+8oEcBk0bL=%ckcrx;scOTAIlrmCnMnfHQT6Q ztjj1}(GPo2%hkNms=l?oVEf1l&wsqno?q)@SM!s1)AqNoYk0!R;4T$$-ETo}J>1qd zhRXkk(Iw2-pd?72P0>>TPlxXebmn0co%cW0?;FQ$%HEq$Mr0&=976geQ7L2`BU?sx zj`KD{W=Y66q@j#MvNDcM)}di@>?7;oaE7zKegA^{m;1Wz$Njpl*Xzj}F*(kEhKoUE zPk#xYkvzMw_0#-$>DPC77Vr39GDh#qo64gJu6eWy9dPG@sVJ<>uwd@jfu5lv>mboY zH?j>13nZL8iub}k03Npa_~DA8lgmmMwvmU6Cc`O&tDdFVse<3wE=5)r%f6biJ*kkS zNeSt<*Bm6ljxXM$UwUz^do?!iuNih^`s03W`nQM2(Vw4RG}zn&D#-5ztTVthubYjxdIzf^*Dn%g@yEF-Ig4g%LB*K+A!n$!>LRoA@Y z>q+>2!qiMVV4$e;C*$kHlM+n|VrjEv2V^?OOLOnEb5DIF(=;yJZm- zf3J^d+@0#hrU-=Zgww)fOURUkP7C}nfE8`EA22KA0uthjt zdS-gP;MI04lKt1H+pDRax)$sAjK6TquH(`ve(Xx_w7(1Mk>QhDYM$1wwTmxQs14|q zBzie2=BG=uPJv=1c)FvG$-n8W*38AgQ4#UZ;ArhqGA29oC_wJPtCc)3Q4B}-l3 zD55sStf0?j8xG&gu_nh-9dlD^SRS#32nzker8P%!BE4+ntnf1041un7uRL&HD7^imUVysHD@1 z>R$|T9r&zS&u$(~hfO9sthn9c9^d zDU)R@0#TjoCr>-nem{6KzvwRpZocDM{4-QL1%CK*!f5ZwDnk@Vzo1j0B}`BAu(-;W}OF9`eVXSQVvFh=b27`U&;PN>oo--Ggs4tMlRRRnF+StQXE|&Eet_ z;a*Y#LBas{gw+&?@98%=4MSuobZJql^KA2b?bRY97n6XsX=~O-Czr<9w>mm}5n}QW zn$!Gl8N!Hb6bJ(HExgstTu6HlE?lV$01tFC!8(3 zGGEZpnLT8#4M%u1WSOn$DE#J@eUIQ5ZVn2iQ%bDA-|LpKASCEmCNo+oyd!Q3UJ-G+ z_sH9}cvLSa1+_QVyl=tvB$~t>Kpv0rb7*btm7wPyp|V zq0H^SQR2Q^z}nRO%{$d2Sq8TD0dlC#b4H}Ypd2!Hr_Me52y5`vkmX?JXD@@+S5MEY z#ztGey?fAc_*CcYa>Uq>euHuFt%J+F-f`n^!ODbEJDqg6UqOYy?zKi2-F%>99DT=Ht)-KRnuV}NxU z^Q*@(epDJ&r*9titeYmUbO@ytyz53t3e6I!t%U2FW-sOgu&>{Vy8?_lec4bD8+R=i!+(3N3|trXShH6itgHgq_Y!)}f{g19F_qN93zDVEDo6(8s%Q!d|c z)3VpSSV%}Qq|V>IE=Uuh0=?3ew3+n8Pr7X8t!{#}`*z(lzgF9NfEt%=;p(NysO+ zm2;hW>-U#|?ei=EAy7wAkAB^6`9!|_P_IR(Zi2QNHI72fATVG5bj`UOHgZyzZMYDO|e{TDnh<%)6Fo8yo{#R9H%DZT_QLj)@Y1M(q${xOKm*mpjaEdVc81Iss zjH-TR&-~&Z_`{>e$zSx>k6zn-X0MT!>nT4hbug%V%^>*@pyz7;%9f?n;|*Da13*wt z_xk`ThxeDIZ+uW=jKj0|-y|Scwb@Btr@6Bxe696vwWKnVDU<^30%A*_@l?OY%r^~*iatDt3 zdZWjS(5_$wOgb!MUUM`P@D7fQWCLiO3{n?--J-SsOnCPgBWAYp{s&z3UGuSIIU=dc z&hzQ@75TfnJG_f`E`1B{Xwy=8!N3|8_0_`nRQLF3+fNYu^XcGvZoEh1+T(XQsH8I3 zEvM0w*g0(@p)Y@n(grlGE>s)}q90{sru)*V%H%xE2S}z`T&fY1-8&Mhw{O08yl@c1 
zNMgtR<>LMh?=$P~izE#6I_K)gjf)JPsISbF9fNicVW&*63+zHR=Q;Sa_Ml|rUVTdK z=d4Xy-QB~Mpis_w#F2aw`7zE3Ts~J9=qS5bGIb~Ki_1s5 zj~^eZvHkw%6V+X9syJBv!dMDIkZ+AZSnxx(8n?4Jo`&WSt+hztck$yHU=d~?H-b5e$C-sH@C@(z}s(o}=Ck%^` zycc`w@Pluy@E8-Ze@aY@J8JiylML9;-aTxwr-jv0vsh_+K#f&n8#%0VdX!<4;aV}! zu+~zr>ag^9@S?5*9_j5 zWFVZ`8RRolLa<_zBxhMSyn{$M6CyB7Q`eTn_ku&liP>kggP_j6*dy$mW ze9^us;&UVz6e8x(e!xV1cVTc9T0;5;JQKVsp_8TNTvlKeKvGP#;2%E{=HpV$^jDxO zi%8IKm{3!>^I&xZElMNeoq5aT*AQgH5Fqxrhvu0{lcZk7#5p9(?+9CRl-WF-(qLY+ zaqbHYm=MjRH9C;q#W^a=zdE=b=rgk#v9mfdNT}fUJWYJZU3mVYE34}R+QKf7<`}Fh z+R7n%v&d^1TdAZ_|LxpX9&5}n3Oj_nkuob>rjRtJu)Y|6sE_ajIogDgVkJ=w*SJGf zrtRIFp#i>wF^5sh{|Ho#t|4zOOvu!^2jldYu^1F|m+zi8El%1=d3r0jRy$CZv0M-< z{GVgA)25czmZl2}o`CFvU%YV5ijDnQ81Fvyax+1eoEjhd$+Z#HT(Q){EWuj=KUKF>Z1|8*s8Y4}*rnlMxz5WxtZCR4EH+&!W($bJ% z-4(KIth}w(BnsEZ>O=81r8`>hSSOV)|jwh@dc)>oK^cO_(qo2Jrs zoiuDH;#5pt*gOOEZ~n93VEGsI>>JI|5`bBDM`a6a2$I3na?09yBv_OAXl!HDOkqVT z{ie6)a=dgkNBrZlUb5&;{-1Q8+I ztviI>F~I`s5Jq#Wv!zrbN;jjm1^{A}NeaOPg@-i$Jmvo6OI1CU)PddF2ks|dxuh3T zzC|uwzEM(28Dt(wc^i!ok2%w zlPL!nL!n%0Sp<<}B-KtG$O14&G$FNz8+ql9NnwXZJ@g5d0jn2%PyIHfw>Zyitay%e zO5{v0cqFRM@#rU7zye-e>D{%8*X?CJe`koLThT_$myp0d{Gs0Rnzet`(M8#8h|-4j zTc8TtXSN|o&s4a*SSVZ@d6A!`z-{Az0UFOxG)Vhnpx1llmW(T%^klrSSIQ!Ms;q;C z(ICsyPSE8NfN5?3WlVOl>`&Uu4#44|typd01?-NJkz0qrAE> zK1qe+nccbcId*Up*N8{jkgUW{jCob{$aXIB-qO3{SnJOciyC_uki>J#6?e{n%`29r z0Mn#mygc8|o5mKi-`AHCjXtQD#y{G?yVOh7PrM9v%b`q<0-Iw%z$R>QOE`>5&9WpD z(Igspt@q3+cCHj(#raV`vEi*b+ff}jNXG7%<;q#(7^Y0@sreN)q1Zq2*fQ`n;ig-s zuU>+Z68l&}{%fWajrpSboJa;f=OIk1b0-dV)^JpHFD63*OqAy=%*&=o^loDX#kmiiib83Lm95rd5@lXtciqrnqDZXD|Lt9d) zX(6cHS#6*>M;S&c6L24oD+nv8RRd!}*B3(`bDfDEYz!-Wp_6=_8k_%Ia;@L}siWfG ztuD4sHYxtri^DkoQlRLTLHC8>pIQEFmCMYbmrR_#SQxQ7t;Ea5$5c!hw+5s?e0KQN zev3aFO>3LraE&LdqV)Ub@b=QQu3f-m9QY&cptgW0IfY@;K|KI?H`Z4$V>H(9bJ!?rtJj3Llm!`AHsyDPiPg1V~V>5TO_q(U=>%M!4=w>Lj zb@A7u?L1KMeIWcq3Th7{jIFTN>P)iM3s$cl3Ic8w?fh03@8rmt-fq}Ejb0AL6lT2xy zYJ9TsrPLU3379r*;UGtw@1m zSRQ{}rYKIa-y`R~lo`(seYZ(@tAtPw~^M4YYp}(e*e;- z9H{M=wtcqRm2&>YzqTyjy-tHY<3j%hrccy@5<3pT8pka`G(AS&#e8656$n@dW-wrv z1PmMh9_@`2rBRrg4ilbbYkRchDZzWQ$KuVS|5geJnNqLToby|gRNN#@&_?yODW@{& zKdrK__fPDu5wax@+igEQ=QYwU`LFg8u(T40@QwZPgwvlJ9QudfT#YaX0U@tfxo!#L z`pO_Ul{&eL@Ff@J->STNW<@Sad!GFFqkNsTx0@f%6W{A&k9JRf)&^N8klKR)bOiTi zlu0i1_>%+X14F|WDngUyL1+GDQoJwbSHK;dfw)0sqWV1iA*s4(O1j$q4L%gOh+lEg zv42@&GPcS1P2ZJUU2BSe=X#ImMd;Ihoihu$bCXdK*WnwGg1~0g#DI6>MIQOKKTJAL zTMpbA4-MGW1b579dh@V0hS~#d{om=<5Aejt{U}=GG6f%f+TKEtqwPF7c|~=}1Mue% z?J(~v*JwZ557BCBcY^G%Ok4!^FGF2&7u~wLS?lcuD^7KHl>JRTKT@diPVhQ{W zc!BYp?ew^RQH- zUeby~Qx^v%@PmA&6n};VykLF+iL%F0+m|76QpfY@ z59|f{rhDEL3<%`nIn|WW4|3lf#Qu&Cv4|9;7^6O4_s*=t0oCiyzhgP4HJ_S_1~9Cp z^W2KNuGK5Uy-12lAS#?q|2|tYgA4#*%d^g%?$)c41?UF4!^1}ty{mvS}jkVHAIw%H}UmLbX+WO8=bvk1h1V!<$Ip= zG0}PThZX^9vHQm)UqkOs!yGM=4MHkL7$+P#KKWn(2kEGIGz6_VVLf|7#c>drkA=#Lp`R3!di@TYhs@r!+Y3SQM9tm~}a|UL3eRgRb*;VN^mooW>>)#>*LM?zG zaq6F62!+*>!BdkcBH-Fmv*O;GuGR2zrj%e0j{TU?8N20kQayQa{R$+{g_&)|gh$x1 zy^{WNPJ+CFnrUmeo?V4OZK;OG*jHYPgA~g2%fF<@xV8Cj`f1>1*@LGc0U>HryvK^8 z9Xa+6u0bzbB?GBuw%SRqLl_c;p&6i(<3J{kv>`m{kDV@vI{l(c-v}6erq@w%P_12x z+`K|ARuB-44w4IQ?tfM^Fq&%Yr#WaJI*`sYLnys3l>SUCEj)ALE7<#a*v@AepECtD zQp*7m(?A2oEzx#1U6T>B$H+Mf7<~q&XgbOkY9#PujRdZj3TcNOubD&wMgewUgVAsD z;Y&Hp^JPq%=N(;VqfAcZ)2@XIXx=q{aqb-Y5%U_SWrE^e2hCgfhPP*h3mlM6mKtj! 
zlg*vhCw{WrasP@81KW)xw+DeHjgNx|C`&#YC;(5icOr|c(fsfIqM!})XhSh9Ic!en z;nF>GHp_>8@6GjR!qOyI2M%8=Oc zed7eDOS0vof_6Ag1zRtL@Da|0_ZG`#0tt8AC8^b^w1qfv0B>Cz!nV>SDZH^uTkoPQ z3{g(Cs1ruO-l#ZT5UR#>qyOJyF%AmZryi_6W)#$R;M_Nu2)wyn)1PH_a@zYEd9V_4O+*#NZTKu^4;f&ExPqkaE|Qmb_XB{3|st; z{T(co=$?1>sBsqzL{fBe2($*gw^4O~b;~U9!v!#x4SYn}kUxkO$B$bli!ZvXi>T|4 z07!M@qX0|`u`BFdp81Q{SDsvn4qP1j=so`Bt|uhS1mx*|Pzi2GA_k*GnukpwhsDyw z!G*5ac`$Vb9(v|;o8vlG_Xn3d8`m#U*T5eg4FU>z=r749!;9vbyiT0rzAmu(p*d=1 zFtFXC@v1=XLAte3xy#=BH`5CyL4NPG%KW!PG(g#*x>=XfP%04}UymdZ6gN2sxh-&z zIiuNgK=b*x>&=Gmx{EHSHFmOoVZ<$O@s##i_Ge5LpyD{DA+uJjVdur&n+#o=Xa42h zB%FuoRp>g?+Fh9%DSAnYA0EFY;^W0BWB}@_2Ay9S;x4AaXoFCSXa753?p<*EMF&i> zo_Ba-acXKjPavfOC8%OMWy}F%aTH0R6moFx`KJ7x*@3q6E_PbO_NI4qxb}u+pU-|q z*)l)nz<$@uU8Ctf=qs$B;X|{nc~s zz!@FYBuQLspF4&xtuZSxj;H>{s=?DHEP8Hcl#LOaB>GaE<_LFF<8Tz^%np8?{yAR2 z@?ufL=tcLuTiLmwuQGP24g#ZvK&b5M{&H3E4QNuv2#4s`H*KD^Sm|2AyvyzW;@Pip zLYalJE{$m^qFnHL%7Qd+WpDH^bllA$!b;4ErNl2`UPyYqKwcYyNaTdCFQN$7hOhuR z2@b;jD03L<3J_oR!3l0@`6Pn477AT%kMTz?%36Q<%50t}cw;d}@oej#;;Q3WEY1Jh zJp!_v`0@!eg`#p1HTP%SsD9dzeb(>09ibXEbo)`wysLYKi*dlC@=hPdtk+!gPSh>? zEiz?s8Bvy!DvQan?s1Kf>jsL3PIYv$yDvYHNYQ&!QLVhzV|xEBz@0xK)pSwnPS7Q{ zVEHe;nXT_vj^ zeJ(lOga+1in}>BFsat>Ry(sH&3((PHg6PU_%0C`}qut;=O9l@hNKM-!MeyE!RsGpf zJ2b}zz!mTe@%FakxC)E?Rpv(AhV|5vYXkmkINaIlAcCRW;#i+$&rV)MPZ={d2FLB= zJoF{yK-XbRqm+~WeY7nlO*;24NV9UDfzRD@;%gA+W4`5cXtLc4Ng<)fac`=h;=HLH zHiu6Tc|_Bz2kijJ9y}^|Y%xqzk9v-nfO3B>C~Tyy;oUHFjl|QJv<-H2^iW5O#p?}) zgN!!Xl6|y86fma@v+G*d`A>%5wb44Uq&=;d{N%zLui)2V2j!2>RbKE9=2K;uuPnh% zbh}m@|28*<6)Rr%vcx@yT(jg6S8W?mG;WtU_$U&5V-k?q);74{9Fk(1w400zmyuFb zkv4y^bnr4NP1Lvo3T}HGv6nMD9wj|DL89s%yiS)p!b4_2q?r2JIizk#J(4U5Bd=ow zOD(z1C3ZWNRfvwK^A82|^vH+Y4cR!e`t)#+pqTY1(1%C(O+mQQ z6CZ=50qibH@j-gb;W!8qM??&Ym0@l60-G~faG$mdK%oa1XcqIX=Y*D%)iQeHb^i(Z7j+pwsYLH2pb z9Yj$#ENaeB0UbKoy-d!RCX%Iu5pjc^y=Xo?*25;64nWaD(c% zp0>!3xaT9XivJ-4>UxxIk?T+T7^PN{*Y->$COxx@iCe-7hN|(!s+8!o`*O2jy$6-MgUOn}x+vqn{38&F9YN6ih}Jv8kO=$m z!`|TPtB=`Zo?T-S??J9Psjq)MOs$VHA%pFgwL>aAq;3n4s3vz^+-Oa-yazoK5;DKy z!$qC@w!V|G9e>q9!DrYGT#mF3`n5&@tq(o$D0VX}K}m-}my`%driwaY(>7C$)7lrw z_s$FqH5CDjOY(MC^hTho+7I5{O$oKZ3(s^bk)E}^l!gOx()gg*u_qkxyx0}F;#2DL z;}nYOEMHdz22J24!M~+ZMcRN_fe5!vgOh1z2(qgsbg0H#y898~iE?9qvCggB zw)q*IHP!(48^C@Y`-CjrwHBNBm4&6X&Bs!QmJ6yzeHQr)C{`m`FMH z#>fu+9M=$ZMd#+9aE?mB2LRmHkk;GXTf+vK*lt1-{Eki-RMP7F-gZu0Nj>aF2g6aK zUY*f^>hZetRI~BS-Dp|rfh-t#!|kgH^BHazqrL1gXM5-p>t=g0kn?jxB4_gI%BZM_ z=U&e0cPKshj^xeAD~K9JBwe5$bI!FY~q)c(@cF-IwUo& zee<9trmkI@xnwUfJPdp1culM;Fq0#B zFm5Eumr?^KXUIZNxqYp3_$7X;LH)}Ny35HgRtczRt~DJ8G(esLlkRNz@L(dKNWDxL zy|}njw0oG58$|JkgK>ObadvYl199m;?fTTi(z#GWB{?9WtNdcjYpfPdedJuzOAhnr zD`DxG{ibv!D@<~NTi1{hyC*uch|KQyIc%5-5(Uop(EtxR6rRQO%@D-zustELvx=Cp zVn^hcn)!?HT@L^9g?P`&-_w$FrHkdpE&o zCCvX)YX;Vl5=E5oDJ>)K%dDT90|=C_kIgdtMh<=s(XerAl8y{rLL~a6V(J;W3D4&? 
z|KTzDxP=E_^`B7WS}=8581uH~_hr=weuM8JyMg8j>z1pTcXAka+DQwGG0)z6+D;< z9Rk)Q*h8rUI;US*AWs|oNzR0U@4Qw^S1dn3U?vX|!+gNf{t<5uO*7+7YG2WjuPk~^ z9%L&zll{P%jlhe(d`F{oEOh+tB6@p%pHar#kmL*d{YI}r>&T>(0U2(qYK&}}Bf-3=IS{BRBm3{Y04?M2O72YXz zk3y0BtA5cf^Zi5)Z#HoJn81@KqJ|#URsC6edbyES0RQcKg!*Vn})SO6Vw?4tvg7w%6N$YR1MxlS-ZY3_KrA zbHJqWX?>Fe4QX1&qtTIM$ik6=&()zvNMqLXx9`*6wu<0SyEf4MXto5bG^^;@6q?LR zmN~H+RTce%>}|Cxy)f%A@4?Vkq0 zyN5|_oKJaFh4iBC49RlS>9SYe2M{euqtJ%;BLfG*21N= z%qilL9F5nKpbVk|2~)4^3`vk08{il2k1KMuI@Kk>2EbZd|CWNTJtfBa=HuD+gc8Iq zRWf@I_M=^kOW|{_4^U8jT2kkk5r$^4FL)o^lMcG*yT@AZ36KmIl-+<#%g6oR9o;(c zY|5%`cN%nmB9VCZX*lEcT)1LBn3(2pC$LY*YUe{i`4mV_YH08u10dnrC69;er+z?u z&fJ@oYSgj5-0+f$JS9ol?Q`g$JLG7EmfXi9Cr^|}uq=Yy_^1Y^#bmowPJ2O(W1rKN zQ+c2$wT?>G4*sV*kwE&X`{3M|-W{o?82)4TMIS5f0?DV>x%Siw8F|1f(IYcDDYnjm z7YRwEcdSB(PG&yUhmE|>fp3{NrX0NI>w7wa&Exum%;T7jgVV5XgX5<+=Y~QWC1Ts# zr2OiHiIT-{hlXH=JE--Db^dJ78LfN}hT)_FyJ{+71_UR&5Jyi>W zj}v3Yk}I!-{-&6mV0iR|_Vb46)%N1=y64@^M3yglJFm`H^qv>}ZPP89J#c~gzz@T( zzBW;Hapbwm*T>1;`uD%Rios662Lev-a~}=lHys&LrzU_MO+$i5civps1S>AIE{Zr9 zPiT9?xA+(HcpT_*H*+gI-(Qh$w_8_oFmcMe78+wEY_wMDEIK^)GP2%2Yn|E^oqyA6 z9DB5YFuB|tMs!q5ZHs%rC8Im`3x+FQ-whM*k@6_%5E-ud>Y({X;b!aWm3*G!fvpnD zQ5pJNQa~yq8uq(vXimo%+Y*Xnr!gS*qGx4XpOu>m#Q^lrJ#o9<<|-HhR&RLP|Wrgs4}9k0M^*QozA z(~5ZfILr?JPhDE~5kJ-J%B@JEu?Gt6Ne_;qLNJfMvQWR>Aso=qlcK{XM(R?1klzGoS*DgNR%elUR;2sL`dY;tZ_Ta7Uthl*BXg z1eCYy4w)N3;fk*xoa^{Oa#&2Av*gi(#~EA3AXMT~X>3;GV3G63@l_gjT;p-3aUR+O zVi$*l*(^xk6FSwie~1-)i?j&r;0*%AwrI{gEY5m5aiN<-=L&y|J1)m)0V;Yg}Y@9X4&KfzKW)_0D7sLH-#n%%bC4(0}48O$iW zkU_mjA9*NT+P)n3?MhA(V4w_{ZM2__fbXzC{PKBVm z&?umbMyAGg^6LN0%=}Rrx|Fti%2*T^yB7^jID5#gKH!}5f}E*7b|l-M2RRfSBVl-(Tpf@{(5sr#!9^73 zA#cPRwqP9gqbN$IA<}}n8Vl)i)ut^=9!Z|dEC>3J*qFX>;L5vie+?^BVbqEH+|!<< zIxe9@f)>>GkL_o8ZL5=jXk#T$`p;2Yx;*`U;4qqL1B*qm_WC$QE}n zeq(XX1~>0H!xsx4vc7!0T?(wS0kmMLome}egIaj!nHJ}4&{j{+z{*|z`s3f;Qa+Pf zi{r@YTyYf*jLjAa#O(w*m zQ{@rc9VNqKPW7@9y`2y}mbCr9dW}uQ?1p>Z?ex6^{)H}MDCa@@+2+?aRvVwY@SjA> zu1kp-DBEtjSyHZ8KfBs3uOo_ArDu+Qz4*4lkY&tl$ycz7X$>xS9A~g%U<}-@ssF`C z(;7|bS}b|9`N~ywCUpSXhJS3W9MP|IJ$912cF+{P5gzS_43cxN^9x~I!~>w zg%$!Yc4b0q*=cxk>H`jDRN)i)+ZTUdnk|y(Pdt^FYmDKDU>OS^)8cUa2O6R83iDTd zGBtHxt|%=?v3ttRQpv;a{Ec;B3y$x23G-)j}l#HrdeRWc}O!u(hJOoxKy`x&4-MY z$2eE|X2$TRy3!F}v*PRx2YZ5(+hpH1nJL|Tex8_4jEWYzn0I3fD`6oGh`CCe``)|F z*-HqkJ4vuh$lklEP2M5mrbOPSq?IS!dY8;8eBd|stjBM7wPoTydnowvSDA$5+FH%m zjNmyLg)E_HDM87Yb0`5%28eZ)$cY76>U0aW+ji*Ch4382U^o_G#S6rb1G*!N^q$*+g2N(KbwfVSdq~@h`mT(=JeFE>Z%s5%>s0mBufyH!-+mNAqdY? 
zZ(eSY=5KBf-1Dz`O%#(!3R_>bFFqEV4}HVc641e}1Iy5*BQXJ7FE>gj&!`zx<)fI# zO93fS_xHFztx}E*ZFI|wmq!oC<=30n?-J2rHMvg&-#rPO%GBuMk$!d9M)?&b0=~*; z{e=?aF`m-i>7hrqE~0(pxs?>#9dQpGdS4-`s&V3ct-f4Q=*#*<#V+{|Io9YS z)a#W4fjznFnW0(uLEZZk^;c%^vr5d`>k1Cl%TKH)*dKKj=7}gTE<&B-I^RS*c^D1t z0L^O&P|ps=)`jEr7kY?d5r^2QOrT7ClSq+tPW?-b*jsU(j#^GYdP=2+p)Oo5fefv_ zDBi^wrI?`3S``qav7-*~x=#0gh_fIg`34=p#~qgmEk+9@Kp$uv%pTKQs9dYn9NK$g z%SioJodq2@HN_2uA{(e2pO5$RqJ!Wjq?#L{2jC&Zx^Y7tBe2V*yS&zVWTg5BrZWo7 z`J}5#m9V_9l#Lkj5*|XK)|s=FK|@Ls!HA(I?m!xhIwZ31U^o0Gt9L&>yfCzI4KNI5 zE!8~i*u%t#4)*qOAL&SLjOpFOGBZ`*D#T1L zsmBI}f)A2<j?y4*sFa=@X^R6XOCV4@M% z6@pG`4MR34BD>;H>XI>{{?0q2%VX%6C)Wiz%0vUy=5xg(W+h6J+!(MwaRlkeA;P@c zb~5$TdZwQT_Y)9&QzFBsM~vO#6{aM)v7${)RD40Q!>~nY& zl%qV{0i}+K<1eE7rnY|Az)1V4Gm3fpe0vw+M>-$&45z2RSp9XDKCB#FYOrPU!iRV& zp;1=!#9fV+)yeGCjSSo4m3{l+HRw9Qb|ABE1`-E^EwS=v(jyH=t@_rh8qyZA&5+3}` zfEDo+oRee-7*Qyh$%l0%g6N!x{SKvZ)?`ZX&{lIq!{{otgk8(bFeyhPIpXiR`Mb&0 zZ$RTs2DfD*BusnUqr9T(@`3o~!507X2JH{oEN()!bgT{>lhW6cUu}{KEQD_5j*Apx_78V24 z#6_l=_YSgCZN#Z#pn5CviNV>Ac(t>Dl5V&{roo`lhmhjIpR`4A?<4xF{sd*fWgQXB z)ptN13iB$HHgg1%=D3AdqP`XCyzAsc7+>59VMYj`=HN5}25qz_Ie*gL<<{Xb!9Yab z%GD@>8^H}86jEbAgug3dRjFuoAdC^ZP+b!Ds#6;y-L>mxNJ=h-trxb}9-04+q1K+2 zs;Oq^T_e>t!i+rsqSuc&hni?3p>bV0WqnC5Fod`+V6gBAS-LOJ!+5A5u&PU%LL7iV zC>Bc0z^Ox+RL(Lw$%sXH92TcbXEh5#27_?J3P4x=K`Ff3pU z7!Q+Hr$DVOY0W)(?qCfB@NPFumFMXPUA3CfVjkne;Z8%iUT)Kx&)?SSEZfQh8qQWT0-h9|@TN3pJF zSSn(om~!X&Ph((?^YJ8#jn}m7#2~Vqi+RF4Qy~_2-)qw+djgTLeTFB`N#m#`He<#p zFq-!ESMwp7EI}1tv&JLlvQP0+4#7N2{lP%j`rAR`?5Em64;*s{^@1X{tsQepE6JcX zrhw38TRwPdi4_1R;q~MyrJr~wU|;U`5wVR}@e;Kah1_}6qTU1m{z@3 zKZDy>il|78TVg%Jl0%*zE=%AI%!UW2ZK~DVgj(;ZiCF5`3W#ZPML(kzQ_Un**I;xX zUN6pY*@boB42UM7+C#27n$lW}ZXOe{a{N5VqcHDts;QVG(;KCLw$SsyM?clhH`ODs zcUt2)zb)zn!h~Et8Dk9mntd5x<+i+xU)KCLj}<|Bi0| zen%DqA;JF(_q739``dlLacV%%aDK%AaiM@lV7_i_0}L%e`O9Xv^KpC!Z?KP03*y*d zdc^nV3`NKDof%nSjy6iwkhQ~eUKj`fWB4OJjWJWAOL5&CYtsx##DDW$=IeZ)2O;q9 zdn4M7`|6AYTp5Gwfma|Jk$1rN6cF?Iez@-E+YTkFO8(7#5Rt9sVgURdpf})U=kK_0 z_Pq{BJ-_XOegwPV{2lF!B75+r&esLM%lGN;b{(MI&;Cp=rNu!L(M;7v+o4Fs+PWF8N+uW{5kzoA%>JNT1*O&f3KaT#*a~;3! 
z`0Z!>?w`-cb0e}lKL_~Hk)Oj>wZ!kx?&uGsKIWzVr+t3>q z_VF|B{+q`cQV+OAIgk5tZtv+|u8Z^WeBIoxc%Sb$#QcQOL;H>MzW^QE^Zdp8%-5gC z-Tb?G+?GG@nay?eKmCG!{LXiE&I{r>4>NM>JMaJHeg5^ja53>b|LHjXz7O8}J8u7@ z=fi#R9^Y|}{(t|@k9z+1`(EfboR4_`QmgsApXUOOVy;{I8DG|Hk8u1uU-10+{+jE6 zF%O$xb^_xI#VqqU{dvxBD(rVZgL3cys+J$H=-jR|m>u>36vwame2n*R|6p~*^=JF{ zV=u8a{cAkqe%=UsGZ#|-PLUtLlg$5>2md#V)eP8~mwy7h6>!E_oxV%gR}>OM>dUYE zg(_mGQ}a@csR?`oqAD}adLByO_3qo1bG~|h^l$suU*140>N8(2PJ^L&Rtqs(>#$y}c?=R7KkM8MyU?Ash1Q_4a|#9-DyG0ipkFk+}B zvBARztQ06P@DMozN(soc00UkkC@FxGa6l3pwY$JW0jGi}ufPl?AWnMBJg|dgbYITH zZ4VU&M1ONK?OL!~7;APNJdYmpv8VReapcSFUw_A2K*apKN<*^xb>3uos2GzfsL2VE zCy9@^b1LRs^~{;=*}&|R70xAtFh<8v{25^SRL5Him7cL`bYDXfMtNL`3gjuZkD=UE;_6Fz)%+^2zcs5 zG-bkk3O;vsi1=xNMdtUnJnS^@dU+<@Vt)fx{`*iajrUtNio@-;7c*!7zKUjnf)UQR z#^j2pmxQ-K%`T_qOG0tOqkM#dbr`?mw9?!SFL9L%nqOOS)a}g8jqtTzue>2d4Qdir zr}Vj8T`$4?xxcU9`=nV1m%fmQq&-kqHVf5dfF#05u^06lig3ty?>kR z2#jj?GM?GxYR{zCSTH_vtS)8^^yjh4N$Ym&*P->S^>q(jr29i9_Xk-51Y>xc5MR7z z=F9XX;Xt~Jd$OpGFE(5)9$*&;2})lOrON{BXoWWrFMsb?X3HaH77ePCnX_1cj_=EDKa1XqpAbjAVND>YhuZ&XIbbe4Fbr^`*1CXKcxK)Sc zySs^R zo^Y_~G2L&^Pg15#J>kxsBKx{<_Pd($ggoR0 zmdip)p47tFPDs@>qtjgK>V4j=8Y@yl;dwpxY`&-o5D#oK7qsmw%2|~kF-*k)DypgS zzOFesD;wt0ym@I!M8YObru7=!5#D*R?%#2GB~yUmsW_H7e@QPs(|@8r(krjqRy)#) zjF*=mIq=lockv2TO!s(zLaJh1Hw#J-?C@Z@Y`Na7#pD{ScZ8JQl`*QAmwes(1}4>z3()%YW4l(!xyK=6*KSjc0Ue`sUkif)-$)sQv>=W#f(mk2UB$$~R+5 z3h&>Wvm1%>t$8$QSoG)2&8O0AFI?MH6H;`NWn3*tGA9oX+7{{Xqymv7v+F-I1@T~b}seB-q+NV zS~o>c1rNEGnag3z@f(jkgJ1kQHs;gLR$USz8ygEZcm23K(p_JXDj1gVi&m>XzUuME zVSC}vhx81j>3>$mih~5azz=Q1nFTpEEeH@UU|mkQZh#%_talPbu%ditdvKu|9ZoN|}7yzS+BgUw7JW8~e%?A}Xk$rT~9o zLHw!=Mp+kqKlp?Z=a_JYNJud_NaKHf{q+re7$3 z6|2?dVSfqYI=jiBIs+cI)BNjAF!&;Xyya2i8pm)b!!QB)9zP$VEpOM&JN0iq>(~S) z>B$)Sqv|VBt}v8m?Fu3}KWt9oq_)lMtyV0jWNS!BHj$U(!MNORnVz#^cvxR$VbmW^-PNvIdwO?LKV5a-ivDS%5Ei0jkGprgr6jrBElR}( zDa}TtxlNZI(qxy$9|?l2TASU}gBl6z3fzK~yQ%jZ8ho74!cO(e%Ann6_e681w@cIQ z?teL_s|$Wi*n59aZ-j9+Pf=GTfc{*o_oFe)*VT4vJve4J)+86H*NDNer&Yv{*gV?j#bDk=_bIDeFbQY%x#fFdAr>!fXnz*# z>^(MW;&k3>67O9jL}o1p5aT60)Fp_{rEFig>75iSTm)W##1-$5Hh- zS%C}0F(rnj0WC=51mJR4^DJ~1DwOW#iL}{n?ssPBiyhE|id4r2e7(RKc~%gOBmqq1 zwzDC!wqeS1VC;6o8?~ghxCfKcj(=ad91~B+dWctX9t;bA3gLWvT+k#cc%yIQP?fvT zGeZvEvBS`J$ENT0h<`WB8^LVy&fgM97uhhOlCz6tIXM@(gD^;KOUhmVl0DcafeknSRVg#(!_g5<1)1 z;zNPuedHIAPs`GG+6bDFsMmKLg%!lF%WwoEs=!?ySMInW+zS`@v3749HH%kdnF6*J zut&e|gNszWQa}}o1qxiZMRHGAwRu1Q_~4t5j)0llw`8#uP+`jiwPb9RM^L?k1T1^< zK{+U@RBjvVITS1iJe2|kaew?9*cVcDEJ}Tq+XAsvBDfe&m-5oLYf5zxFXD0S5p9x> z-dU`e=y5n~l2uvDFK@gx9V$68@3EEjH~ZXCpX06{7B8!Xe;w~w%mC0Ebhk6t9a6KO zCJl=3j!|h!t!jM`bPxh_)b&}|+tu4mO$Vz7PyYnIb-RwLwO3ykZ&?)U9@SCJEv!~gENu%le+^#szNt?) 
zBFR_3(N0Q+z}@vcNg=|s;3OsweK+iU{jLonkCb7%p!(I_Y=4*Luo+ZR+m#5h|hFGS}Qt?@VrAj35yO;<_pv`q z@`7$v=wMA_pYL^B_wFSK&q~$cftyj?dv%(~$79^SwxQjf9AcHca$$YDV&;hNYd|(S zA4p9_RgHG21hd~#>ZRVHx@5U}3;xl0o1{89ZpUAMOug>2GHuqGX<7HzBSC`X#ONNp zJ=^AsK7ZEX-n{Z%?WlV@qdIRB2hQeUfwm`noi@j9n!Hx8GixpCH`P7rqZgY`tDLe& zZz=6#57KpYQjNo&*2=5>k$wf|#JY_W-G1tEGu5S`NtI~T%MXn%{cSW;$t5LSU3y0=)|Midf2kJj}j zcfZJ%o+givNxnTlMx$5zt;1eOS-d=`M-$|S;$DBQr=2N#OH}M|G{^Ep{!li_#@KZc zO~`AJY&)WBf+W<1Lz8ar?rKh6!X)YJ{;^ufMjPnQ&fod_JbT2&5t_tL3Z)Q4S^BGJ zd4GluRZ8{i;q5%rHCOQun?5fu?&UFE!t5^UM{^Ti57WEP$?J0hJAv4P%SqzT)RTMp zR=GEKUZie7R#zN^YlSLt!CP`+T}g$dfZN|RtJVIx;*MKeyjegXz?*lrPg$kFb}@2_ zw;5~H4=Ru2b-4pOeGF`~aDXD)>roe<7JsH2rDU!-j{5ECe%STdm&k}$u4l8Wl@8Gh;Aq+&nFbT>zUke)4Q`z%G>*ZOGJ}VSu~u$yMJ;e zPqp)E!mVOrChl4N9P|>eE{Xg0<7)1s-}smdSHKYVm-_a~%2kW775LENu)ebg3DK3hj)bi{I`VmKhLL-$?@MS-`>V8*{JeB> z@!DZPiyzM(5BU0rcRLNu?X=O4)>1xJH|@UKfd|`@ zn)YOG1khuIst#^(|GX1X5;=VK&oi$?w@O%>w=UQUoUr#lZ_kSmUEvG{z zhNv@$_RMS-U<5XA{OZy}QFee(IeH@u^9Rh--J%a_@cmARkP;nt%Fg@P4a-x6|6Y z)nmUmyc&}KR%)F|h>0uv6E=7#x)IFt81ASEAs?uI>xPsZy1MsPw?2bdUXH5%Ez;39QV7lpRNnQA1;tGCbuG=D3(cQ%~4~sYrXy_4N6Y{PKL_m;HBPGc`V$Q4bt=_lKS@I9msqH&16x|~+ zcp|GoG$%h5!ZKQ{{f)PabE^<;D1kg|Ec)D;fosNHyMN7zGm6cvySuj8{or%h?B4Ed zBINqTNV3MgnPpBrStqf5ViR3NZ4%yAI?3USzHi?U5wN74MH+IL_YZBqG-RPyr!$4{ zvc9`n$7tM~Xhyii1F9)ZE~gi5yCIZ3r`qJkTt71ZT%4JQW)5!Q<&&u0(y)((VTYXJ zM7w=`(|dDUf?w-COH@+q2Z zyBxv}g9LhC=`z)tOE%b53#V|^NTqXprlc>OtJ7-h+?I<08X`$i7^~G0bj7#hs@W!| zX#XPB=ly=0PW&58n7s86A$d&km&TnUJvy(NB!BjG^|2>2WBb0*Aal{G;2i7ZVkJVz z+Slau>VEg!GJVmJix9$@HntISQg?6N**~k33;w(ps^sbY(M3q)Io^`f<+9xp?(!kW zpN+oW9ac-y&HJ%k&>y#72X;`n%0SDnWs|>7Q!w2#i>49M!Q#08fV*L^r+pKpp}qUO z&wp3=g$Mr>dZ2@nTYZIHT%$LlPErmizqZzEI>*#0%@q~eWoC=%OZ&lxyJ~VMM`83y zHjzrzxtK0OR1nJ~kZkMQTQb`FNy!c4@q~UNvKLvvuNS@s>cM!0uuy*PbqHQQOvk)8 zheg&yKe|r)7g3l{f;*?!C3Hl%%c%7Ol7IZqTlmZra~VY|3hFkIdQs`Uu{zh+=XSIs z&hMH-vP^Cl{k8-i`CPm~8e{h^5A}-V?S;-KSNO&Lcvvo0#sQQ|<0a4YWW0h}(;d6fo{TjpE{tZr@kUOH zi@53=C`fN@f>tF^jDhL&az8seIAy6gJul}XUf9M)TgS^_8%B4hw~ssa#dU74AD$EK z_|eHC*T=k1FN-Ue?Hm8KY`5k<5`S+Uh4{BXR_CR?er2XULT^9;_04%%Ulpq9fFWAJ z-@0mxrMbDK#N*oB^xcX_)awO#v1vbK&SM{F^kbRbdwub|U!KJnEk04gZ8oEIk}NeU zv~{MvxbcEoougDJn`pVM%FX)jh&+22jufv!*BQlgkIrqBWtWDNvAL|*)qizS)y2Yk ztuI_6owp4H?JRxDB=1@6Kh$;gd8aaS0mR+*)D2~nz25q^53yz8lIQe@A#Hdu ztf6byirZof=`QHS*d1T{)P9@_1i;4$kEJ>z{WP{!B~#WyB<@wIE)IwC&N>~5{h-KwR6g$mdj=!3{tQEQ;bLWSY5j<@BwqF=ixD1Y|v=gjtW4&7*d z>kkBE?EAx`p*weKmYa3Pke%E0E_F4#^r7GN?c$sgdb)Y6DgAaYjv)WKEY`r;x=d-1lscRDOi)03L~z&f&2pDu0c=nL%&r0KeNg8u5c zNtAWe&~=h-5VHtZSAS5(hX-*}e2Ays)|=Ad4%Cw;Jl|h*uZ|SkO}v)R6-ns(OB$`- z^^{9g71A|*jru|BCiBS+g*tdGSW{}a%5q|EYZRh+;jLoUB)i^0a7K&*ZuLgZ=To`z zT0yO|)0y9Yj_I0;B(PTrHw#iI?jyZP5`_P0A{wG$Jt8Fd8h_Rnw?nc1DG)hl&^%}> zI#@Z4?b?VDZH7QiaeDP{eNXo`Aw#pd*qkP1Ir*Y>5npvKgFxYSy}b15y}9?KQW&a6 z(o4;uUMRO18}TB@L0Il+u2|D8zsz6HyMSmnsUEbXF5XQ_QOV0U%9)rcO(P!>wm`c0 zi#08y(6>@-Rex>wxgBlElXKoG{87j$dUuue#Vc2OevD6BDg|Bm+SACRd?7ZH^$9MN z)UH;DR;=0=l33}sL?ut#FnKGmO6FjL$zo}-rbLpt zE0;r}n}_qm7|OHN^M@+u!TEbQyc^~OKmL@3c!^2rXS zMxb&v3q)fH*`4m0+=4~xk=G_H%EVDuh3g@rt!yhIzjN;gcZ{Mcr?RYkp+1yM%F=BD zT?xlr0bJ4hScJisC*n0-PF9*8FM*Uv4|C_McKH}TU@)SogltR5fq46fq8s79@ZTH1 zAt&?V>3^@K9qVsuG7pily&u6$z3ZiB^9r`>URn9%IjF1~5#@sh>6}DUH6(e9P0mlK zbxjZ}DMBRSf+5cCYqFCKxs>#pUvF;prOloP8=N-UOyeBx_{|uL@=H5&+i^g;)?6#w zAdfF+|FP%j36q35{ySZ4`X`5tGpmN(n5K0Io% zx&{tcULoa~surX;^ZUX*pSDU%Cyvz_hiW)%yaq9bW2`_4@xGIm&2(q?rGDCTr| zFg1OM&DvS;_3=V4UsK*`89ns~;%|@gKH6|sLkpshQ)KFyo=i0*i`y0Z7&#%&YO;Ar z@_%_-gErudI+TfYAJ%ib5}94a4fJvmzJ%-BO#OBA<=f28+n^wTN`T+9JM6*1QbAQ-ePSWTz{QE-5%t$R7z^Ox2FpAM#e>o9Hw{L_q*MG-QMExVwcS^zA#<3u2eIB+WCzJn}4M3 
zdMs~j_r^Kbj?ZF!B0k;c2oImX(s?4k4)-hy#&Vl_UjA^3nCgQ2_O$!V+3l{%{UggC zj^5>P-}|?q&&HiC2UFy*dtK*|RuiJ!&%L2=iioJf<@WSsPwoC`Sh|{zhjFvoCULPI zc+oNChe4wa<4ZNZb#9o2Zd$= zN@@+Wx~10fwz_ZVJ9Kh(o0qxwrS$w+E|3MKn*9_t2lICQ9F-wkn8B%IZm?{{8ve+B zD~qq~n;*<+O|QWJ^4v+<1$< zs>>G!PuNBqNBi~SMbq0A7&*^iYYJW^7KV#)zwUBtdha)i{MwGueyf*K(?iwY$J#-6 z>Dyetl-29npLWYnuMlm`R6CbF2oec8h5dUSDVs}B#hb?jinnq7Zk}mhX8WTiA(B1z zQ2^Pbi@9~45m*zH_6DHILw|oQo?DaQs+La4s>=}tS-Sk{xlN|W;ci`STK}=n^{y(D z*YkCesog^r`O8ATDC^gNAX^{;Dla9E_+13)-Q4ir%JBP%OE?2Sw|G6|FMYQ;OpE1P zleeeOSK3h%zroiKp516$=360 zU?%Hob2mOh53PWE`0^_6X6mG(PTWb^c))@jw*ivt^hOU}ZB*-BoE1&)~18K+UayODxRr`9{ zn%b^0Z@G%np?`qqOouh^7hPJvq0m>aj@9<R^-p{o0A#lk9%{%ft|JZ1BH z1)#R&S+h@xTHMawX_(sWaM^FVW~qyEdfVSzP%EmyKKDuoGNU4*BAHenN}xg9A*OqYWNq;OrITiR&##vQworK)gaupe^ z7Kzp?(0f<;K&f6;h8u6ZK46Ig7%tS0TyGl5MHQYPsI?ahI!r&O66Dsy=fOPBT3{3_ zG4qp0#+;0-kPGty#e}%;{0MjRDb_zMKV`eWytC!=X{`@x2!GZKjyY0W2Qunll#0>P z%vV<&Jb#up_;6kGpUAL4xz~h|eqT*#uHT$BWl?*2b+_iJ7RnL@bXuTJ7%7vzk zIbx*im-_MdI^%`e*4ssl%O)B5xhySUhxs#?NpK_sNP!8}nmQXxVsY9S{R@>|6^gtq zHPEC&IILdU0}`OC6!fB64W{=%eow;Zv{V{*R}X!{rQY7Ttv8NFhtJ50#X}XYThvl& zaDM<@2O*bX&Y!r@j(3WB8{sIYg{zZZT7=;5ssRlIKXFO%1On0T z%@$C_$z5qL;y__r@)>qG^ZA${MfT9B7om$29H_YboS}_)-M(;!F*JH2`E?`VQoy&Gp&{iB>UVrHb zZN0+YrF$)1dtJ2ixHd0I=GGE4*!aj`wXw(ewd!^{oO&y4iS9%5qI$Zd%qp22{vo)p7%{~i$~T9j zC_UQ`K(Y@xKlKTFw!IPd)+1uhh=$aL6^f)OL;HnMQ|ACEh1)$ zvZ`&bG9=Dd`$#{9!7y!al6*VMi)6EdjxoeluXV*(@r0m!0l0^M>Nem_EsRf_DcLk+jV;q2{;#No#URTDRJvEPsGpS}eh!<+( zWs92}E@*32Y^Tu7`F)+%RqCHE^nyODR_w;vg7^;XjQMnYSWCRit1$&SHX2Elgmmxc zcjCEj%!hoYE1D&fjY3@leShABSG4;2_`CF79OC5>awk{06D(rVWFy@#08?dCQlt+O zwKA5wn~^PFNUnUKr008}(@GGb{kQ~zKJDmGdcII~=^6#O@PO0^Kk6|_0h1gIXK4du z&---lqjGUgjbhUSv$+KQS^+O9&@xa=iaDZDtzl3PS6lc|;l%?i`+pI@TdFC+ACX%4Dp#lw zhf;MY={aX`=Otcca(}*a{G6Y8ew^wmY2~tTDtb*ehwXcJEf$xBz1GI+o@m0_{Wxh6 zEKnJ-CkgU1$yk&>Yj>@(P*@Ov{oFxB8`MS>OS2$JO}JrEt+$6@d#az8I}z`@5D<&o z`I#M}%V(!hoA%EWkRgj#Z^!qlR)1|rP ztp7OP?!G}+J3XP6#1kqtfqB`k`Bf+0-my5URcdTe{BQs;v~J&%O2^86b-%q=)37^* zSWk4o6PLI1dVP?a#T&Tkr+*WO7B0Jft=anat&78ceVB^l=0Pv%)#C-hon_v#a=TgR5%Um_pIFel z3X2+dS)5IxqaaB8+Eaom^x<*mNtdBjnJp|}5x6^^I4Z}u@X1e7-v;$~Ip~=#K0o4W zav_z!BKeax_NRAb3R!JmV=OPXRk~pBMbwB5Em)nW9e*^*S-n5%Mlu( zYx9)Iv-JxF*dZH8RT7(s2V2&RWE#nE*%Yi*b8H^jINa|Ho0 z>K1nI#UxGa_0oyCb+M0Xazt5WTJ2W#QxTU|DAm26eX0r4kUN1w{B55)%n-SdNq!Kt zSpfwQXn$m8f?UG~QRI~l4@Wm#PSYn)NudWF*Ae+i+ej+GwJt~vd|sVJeNbOQT9wY< zx|`Pr?(wj1x%e!v+sN7-x6u1Ny3!^qjfDu+uCmvS^rq!!&afhy+0yqD3Pp~|+wbU$ zcS|xII_u|y%8gg!C5viGAEv`If0n}=gPq^mG=Db8rae%X$9^Tz_}j+cZ*9K;lz$q| z$jX@|cL!JoQJ9bX(9iL+?-q;L^#nF@gUqD&4(d$LbP3ZNXZxy{00_E=PiW4DlW63K z$RX*}KW0eYcF~VOX@*DbwTIQ_?w8W;|z*1E*(SM;n}-F}F<8=(A?t84YmQ8p$hmd-W`PQ7yU^K|LE^+wp4m zNyLM8#V)NQ=%e$Yp(0|yjYcIy<-f&k`5_;RGXVaPtpz8-jO>1o>Mp)9OOskUWHAYr zzrJfbc)!7vvYyQor_Ux#Z;Mi&rS-uvJb$>uZCXnNgzvg+8_K2wuK5Jq#OmFB^9N!j z@{Rqn>d5MQ$`yCDha$QHAuGxn*N>?ZuCbKUWt=(Di8A(!b984zSr&)CRjqtWO8$rc z@qhZ)LX*GOnf$20%ylMbRIUE1GeI@-FO`-5sKVsnf1q}US{YQC%?0h>5?-fFkbmwl zAMuxGe)%n-`cj9Y@CW=)L%dEvB@AvIP(98yzCU%i{TXEVT_)voVajQyE@bbs*uln# z>;1l1+r3@ai&H>vufiiZUY3`& ze0bdhh!AN^0@;ZLkMbZH6(=?_J5m;gZTMVl&Z2RclKck=MQ_p`pHWZ=vwNU~ot zK`u9xq(Y#>I-QE?+3l$PqkpmyX}cfNCfKjT-P`Bh#Oh8yZ;N*GDmFuYz~xr=qhdc9 z<5ct{?J~v<3&7+?+P<8g$It#&l#3)?J@(hn`t@2E#re|dmvmV6`MWymdy=K9&vx}L zfDWY9&+57E#tAsvWp*~n=MHV-s`S%|_Yi0sHac~G&=Jc@Q2Sx0%YX9xuHt>fYAQsv z7sJPA-ilTqpoI)tPyiZ)`^}5sUDunKIz6y zaJ4?21SY~>*T}~X_?1Mj3%+c)_nUGuOK>;0z)gGj{?dL3`Fgb~PI3|~tsA`8@pO=1 z`U^EEOcSC`uRWb_Ykvdmv4WBNXS;`H)MnRpRvaci&_Keq^v2`swUq?*A@7VOHOW+z z_|m9O{YPW1bHe|;O~1E*NwUHoT+q)k7@p_d8RIcJu@e;((yJ6U;)^xh z*XeooRTkEHAMoN_wccpo_3MO;6k5B->)>yX{j 
zZj+?p-p~htK7ZYJUm5Lm?ib6Fm^@=CZ?7qL%&oq9;)=C)ZC6k7nOJ+TU-$;Hm)l|j zInH{+f#hhjGp$z!ar|{{bCwuu|ambL=BP>Fp1)D_wv|~`<+C8xb^tT#HhTD@^afo59>`yCddoV*7h}l zy@HB?zJF)JrHO)`)hUEp@w7TY9Je{Y&nE<1iYFd4sFooxtcwsJCVKE$!ScwEyTB{&_BGJiBDjR4ff8_4z+pe`%@P{GRXq54l= z!i(gtEN)#ryW6)o%apU!UISPn%v~j(ns&-WLEX@X6y7L#>#i48v%el+ zJb%K-3swy_#^JE$FOzq1NJxxWRJB@dIC9Zy3{R(l3!zccRQjgGtAcUod8QCLZ^tubbnp_GZ~r#*4qB$nSKE*7uc=0w|Q^JQ<&Ew|2T5 z3QBlfjPuEpZv|@0u;DteEdw-^xRa^>aaJL*`YKOY_7vqZVW*dHEW{)&9nx^848+40%b?o=j?wyLDif$1*y!P~1ofqz|Y-F`T%&HXFB`?&%*F}>Kz{;RRn6WFCZXmf`iLV{-_ ztagaHXV~uVqV-!5VjDoCgK`fRTXW;NY<+}LsFK>@NTiqZ!&TmY!t+Vbv!hG-yUiG4 zI9WWD3+ic~kUqJ8u}$B+7(d>bzENZBQiWl7G5!+Hp_FurH_> zc(&(ubEvfYc~EvI=mvmMryO<5sEJG!UKQ|}YGR3p123N>w~7F|%y2rEUPn|PhqaYtilpKJq$Wfv zcoS*In?LY3;pp7&T7O1t1V8eC6%xaH6MdAdmKBf{+pZ==ex#k38+!=^%j!sS>6!@^ z$om`|NY$Cmu&|w-c<9e47iAWNPFSt^${M6LLUOqk+K;=t1j%~KETi--CG07+^3?-L z-|da|4bQ#0TW9;C!dEYO#%gz&lzldQp32PTzFj;gj<>*SO@Gq&$r5A(5n7vaSAic| zD@(l6_YUD*9T@uk+{pu`!%aR7JNIu!k-jj#Biv(iy1!fM%Eb zReS>rv8TE2JuST#k(KoQQHZ`WVOwxJEcsZpZ; zPK`vF7G%dhbAJyx24Z!+M&M{YvyvKiQrR>X>BB{(+*W$H>On#&L>3`GrU;kzb{D*X zT6tP1^5^FRrAAA}Hq~O7)J`Q5dGPk)mI-99=(0TME(YLB}0^uuqP zflj9Lvg|L;fjC`QmD-dy1!z@e8^8N~Z%MIW(u>3Od46w-w@7ZS@NWsJIUqkIG5 zmyoZkwN;w1EH3fw&P-4mwqM%)LYh_BXL}UV#vHTCC-!|K^^4cd1Q%NHd2kt@+&j~QeMg@Um?TcAjtJF;-xbw;zpiUtI%sj+ zg1SZ&9)TEWHN#j;h3%+MLOql7^os7RHyA)b&cl1{}pSwo4UB zarT=WW$ZzqUX|W&BdB69sTehT!+IYfEq`WjNR)r-QQx172RPSu5R!Uso? zNs*S;^v5hu13&FA$g1e>!66O7aZ!M!nyIK889n$g}&-2+9#b@_&Wv z=CfID#HPr1k5n*Q(ll<(J*Nn9IK1>51Kr?^S&k2^q|tFbHLn<`z3wQh!($!Ycdc_Q zf#*4F7sOdAD$s)!j3QVj%7ZUiZhzXn#@se)XlPv!s#E6mm0Ohw$^+3%A=DEDO`2^;yla zP_3Mv9DS2~pwQahi1YpS6iSr#jKyIsPt&H{ElgOA#kNLC3<+NffSk+g!BbrrT_{R; zJ?@8$q&C}AD6gX3ev=o*1zFMb(@|UNDAQt?XwvEY;#Z3XxVoqHdOZ0l*?%FMndgUe zd7Jt}UK1;sUSHnN!@_;g@b-60Z`XD^i#PYhF5?Y7PB3mSJoa{*=Tp^ES6afVzs%K3 zf0|DG9@OUW4z=AKto$!a^+iY{ZaMFgW*?Mc#U~lMSrw+r3RSD)^-Zovj7BlF1$U8I z>T?mlQ7vi4C^pd*OU~-i4S(VdC8|Pd8jh3gg(or8N(!n5iRJ_G*7zZ+4Al?FO&#Z* z*c}3U1HV0KWkK#UuT@dfd)(}@XR$&xCgmvYbEmYg>d7_J`(CRCHtq{|2MD@|^+nXq zS0`z@OU~h!GBDn18!M{aorMo&-AP?&4Z=P?ook&?yAGuqk4E%7!he%SzPtIgvK>-@ zrJpN=K7?(3%}90LPi`-_KBCU;@S<*`3Zti`1XInAvZm zM+iywyEbNEG_YE0=jw)>FMrtJon}6A`3)9>u(n+J4~4 zCbUG)<7a`Kg2qdWfnH*E9ssev4MT5eH60fu4Quz9SSuj1bM_JaPUF-(9f!*kKF)E!fLt4(Mk=tF&t2X< z)qKg2ZLMN^E<*#yKIra!%biM*UQg7TsCOB$RiD`59G5S-f=Jb=U&hHR-1EkU2B{Pc z?1FT>ZzFXd6o2=DPDe2lgNb;dw5Q;`-Sl(|I;^MT5jjq4U)RIwB@+OwTS_f6a&@N( z>n9>!^MJ5>mm z>$~8lNb^vX4H&!Bp6(kDn2^w^5KB&o3ANMZ=aQP4>3_>7WDp6IG18luQ=SY6L;6uX zEv0uikQaHIT@LAX3(`|m5(1D3gN>4PK#q#!8WnDLEh9+%SL2lu5@)K%*B#cc0c8Ak zJw0FbiNB`R29hybaD8c-G??5W+H)7tCtFDiMDuaABr!TT^mJ!tmx#UYwmUuZNo%=S z-20MQjDPuaMa8HZdOQ2%^qB56^@vtLGSdTUnUYB^={SCRCx*Y+mQ^fYTLd{0fBYyV z3usvX-b^c^u0ZsA^-+;(AQSG9v=a6xpjcjCTzos zX7odAN^`7*a@>Rgz1M~10<8aFjcVd8?S%r7U4LlVIBXxg2;jBbnf$aDVf4u0Wp9^~ ztss~|Q~G6%{mp*5D@U7yF(j}3@y(zxI7{1wu}%}&Sd+UQ^-9e}wJJK1eoEWDs_s!v zm#i(2sXT8N`n^V)@|0xajW$??x6Yk&FB(*c85dT1O`M99Ct|0IoTkE^Vw+8}y3pm) zyno%*cX0Pg@+t5_uG zPKO?}=G(^yG^j)8U>b*;E!4#N5*&Huh zGEoKjO26V0O&Zad>}?VyUMB(ji3 z>fKUEGw=LvJ7`(mZ;lp?4R&xhVs?M3q)>aiNC2+sm;YQHDgS=HKe8K2A?i=d-HRoc zrxL2dq4rO!3ZEIxbtSEoa5!m6_=uf8tt69P?pBhV?=DWf8TRBEd{Jk@ysfua+4q4% ztAL~D0BVq>tWC2kQI*Z9DzIQY|(eM6V40c-4C+QMFkrJNOHB9+|(vcnVAtKI< z^3`p$%|jWy<7#aKiLzJ#@>YM4M_+mdjK3_0bE9v?3~IVzqHN!Tn7owCzp4nEDwc(6 z8!*sSVSbS0=HUYrAa4Ip-rlrFd2ZVl`!D$xp$Hocn0pl|?)$#O{t{sWZoz;7+eG>A z8K$b9=G}Xro9Ie;&v|wgW#eY8x#pVf7%7<;qbDyTMc-|`!M>v-R55>PfwKpPf4;Av z^+&Q+0JZEjZr}WKpi%ZDQeavJg;u_~71?}_>QA+5HNhP(hjPDd_Plo1uC|6n3=n1@ 
zNKWb8Ls&V1OYYm~g{oyLh-dGGwl0wj66ZSqXaMW*JRRoaF_YT%yxm8~Gc(+6W9sRv9tk#o1?=(tu!&I<*cRjC^n~)>qR&X+O2dDA`&)l6UC*iIJrG4Z zFYS@{wd?NejYyqP@k#_%K?PW~kvmv!7TvjOHM=wo(g~Vao{d{@o}C`+r%%Ij-XR)x zKxb}?_UUtIN!*$&g^Otjd8G|+h4l983o&avyrOYqAJ!&x7k04ij-xG$0TB1J@>vXS z^_;)?t5JYrvrK={ulL?d<|&nDa{>Bt?aYVWrao>@GOF1hnVH`jrufaA6C{F-r|h10~*!{SaUD$Hu18sgC0nAKPZF+c{_aJk zgr+BCW8>?NqnZ;sGdENoo~Sy|Q7WJ<=IgXugFE~n>??m>0glH)8!zIpu{C4k0Y5If z-I4AQpPaS_+eWy2>c=ZFm?xs(&q&`N7y8~ubB0hT>qWLQE^bTHs%7pL?fP7+=KH?h z91a(9buDXls0*Er%Ud$r1FX+GUvyy%au0>s^5c2Bbi^FbE1Z8aC!bN5tt$k~RV3|% zz88vNIM+7?mf(4+wrQQ`wp2X`+JaJLOyM^3kI$H0?%Ie&%>cPx3K}M^65Y*Xx%6k< zH9hP55Jjb+PuP{0SM+26@%XrE@mn(Q+xf{5%^rUVD23i-$58`#&qXe3^C@qGQ4rNN zxmk@dsqx~v--Bq-UQy;)-?{apYhUyCeins$)h;pOQh};&tXP*)l zMJ#`2<$X`ts11|IC_sN=Nc;D`BJJ1*s6%ZznP6BkNG$aB9A8nnV_;#Y6`(Vd^Wt1C zmg9w5my+(WYLv{c8+ZFUr5m-8gNH;{)F$joZGwE?pDRkF+`PdbUbCxK~24UTnJ&+Au`mOHfUUqUS8}wt?w;?2D#bX zo2@yJ=b_KN+nam>Oz$xtdj|zghdY1k+jnvwadiT=U4W{NgR*))e4QAx=e@rP@oK+w z%B>U*pz}oix_v@9pHQ&u4A2b;dzJOFFi8F|)9v$scyoP5rMrG6iD!C|Rx|yr4?BuI zo;Fz+Tz2vKy_v5J2WXfFUs^MdG&-=GH?gJ{E81UB!o^Mf*X)LlO$s$xRrh~7munt{ z<)Hgd(rliJ%cs|D2lO935BG6INBWwukXUz?C!60Mj_)@w8Imr~suAqvS=u?~8MrNp zhN^@8F-V3qssZzBb8oGjd+pU*zC;IcC`Qw@9^FXG(Ap3yAQkJ!ZgD6JCEaxwG~1Z9 zXh?0TQ#+ZL?JFe%q^mj@3}JsPyv5MG%tL$ep0;-@fg_M+5=7T=doYQnsNUAy^-ftJ zp|@#>D=-*ljhMdCAk8>?Fn4uTa1*-6?QSk(6pevifYoTbB{P363&jEHYB?+l9k258VCR4`(d$uLuS>c{_Ta5w_p@3aYv8E7V&aPa7C@~W zmy5`oU(jdpOvGW!&Z_%{4dro}-f!I#L73ulcWgGKiJ7^!4*{P9yQ_~ZZl3Q!UTMNS zDX3-d$q^g(P60}*2O)ossTOaynbx5-0vz*H6Xsyy1_*;2)ywn!-oV&#gVSRP(!2UO zMPHxy&FhqqHzhJ9;WkBfSuQ&=1X%O{^ylxZ3_Y7yW`2mwOPxe!$d3UZUcBSFlBf%k zc%D?v)uXRfcHeN$%a>j^7eM}MoiSrNXm zM_I=kXhoO&amDro+M?n#zZ`Ew9kv3x`todnZo&2t-e=KcInOUS#Z={Ee-g3A$us58 zi1BG`uM2Ly3Aca!tEU``EXn3ockmD|Cm>YnTg92734+z6@rtWas)QgPkZ3jl@32(g_``BvU%^gSDG8C! 
zJL{(9sJK}#uKD#nV68(F^X!m2H}(2(ujpMh8e=6wanFBWev_%^2JqJlac2ZA%F~d% z+v_lXg5$CReFxD=EnR?gKH0Oc(}?hE8P*Z}?>ksx?e?I{&TF$nQactJV=D=5Qs2~( zxB#S}7E?0jfr+cObZe=g`nL>05Mqnj=p2u1@e?QkOK@6P)b4c^LSzPQPacci%H_j2 zQZ|6C;$(mE{MxvCf^2wWuUT@r(XGpFaSbz+AR$PJ;JE2}0QezyI&U-up|0IN3Q9yd z^+x13{tXxDN9!78Z^^)#pv6d%xMB@fBR}Q|yNV%x_|91MCI{Urhw} zR{-Saz!nuyhcK}KENP&&PmKE@5u5<&#~N(oPR58J>zjb?7cKmHz!NF> zfRR%6qI?T1PE7RYet^0^t7nCc*}5I_bGrj7eF269QSn=$D99j*o}I7mM37JcgpSM_X)lM_xoDwuC-xoW&A=DxNf-$1mriUxPT!ZL~SVpi`QP*JmYHuN^ z>1^iD_=ZT%jELrYeOKpz#f25GfcUvrF#o9r$}(bASih68!>A zM7rJgYcn>sve&n7l+}{{=F+e3(ay>IMa$0@w9b(ty&>qmvTfR9Q@v09rQa^>bL!Kp z^M)~>X@cBGIt*2Fcmu3oUDZVBC)VxRTUm1Iy5j*3a>1mT<4{nUpqiH!{7HYmvHR_@ zkU-?DpbQ*2sq`ktbSTkrbkw)cIDHw%`w@1tJhRFH;G(E<&p8ubx)K9+ngC6PqgG3Y z2lushQ&i6OR0F9#eRyxKF=E{}q~V3?8hg+Btlj|L!!FPSv6uFXw4vXp)zKrmcT?0P zm}!Ba>wzfp0FMIrhluy&4K06xrhqbUPyY?*ed`=jXOyGl;^RO#Ph_#@={~qKaY!h1 ze>e$Vc`Yl12a)$~LCt@iTq+TlH{cWB zp(fpXX(YQEiE}_&5b`iYE9S|?rw++YMOhb?MK9K9crtG*RcM3ncGAaI&CS zf<#z;d0jr=E7bRF1^+G7fxQ|tX*}L-0P$SRMoYxmMZ4YCh2UF2u3DY5E67UqdJ{8& zt&s<^uJpOIcAfz60`Gs=Gr0Kdyg&*twyY&pal!SjXA9%~6bbGWvVcSJ0LkX?qyyN4a>RXTc?q^)>r3% zy#N5Sk6=XH*1cTd8o2foP?0EoJ8{E>Q)%7aqW_?<1k`O{ccyhIQ*e%y-$Eh=YKeF0 z-mF110L#33(Hno+Tk3X_{7Tu}>}m)ZVccA=?fb0q^Y}h@dg-+AckR;$@UARSM>_#$ zUvL6IfX9k6->X){?UeMl<8>9x!?k{=R!h5#6Y~*{owx~J`HNnJPdB5xu{BU#KQI}A zy15hTmd~<00)^rj$?CXy7lFC-^yM+Sxi>Q^n1BfC^IU(wy^H5~+Z#Pk=k|kh((}nO zv^I<{1sjA7m2Z3Pu)Cq6`rHH8Kv__zDe9GOK>ZK}RGhu!U*h1rwxc;yt29vUU$a%O z1pR5q&WMZh&9Gzg)vc6*y#jO~-8#R?H&X89X>$k8V;nxpAUHgX;y@&(vPN3%ut?B> zb7|}`Sl)lSywJcdRR(LLcD-BHdYJUNITAX)Ng02y5yP!3pyEHmg}ec zYSyDF66EGVK0SB20akppMnVQH!dzAyhC7|yJivddMCM$Q&@N+KG7gDR&PA(FxS(Y+ z*v5x!_6*3B=z-#*+;=?tD(^+nsiL(`_gGpAsYOrHWTbba3B!u_XUSVTE89NO+vEV2 zdoFl6wQL{g_aQVUU@CwkM#O%Nq8QVNER8c4-c>k-u)B85#~^4M(K$D&9( z_#=N8{$cjsvuxGtVMc9oU=Tbh=DA+&(dL@Ykc%5MDV%$=+mb_0?R6F+8}%M$B*fdt zgSyM-iaya>vPPLa>T8{u8T%;7G0rZKto0`vHrM9!jp*mKT=1cG(SVV?T=thuKK05V zuW>t+5hvyioo~kb zeXtVoSs3Mdd^;Pd7GQWlo!}$(mK99d)qV8T#evb%Kh2C!ft)=5z}7vWE_R1aTtqw4Bcl&%&+|*AD;z_ynB5<~-Hk zrpqK;ZWzMzr=FI{exiXRnsG`SmqvdTlk*y_vbFja&N_nF0stf%5Q=bVF zoe^Sd{_)08-lE&Ny}>KggL7p@^BF}pnQ}BvV!M&-*)l!zXcw$N2dpJ#9GQPW#}FCQ zAT=`oJb%xmL~e3>C%&=;hzr+;`~~mMh+Q+hCl(8dYgH_4PV)DN8Q{ z%~(}gE{YYso~UJZD{ZB-bl~gEogKiY5YN;?h|Dr`FQT_;UKrY7Qz@~w-J+7D-Rb)l z=>sW?S#cQ89kJU0;s@!9(@on2FBnJEQ4@rvWejTR-7jZLtV#l{dJlgfD24kCAbx?X z4qHO6iMmG5O*yWYLE_KN0vLF~nM0i^4Tg<|Av0Q?bZ zem2)+Vb70LruWGkz1PmlI}Tk>p=u=W6EpL!?c-|D`*N^@iPz=;!1FsF_$g}mXd&9` z=RI{H&b<~jbzCI3k^z4zxfZSBXR#B4mnCyiqD#_z#VoY#+T&0qIx|QiJ|Gcpr@l10 z$rkh``zkmBv*}p8)F;t)yFqd1&FkqzMLo8v6-1leHZ=|*=LD||zO0=dHt&i@V9lnu z1Fu`xZ58B;6ghrbfRNlV{=0h|8D*bE%t+8&@GHkJH~70lZt94e9)A6CgWc)?#2@Y)fS(W-n0>s?KDn@$ zG*X_kB~Imt>3n~Sm%cmVK0YqA>*v4XlFH4QaVkcgJXX*pGu*Y_qoya3?6rds90LHz zPezdwjkR=;lwy+tlGU zF>!vi<+0Bgq(pJ{m``nT_UEZQC6l(u@LaU+jb=JjDNlcY-OE#l8~^EO!^aJh7q}q? zD4+b@i8K)1DHE?6?grA2H=>*PXN=9+nOM5R`LP?B6g;VDj)S|*&lufeL?Gu*9eWC9 z3;;vra!Vu&FgdWTV3gfbtc=lo$LJA_FDkn0@%!5-GN4$;bEb-yCXO3#B@$h5xaOH` zR@uUQD>i?VBf!#A9NTA-G8*UU@5-nkUXxwm5LZf>olW1t_b03Q^L?DBTo^wqaa?8x zWLAc3Y39LM9Tl0)v&oSLr}Omp>Aur_@N>ur75Di4o%k)jr(=o1zMZ89mTbmpBkP?% zVkww&&X_F#YRlyUhi)E@XCjc)0?yqdRF(nZ0+WBwJfLcWs0EI{^3roKWjie=j4p?m zw3XC|)(ng)xV_I0N~jLG%7fGe)yfQT#5uMUQ0*`p^H#a+0em6=fR}d+pT`B^zC#~8 zB;REgira{qOZl8rul+&cSvLzVUo6NiF#dvwF}K*f_F3j?h&_#77Nz8KbS}BZ9%Lj9;|*2f0F0x_ zq@hbmweMBVMa{m`hl&eG5~k)ECyUTvqVTKLPIs@0RptSg8#@-@5`bq^u*>n&8edJ! 
zgCGG0sltuXLAiH75_iqo2Yt^&c39<0UcP_k3EXtxryDrRy#C0_=O^T4 z#CNA_e*eQW{G9Xo{^$AJ@8`jB)mJ8Y|2jV1d-{$azs({$LuN~MG^XpkKiBtAp)Y^Z zEEk~o{+3y2q1M_fB*m%V2*&n+K2e&Nw~R z(5DG{KS#{Y0653QZ;7C)@&G}`4n_8p+(LGsX_e|*J_49u0c2`1$?vxup@83re#z`~ z-nWi|EJZl?M?XPczGdxOcj15MN6&rh(&?N@pJE++t~H$pxyJXtW%bv&70%0p>GQWP z{Fb+O%v!7f^)m7FwI`E_S%hz(yabMWZ~g010e@t0Qu@}G8(ucvKum@ip#rf@cH17P zavB3m3+ujCyw2ts#2a!$1$sbQ(H4=pSh)+#7C z?*b`#f+&R9Iw=B{d5eE*FtmV5Q%2v88Bh8O{H9ArQJ}95^l=B^w7#A6mH!#{Y5jlc zt4T*;j3-?H9R>XYIr!06-}Cf)FN03`{?1>zX3`Ho=gZ%6gkyi~Utjy*FFmLFcn0VD ze8}GP4Bxtm#a{eYr{@^)Z{891NF%SBTy?93W5JHg$Oxc&&9HjzFyv8DgF)Kv(G;F%M+uFdj6Tq~Q;m>0Nm#AKnsF7K6Qs zbD$@RR!h#Shu43gc+7EhaH4vgledWQr(#7ukF`U~y-G6!(o(Fexq0kZz_zqXm3MRK zs2+0V7&00itQxUQfb<4Da+9Y8vj}+xvX*(4HazBcg@f*zEGlrqZ{{C+r!$zM=X_X^ zWUV~59HWd{FMQh~pE<;O{Ks14zt;R8d(l7UGWdH>>EnOhxBOsUzvSaf4!*wkYfh(s zz5|;G^Z9kZ>743&-~4SGeXfJ+PJ7&MyJ?4KJ1c#*6N_v#{1{}sH7B53N$V-&3^F9o z>#^_mC@-pysH#+fI0iCU{LRS=iH$AA$z*{A;*0~{HjmlBKH$~72DWJal0gQ9an_Yl z1QTj6h*5vH)(3*(a)lG#J#->qY#FE-x5XudKpvfv9h8OJ=UDUmXHXw1fyre3c1sz+ zzXE)jgQ*M~MM7y0AngTmEjh!5cY+Qqnu=VsnFH1tmZ3Dr2y}H9DutrKhS7Dj)$_0y zq+U>U?h$CLAu@vjk^^T4d5gt!fWmSh=vMR*#l?RI*gz<5BLG2a!1jV3%=1Vs+g!xQ*{HkDUUrW@qWF-fN!GDPz|e4m&HrRq{Am;Y*dRZ$ zgL;3`U$QZs_cj)G4@BzEo)Y}%kVz&j=I6fUbj>e0#k2p&()2#lXZG7p$M);{cX`#1juPI*0Pae2xVxMwlxl-Sc4O0t1T>N}LAAy%D2o*@_qDbIn`m0K(VQ~ZC> z=H9p!xIETGwu8NH;63Wcq?e=-tjTV-mh%9|f%xn?y~Q73`=L1o7>E)RpLmA`4;c0) zkC^nN1%0`bCNAiRyGeNleGZ}yM`=X2#=Se(@-_TFh2iaaip(eXcHp+C*uV71Ed0!1 zD&;Ory69`~`eXn4wO4)V9_W}~w(ftAZgIYk{XTEfdEYk2Z(ABV4CIw@fbTdDsiG9X1N^wV?sq!G;f)is^r^ih)Ux zitea$UBu*prg@;-^l0?G8j%uGz2e3-Y6m$U%+C;@6Jov6=Ghuf6d5-}1K({lgx{v;97I+QYx}qWW9TetbB@6tLg;!c%0? zT0YT7_6~2{P&ZgIKyEm8C^m<&W9T{>har!M9|U`#jDss(#nuR2F~tsC_&_e5Sw3*r zc8ttdj&> z&xnJvq_q*?E2$_Y0yjK#N`yfH(#~I3li!(Y8n^vKKpCuT^T^x}HoC&@JGv z#a{L{*cs5#xB`d|YzKR*U>Dfic!1;cxw3^ZM_S9k#eP@sIMjc*bF{bvS{U|z?9HvN zxu9$A6%iaQsrt?%kF6og9eUgV?$E%#|B?T) z?Y{NgZ&_6HFZ~1w`n5KG#iV}QZ{Iq0k|p^Pfqy+x8kA0=z8%FLOs}L_(4RHfSRA^b zq#U{**ag_Dc>J>@8QB;oZ;k=z#=x!vUk%8^ANepb$E<%;5G0S0@JaYTL(f_+LUX%r zBl9Q{DRUk6I0z@1m0G5S++=`lzs}%e)q)dD4sQ@fXp7;UgeeLnqT7fA=_ck@5emwc z-5x+ql)$}{N`}9vpr1+OyJ;?=Ye7zuQ#N>oB57y~CPAxN+0sD6%(2(LM8;wH9O#NQ zg>m}CX|sQ+0w|sIixe;*;4%O?v;-dR8>L_Hy_7qHILViP`?D8KF+qx(^v7=>88$7} z=9doovc36Uu^9aR8Dq@FAAZ-gkNxa7-@4>;ohcqker&N{dSIHLZ24=S`QCfrm(8$- zzkK@d=lG>Zrhg)^yimE!3|bUmA3+=KwD(Nw9(#WkbJg={Uxus}E!GWWc@QZlrxn)E zz{uQrtV4j2oZ6Y^7D*R%i^eOO8%)Su{*d_jtVg`@$M2u^wU7Kvd(uaK^Z0^$VK4cymp*#rC+72Q z4`n~L&M$lD$JST+FF$dTGsqm5^jQc!G50~Y;#Uac<$;0m{>1dK_5~9~-a^@pI{1HH zJSR(F^J=PRq!N4=fi*&oA$D3$U>|aj8(Xo6mnia>d01R4$o!5i6^XCn8)O#yTei65 z%A}8N6}}F~H0d5E8i)fkD8yu$7`CcnFr%5uPH>8z9JUa?EfoVy^14+2&uGr_U z4DH6{-P>YY6JX68&t*I})H|N3SY1fC$o)mm8W5*%4`5f z3+$OFi-ukTeMGMae(gc;!&W95#QRy`$^$qQzJaH}D4t@xG#aZB>FaaeGqDb_pUJr+ zf$7%Pg=v4z@cvj5J_Fk`S8NG3Q{I&ZPrYz0kRKgl=q>ntSsE}MZ)3kO*$01c7mbeZ z1zkaiWAgj>*Bq(dZrX2T4{u#^pBj542 z?en!?9hoVf2swn_SpFFk{OG}t%>{X${I%)#uYG4X#ehC`*Vj7OA-;H-o&!4K=Wj?# zf7#l$h~dQnpDxTy9zz*L!BFX)>9*;ApAOJ^q&43Gu@ z8#czW=f`t>4CaEHsVuj|UU=4gKew6Fn}8iJ=`MVk930g}O+ZiF)RKRg#eR+0q+@2N zt}qxtN`a-DLmZvs9xCDzEbAh!;r99zV}^e(>G0nWQ-%?&*a(Wca^KzAv7O0d0OEGe zMhs-qnJiP*xm-gw5gS1+h}V2W-SP1p@g@)X9SMzVGaCCf&`oGN2jd9#3l2U5`~vK8 zh!?P%`8)SH<^gq^GpK)dBW%W3?&hEV`>&s$7|yre_>L3*)Irh{U0%eNe52nVm1F|doTaE4&vnBviQr^fee?X!FUy`upHI_QtfX)^x0?z{<$<3}} zNrQV}B`c&DYXaC+-z{{7BJz%f0k>y&#H9czk9kAQk4UPBCFp0Tl~Mj?ih^4n8ejtT zt!T6DdD=grt2uwx7ZB^o)PMk2^de-i_YQ)%o6$qYGv%;Hzd{HX5s5caG62czk67f; z*GvH>iGm^o#JQx&uR?|Ld;X_+#~uc>{~sIjdtMOB z{@AAeCw54fzmHY_&;|c~5A2zbpY!qAa-D`>0iPjC_H};(1qquQu?W~G4)ARJXtjDE 
ze}}A$B5>ZMD>|K5X|(Oqh_3)&!$DOT2MPc9%a}yJy@||KjmpMEi~?;3F#LdIj5U_! zY+-V-W9x3e9IzL2!&Jdmq-2#E$c4|~3 zD}_G9rG$UjrHo*#(11C_ksf$v;24SSCtfS@h}Rm;6t9(g7Ul0H2}~=b%s=+RA2#Kb z_x%;Goz})5_8-O{@rYk~`eWPwV|{+&ub=qcSMKmThit$G`DdQ+J9qPIPyZEX`FfUb zds98}XOukRT`UwGitM#A7flMI$_TO7ggbNyA1r?;!#Ea#hZT_u1=4$CVpy=j)og$)onxM@F!Z28+DMGZaY(4fM?p3MS^4z^)G zQmKkOkQzYtoH#d$$S}kfuBHhdBu3tYWhETho6i~sfN!rypc;YwkLN~I&Md(e06z|E zR1<%YKZMHDc?WBs$+7$A1JgeYGf193ix^f;!M|&d2}y1|=ygCrU64P(sok_MeEhiR zHAney#-ViGWK`sd^~kRlS3o$!KbYcv5$73g3^UI$!$Ip7+!It|v#sGiM zLA-H5Bpoq=$v;5eMLbpx1rq3GLl#LYuezcf z#JJw<$Sx`kwNd@{g??0B@9~_BffzA?d2w39gU^8*LGLwYctHR$hdmh_KsvkA+!}we zSq03!H^<|%hfwDDt|Pmnqhn2Rag?)M^>7u$7nzjiA*;ODE2w|SL|oiL*H>x1JD+=J zjV>BTujoCVZ4cN-j*4H%7Gi*iQ*hkSx)SJObs~8p*80!}bi0d1hZwC62TFsUKpxpS z;{8yRIIteKLY;!*nicEC74?ZD1gL+JKsfvw%im) z3jWf|f5+p$^w6iK;a~E-xaKEk_!HBIzwvplDc^I3?WIJ7&~m+FO>yI?PK zql5j=2~lfhK6)z$Rh^xsXKsw4V{A68)8z7k0=o0 zKuhQkhl^0`z&sJoU>Fkl+i}yaZ{@jdWCXZid;Jl^`5mMF*h7Mo zKu)sny$$#KsXNF~L-XSseCfKSF+De89B2kCS&C_;X%B$z0f|bgkSvu)%+hsePU79Y zD*(`OAnt4~au}Z&AlZMFT1+`Pp_=p$dZCa%b7j=5JWNKhgC?POq)WOv@nBH`1iN2R z5EoUvP~rwBgzN$38VIu-@;y2B1_ymPCE_8lMa+QNO?4Mcfxt-*MjSG@qV&@T5@#tC z0oZ7OvfG2VnlWI}bv<7gsH-zrt}zIpft!f0BJ^1wLPQ#2yC8ojRG`=hb~~fq*oss}?*}3p#S=~bpQdGUL z0=P2d8-6Zq*II#n+9Buk(8eB(B!F_qnt*;f3W9?kVJha)DucN_!hf|{)UK;cjrAd* zuxrh01M-}s;=z9=LtF>?c;6uh>X=3|n3z-Km2|cHtrU^lCj31p6Nhd zJppNAoDX|lK!pIu>OR_j1!lHKd8Ry~F9r=B8fuLkugLZ})E#{7Q)k#}@Y#P5tMr{RIJ-cZ^_!CuyJ+bd+;@9W zQClIhaOWKQAfaCr2!IIjbvL!P76IyUEU+FxKbb$ZXvnKiIdm0OUq^{V=VD#($g|AA zlM7%tS2SQJYA%46IKe^7bSAr@pbZ_?rixfEY}moL`9;2jeJVv82X#UQ5?z=o>_akY z;Shg&z<%TiVu%FD`;i*qVFO?1Bu#cQ&zPubfKLHGFV6(5`HJT(8OXa8TCqpuG_XGj zHn194HeVr1preAf{=0etrgVCZ z>Cqk0F0mItzYte-pi9lKHQ0}w%m9spg*nj(h6uV|tOWP9pa9Y)B8JiptWHRsmrT8=dI;J;AshcOkZ%WkSSrA4bEJjB$*iGiPE=!;JP{Q!OfFgPVcKu<5Se?Wg+ zJ27W8J^?~FLw}CKWC$0aUISs`kOxtNBTV@YBJXv44s6;^&v& z@~2<;mwoVu??2T;|GNIi*O}J-$1a7;_i_og=qZlY!+m5jpJlH(YCMC$MNTJgedKvj z&yZk}P_UWjq&bmexUq^no#|E7GN@|6#iI7WLClkm<~;v64-%m;?PfJ z?CeyZwdwKBn3vMQybvv5VNrkAFidtD&3)|gNnie|b@??Hf3B_XWB-i5eaF21_T_)- zDF4VCfB*Zo6DPm)I~Ors>yNeeAM?)N@!_8V-D4>s*M-`<3{79|4(y|-o$*44)WWR-#MTyd!5m_&kkOu#$b$hU+m}|Z z01*a2@I25BzYQNwTL*tb7T3>-6Am$1EL!)p&4GIY;URWXp}2qKgCikV4)FaBua*;9W##aBHTvBa7Euo=y41lSNL%9q*hhxVxKPPu z-*NfCvkijrM2Naj*dSBfyTH0G`^fWX5x(3O`{GoaYy0o^$rgpGfUON{F2c%ob*EL3 zqoD*>1sq1dlqP>jXmGeNC2HQ;*tGntHx@M@g*w*ADQwPQdxN1j)xIze)P4_sF(zks zV_nLg$n0JE5QFJa0SJq)(9l;WKL9Zmj3Ow?cv4rDo^ zr~V0@D=DDrpzhZ(XynMId;~5>y%Yxh289qym0%#YqCtOwA=ZEfWF|W#ylW|aJ;gX! 
zjsuDVPd||xS~xy~xF>Adq0W6`%9BkowJDEI+wPf0>&3KRTp)*lNbGtHDdZ41j32%9 zXPgOk_s5?5=$PNS{?A&gzw7-bY==K|&`;dz6PNh5%|CJm{?T84Eb?H;1^r=H{K(X= z80Ejj$R>Z=;jb9SbkBXN`zXI$D)Wk`?1Zab^oM9%U%aNe0>rs7Q&=xO5SiWlsiEBi zF`Q}X@~lUk0k=KV{Ic?W<>@p|G1c24E>t5ohvrIA)tz44U66BtF6Z+?gFGUkoqz#u zl?hNNEig{iQt-ftF&KG2#TZ%C`-{*42y1pL(M5k>sGB2}WGFkx0{G~Fs7hzp(=jej zKyJ{o%mOK)H&n%065>E#W!MLhZNw&x6nuWh;T`HHwslpKDW*C)E_X~khb$F2!UPm* zUr_P@A4GBrGiEVje9!UGU^`sBmkTCzgKWzVEmj<}$KHnevXJ*&^jc4SFQSd!Hq>){ z^b&tW%mkS4{iF^0jfkA@zL&r1$Up1swv)a-u@+X^)kf~s;gxtFFmg7ZKOQAch`vAvc; zQFfqXUb&p2%y*5LfWiyFtH&DWfLs2Nkx%U(Vve>yTyxZ@xQHD`F7_bAL_zHWf7b|WkVp^6+M`s{A50FD z8Nv=n|Km5cAj6+#9-r)bn%+8$&3yVX|MkE5d@~zzKVcppvO+lWpBB^4fB4F$pAz(p z9}>pTB}Ri$dJD~!GmUPyH<97cBZ`-FIeCs{J!NU)~-7p~ti?1kxK z#&QfG9vR=SG`+8WLj4u4<51K4ynDRQ*Esz1KOi(A#_vd&iK**UL;QdDL*Iy$dds0M z6fNYaaG2KGKmMC;wHc;n(+>jG_pq>MH$qwMbXa3}#il7BF(LjT000opr6f{RpZ|0~ zB7H{oOC&zeJ*Cg5hrrK|Tzur^(;thQ|I9;<>oKi8pGYn77c2#h2g>EVnoy2TKS)(y z^qW2YKgSq_kq{iv{HYmaMKrG$AQLLs|Bf;(`7;Wq2Kt(he~Cq61#>4Yb=hbzf-% zgM7Dh7aG`TJyV;(k>h&AAg15>IP!W{1f~zd8Tgz#cXY3~ebIk0Xvx^1pryz_YJTyp<0KD+%E4Bh>tj_x5OqL-KB9eSUU~nB(`J^*|A&BjG-B z8r~nz^BfGk4eogk!U9!Ik0NLqK$_G=gt2i%6GcVBT^c3)UjLdq$d|0^jOvU}=pdN6 z3zoy*l<9wO3W1m|a3_)3o8+cU?hK2uU=hfdHOK|$_1G1r_grBt;c`pLv@VGtTA%dl-=%zI#e90F#LcacGE%Y7MmWcba01C%(yfYW#evt+^ zU?5LSRl}b-W3b*JPbI8vv{p|tG|j76G!e#B^~`_5{q+pQe|=3PD5mQ$pX>Y{qe*^I zT_U*nUd%0ezMR%Swwb$OLhqIN+QJIO6voR7-M>HazHYde50 zoMdMD9pjpGK+W>d@Dnt7-m$pmBtL-R$8je<@9{ljDsW_Cs;%M_+3N+9muE!I06IX$ zzgVGiN(1MADxF8Vap$;TiL2|5Asc0xzFB)^!`Ne@ofD8sM6L;jI2k*D`}aUJY`Xa6EQxEJV0ujDQG2*+z2H$KOu zavWHH_H#VNaq;VTkK^&@xa{Hh@Hu|OapmhcjpOy_IC4SLyMNC=Wc18}VU=1>0 zwm`;bTDP5!It+~8&$^6a;Sogm8+LUzL+%0d{pf&2p&&9ONa|-yJ1)s5d4VMXt~7C! zMgjU^0jsWOI)=XE9XU6IJfWK&V&zC92qV} z+kg^p=*rKv@P61_r}r!Ie#m;@{Wt^fr{vT7sT_`@?gPgg92dThPvaYW9mn{x<>z>d zX?)>0PUC~|MgPNe9FQptJuE!0#(d^ zOWWWd^p5e-U)i)S{|h-omA8+vpZ0Q&#j{|KtusPeueKjnsE1QJSVRHt*_I2U>YYvupl`2^mZ{C;n8y3fz~*c<-+ zdtmqvqv_1TMY ztU~=i>UpMlwLrGZuzRf2={wG}CpKT-kp=)s;BOD$#z&f7qN*%qi@)_|A3KJ{zL>y{V*|5DrtxKv<#b_Rf}iZOr=<+CqyIAY1=jRrbDVLn z!U9=G2hwsxG&!7)@aNZa09~YiXKU!of1_`a_5FH|4I@pu9X18*4Cw2RUGZ<8djoq9 zy&4}oM{;mK13wWI;)E2HEy-WcBSIJP(18pSbs$FpQ|+|w5F2Ek-}717{3JKTALqah znshg^25yPHVtO{LFUZsk>lm`i>=Xif${0=i%GX@M9)PZVLRKeR6Mz4IJ(| z@0lMz>*X^x|D*SrWNlodf)6?#3E$5ceyzXr&%9%v4A`RIass=aFz%2o^!iejrT@CW zguMcBSlBDSukk;(hM{-<^IDnKN0*mina7+8usdq38_`*a9~%;XYq)@2iqGuFe*E|A z9{LVr`q^I~uUNAM>{AIax6`$!eW!@hU+)JyU0|%Tdv-qdh0l29lTEVGCja5H55|*C zNPQj0+O3TzkQt`$s-N}Zu~G$yK{IB1?&j0%T>-?|Z$MOBay&7?E5o3cVgI_Vdf1>M%+hK2-&WlQ%H`%YB z^SbFgOw8vz6h2OI0`YU+&YRA|#DC7iyG-~LU+10l$yX77CL0db(9`c|Tf*-Je#bpO zzr*&K%6UG&S6}Dn)9>}y`Jdlm|4iq9em{Sm|M~s$c@BmZrsn{)8{SJmvKr4J!48`G zM}&_|+&=H+e4qFA{_NLz(6!V12cPpEU+*t|&P%`E|NFej?qq0f{H-^#$v)%zJ392H z?+(-4gP;(9dI#sr%GVm5zO!Mu&-s%*?Eg9+logd&}vlYU}s#8E!uqGVs^e~(G|dG255V!h(pzs`m4>vfZh80GjM-C!Q7wE0>kAfLHqT2O3j0N?$>AO zAIDARoU_N@*RvSj;Ss;(8Hr{x0BbTJ%-uVP&!Rv1FvTso9QZ=ab$Sw1!A!_QF!Gsy z2c~(7ctZX@@l4p<0h9GCif%KI5l!(w%On85n;&7jO#?AjqGd zE9qI1!B&aujEoSYP7&7wMUI=Zuj#pea2#>#&vQV$KM~#a^cfM*0y{91IBX1zG$!bA z|F8Gxr|)Au{G01zMLBFR{XJsX)Au-yj0CyBssOelRP^-x8S2H^ZhC$^Z}b`89NoG& z@AG`q^Gr+t7#CO&2*yt|1MGP^AKxRUaosc1_*1{e^N?fQFz%2Ke9wT<{y#T=?_cxz zdH>II;XOak^%;lh^E59Q@5F)YKaJP-XRMRY^=&lKGBVz?$MaymFfMp*BqPwZh2#Rx z`x$>`#&Grj5AyQwo~xV2v6#jY-}~qJIF9GZ@fB4cZCF%{2diUk`FlGf9r|#Cv??4)?|1 z@m$FJFq2%>licAwsA=8e=jVHnQ|jyea6f$h%m?Q0vz{@3NDd-t~rYy8V~ zv60~K|E2tX-Usjdk=Ot9d^jKPf$$xz$h%A1- zM=(4N;yefEPkMmHb*A(G z(ko~U`aCz*U;n2ZzZij@^a@1sCqKu}(f9G4pZoX!eXviaJlLeSp|rsOa%Rxc2qA#H zw-Xc!))z62<7--%7}s%sTHla=68ShVhhP)o!9X(YPuN-QsqcOnKKJOVLCwg2K)8L} 
zpU1~|J^nBM>;L_KOz5`%+X)1%|JV0#=dv09n+C##-T?<4u&KY#BYpq%_~m+1n{ z*I)W$nHP`$MiIn+@9x=u`x`EI!q8p(0OlSD6k7fy!q^aFi`wIVDdjewwtnDv{`*AW zum7O>(71}Kj{KYfU{T?2E1?B=2wdqJ_z*L00Wski5yg0NexZ!q?Nbhgi9!nI0Y#Mq z=jE$~Pm_*lyJ0#51Byr8uJ@~Sr4)N8mNPV}Rf+jdUn_F3(ctVQ^H^HUj`M@;0CX5- zoq>~qLAhkM#ps29HF>#fC7`*JQqN4(b(|iL`P1!AOFYo2++E1|-q`OOi3Km43Z$Gj zH-H=?eKH!kY`42PIS20?m2A?Ze1EtL9?|y8stm4YbI^iyMprM&XxX+*QIBy`#S0-^ z>g{0!y*Zr0)9ff5kmHp3^W`dNUVgA%X?tIzku=-R4uuDQ0;X#k-5=LOw#n0NV=bc6 ze0R{EV z!R>KK9)&1>-JNyHKQHa`S_myT7-vyT@1Mk4bn+`|9W&@(VGZQY=!i7Ftth9Rjt`Fdcx73bb@N2mCAeHsdARcMV zC=$V3D#WJ+dJDr=>$4T9NxX9WL464`?;>hsunE(DZG69(X=q&bd~`GI(0zr~xvb1_ z)tuH&ez@+93v*=KRZ_CF39zql>ACCeEaL2Tdkd`lV`ZN}YME~8@wp?%#lr7j)P1ho zGu56~m+kGIY72Ukz8_C|u{D>^d$idcX3uAYJGisi_O&qQ;cNXsA9?N@yK}`;MaW+vIUJE0&x1El@`D5$3?1v3NhpN z-Y$3m>czSP;MKivvk0tKJD@x$Dk0iVl@MTmWZ;LrvO!q`9|p5Wcx$&+BG02@y9H*^ zqgrj!r*{RVvUHoDQuB2VM)#`8ty(wdV1M@-#foBsug}b~rPQ-|9U4t9tp=eXtY+OJ{urqQmJR8w3*FQux3TK? zy#;+pcrBVoK9bnD0_nAy-DjmmqCuB1p9T?jAS~-bbKvQc1Vco0V-~M87KHP@q`1th znDQJoOzz#Ae3V*g9dqsV4DX3MJNXl!@xgu#!~~jP-E(EKEB0_zpFz9{?PPX;1*<7l zzQ9D$1G0YmUfeb>)FhFIhgHtv70WIki{(mXq>`}DE0WUsW{j2sw`a5LR_kdKE*ryH zHTR)&w<^CN7u8l>f$>s^Q%gId_%TMElh3}c7{cqyPv2;GcrcmQ>2XYByV86PJ!0Lq zBFDp4l0zj#wqSIRLrq*9KN(_wT3G@2PrM_!W_ z4VV`^HlBIF>jkDHeMLVp2;k1IVw+$Auq$wX+#8FR-OPS39dfTHyYWEtT~l1f&A6-# zLDmS8P{5mbAELFZHa;MXU zV55*1=_3%b*Ly}JeRKwlcca}RGh1GHdA`YGsz7(u>b6wR5?BtSVSXm)Q7`Y{p z%)4uq#XTWN+2*>t++9oFy*A0-BZ7Fn?9WyUYW>{EW;%aQ?vbj0?t&1Q-^6ur3pC*_ zc>A*Pr3!=%SQTjpo+jFxU!zCxa5T3l%=|3oyY_tn&xM0x|Hn0dALAN$q2wy6!F1gM z*_xEC(2p)C;s9|1w8YvviOp8K9`-TWcfzi2KmfBm#`Bu=^>zH%UdUqKt`@KBQl}m* z*S!|cLU>l|Y<4?;xTJY214IyyS^3^Jk6PW`u1?Ze#ySRyKn*T8D8mIM4$pphdazdT zv@8le)kE=k1o!vSx;}ICNCchX?bMy)Hkak?%G4tFu#$Ict#{$=T$6k~kNq=B$gRz? zdTt;4nHu}YJJ5Nl<1%V8I$gYjKBaCESh4|ClO3N|9b{vF`gJC%OP$@f^l-Yof(>Db zn>I|->_(st@R06c`flP2OM#%=+&gQZ@^%RQJi~oIoL6)una$gqFCWOw@#?;GQXcR% zV_EN6+#B`G1b2xVCsgui*q0>lXlITX7gB_VXxq{c`QqA+1ZP!IErkS12o3ToHV+_h zr_SPNrJE9eOLbEe;ftc=l=etZ%f-95Rx(2@&f5S2^+*&YLSmv zx-o0J?P5FJr8f1P^tvSgXRv^=6JJ7pk@X#QXCxWK@ll;}*5bxlCXdr@Y843^u>>Dc z<*ree9`j&n(!n35Q+u}QXkdKKc3xTF-xdEF{CnVkUxk17;4yAkt2s+s-7usm-kWSN zQ36}$I;@gZ&yQQDU5YoszmtZr*hL$#O1A4~(XTm+d%D7S-^sNH0^bE{A?p#YQ}Oh} zf=V^UrooGBWCg$HH25K>BL~7xFe&!8tZ$z6TctKa&%OKdRqwSj9s};S=g!epXnft! 
zieSBeyEKw_zKgf}Yjf_lO1+EMDCu}#-0P0H-ITbvZOb9lqgGgG)D?WdWnF~fuy}}O z=^Rw&>@HuI{*Zxh80+;ZgAwO0$Cqj>%#jAc7E91Z0(z3e-C8V0BL;d_nrHLcLm&J- z?c5pBzmIc6R#6_QJQf#3I!}7IK9J+qy;&@OP4q|b9>!?33B6=hoD#b^`x2U)fJ7i~MC*y=UpUF|a2o zi*QTa!%*_flPrQD2>Tf|tf=GSa-6R>MRcOm-g_6F-XnXnNoTwA>7?^( znmn_^?eyHni<3FuFVDN%JZg)7lip;lItO26={a+g0t3-e$!E}t=-{r^lMA_ENOPyNS z7eqL5U-@shh?nHaw~mFekr!H*s;7p!%qbcXx9GWSZpDI2NYkE+{M%=L*Z2CiG}~aA zLJX)}&f~XBkv4mLX<24p&>=bMhxGPr7Ej`|Dr`N~jjKd0bFr~1wgOU60WoJY6L0z8S(i0 zJ=8VA?ykk|*1Y{WcDyX{NOX}~^%BJx>Uvm@D}m6R&#yW0BV`nq9yY+Oiww7H0(I{+@|{Yk8L`aYpXYv2jvR zw?}RTet#TSg-oRJ>u`EJb6+Uj=Ouf>wps{oD%zBPNCzY=?d7)X_7}U}x?0elo<~U^ zXqL3)y{?cqZ)QdO`s8Y`@kPYlx9_!et(3K~wow?y+2VG`le0}8Dq|7=aPAzqNPCYr zC5fNOs*T(ioSo!<@E$wc=Pxw81<8 zq2$wAN?;yKwtE6ZUm3j)R))X7l(N5d5BKhTT&o#B56*snm1VxQX6#2|-We?X+lxJW zTx>4`f7vIm-Zi&!KzC}xd1QCigC{%R==bB{Fw6V<7-KiH$!_T0L3it4%>)^<+)i4;_-pMh#|P*o9Y(Ej~Td3?RT#Ur>#9Z-7OxEW(xLY>Fz0WJCKj%=H-gV z&2y&B3fJ6hGkIHG`rTuZHP$w?syom>Y+C0oQkIR$XE#dO?-0OA8mXdYi zfig$VUMNa>gxPaFru6#-`adeBs@mUF>HJ`90q!OZ*?xJjp6Y(_s+9L@TY^oSz!S6C zYp-|q!QHxpny+osnA6#!*{#IR)pu|8o?i6LZNVto`%>QWXII+C#?q~pn&0iknzq#4 zY9?`ix3;nOFYX8fL`ZsncpHDM-RiuvlM^ym0&EA~lw)7e#SYkIbXe&`pc0o_8v6~n!~7S`%< zFy=$?JdG#NT%1bzIFzgHzS@!de%0H5qOk0q*O6@JwU7>tyMRx*2)!PBYaleoq)Uwy z?HUrDETe0lZuVR<;^X}F7?uler9HKv=;r&^?hvkyda4YoWWIkdFU8W0x{X0=%f`Dc zu3DOPPWfW5w&3rLho0HiV)eK)gqjK!IH=J8?!KP~L`mKo?;#~rGYj%@`2b#j+IfXI zl)Q62>XLyj{aQtd6m6gP+j21=81anl_Y6LS^0)@K=XHM2F57muz4yA9&ZvI#C`$DA zg2tAXE=TsZl#8{tIx0&o*_|)IBpsUdvGc?*BRfYv?p6O8+_tt@_cY8zM<>rje1Fc3!v+VOHVK$ zh;$Hs@tEW7*msNV#(DGaogeS`$7AVTVan3s0+pf)yFV?01G#@iJL^L3cCQ1U&lZ;h za!XB@r4eumNBwxh)#mfNq}}FCA3i(hE;6L_-XAZs5*?RWq^m6K$1u_V|2tqa6o3B(#j?}O}R_3nxiavPm$zSJ_>Jt;3B{F%K3mcvy9b_ zt_#MY%+S`7qDh4&jVOrW(1QA!+gHnL1QkW8myI$Ld2@+Iwo?UupZq~{?)9Y|fQkm|iyuaI<(H4Dy{SjHnCbmvMWnBs)L!;Y zWuEj%pf^PxM-*ll+8KLl?VYugDZ|-e!I$g2#Roa6Zz?{b^ZBUlvaPZP&1+L0#G7*Q z%5k-z`>WA`T6KS&SS$BwylS5%z38OWj{z;+ijDT%_O!8o8O=I_2O^c$+(XmGdWEz| zQRF@bGC0Z|>2Y8HFce^&o@oHXS7#Pno@d_%{>EctDZ!rV*5zD>gGg!XNP4W?y}6hT ztQ(i^rA*q(wrs+vS^E7_q(uA3XB&kA)LiauC=~n|%mKr~cIea%{bL%?v->d|BK@_G z{b-iYEcn%b`@lcPyK)z(;5=7VEt`NADpU1X6oZe-#@KaWdoF>2M$2_eyGh$!W{W`o0=WGNUH2OW> zTRC59qZOWWUP!BodJv&zXgk$AQs+&indfSn4ft&B+m7ALClxjbIdg1Nonl! 
zG-nZ?bLS?1JKU+IXtf9ViM`F1!I=7_Ddc@19!qz(Q!_IxO&BU0Z?QVwH&0SrddtJ= zNe2llv8qEi<_JNu=MBQYO3a}i|lAC1Ha|X*Yxs1(N+3rI%i12*r(^=R&DdeqwB7U?!Gr*p0@Jn2?|z9ZRZD_ z%z@JzUS?rOG&+99w-;qbyT3doFN)rd~uy9sYyXbMY^{p3`6d%u~9*@{|o) z0KfB;H1d?+we9~QLum&iJtcar>nEQ(eGl-$3&~fbFCtL1C4RaRKLvx1a}k0%jF$;9 zrHVy#0Z!#YpOo>Z@sK@d(qSU*v~Sa=PXi=n#N}pnJbx2|vdxER^Q#N;^Cc%)U_YLJ zeg&;C!@MTC-JNKF4?ZSA1`707@PdBw%}5;MzeS8v@kR6@3dP6uCkgvWNOO)<=X$i4 zNMd5|lK`@EVoy#-qy5Qr_#U&NsZet)`EkxTzPB>G}8 zui^s*2YBDgo^10x?z^+L_nZ^-&YlY8E@(mKX%u^Nf75GYEJwyY z2^V+g6w6={*Dwd>0AVDn`TM+S-RDj5kJYY94@*CQJ4d@5irjGK89YO%elf+yuCboDsI{raT?i)R%&QMW3SlQ^>^SUy!gY2z9h;i>$d+iqpZvZ@a}f5dVB@YrdTNyHu7rO*?)#)X zo=%>7=Pfq@leIK^R?^8_y%&D7)1|a~ZQNx(Y*sJZcq5;>Iok34n9>osL*#S~yA=S4 zfK?G5!%QuGd!45HllidkS#Hny=ez=u(rHt8b?H`h@|-2L>$;DB6!hma_T(4|+qsAH z#fWoI_#eg-X#*Fvv5j1-ENAr6{H49~67;#X+7Y1d2hD{pdoE#ZOkq|fwRG*?s`XfQ zQnsznhmCuxlQ;F29ss)Q`!M$UR6J3v_M{dxz#vf*PiWVLa^+T{bRxz&1mD$#+S7TR zvDf2rhfaIt=)D<#Ex6={J%31xJ_+^1YdH|J%cB(7Ct~wx>JIst0e*IUNwVWJOv$}y zvirv&@RU=O$Xav)b;z-~H0MU+=3RNUdA{!r-)PlnAKmGA59yxc& z(_)p`IU+RJ*VLMB<78!!UK;O~gMe7!aaYSqWIx5_9F*RF+%V@OY9<=)oMPs6ce45y z`kYaqwo)l^IqcaaegvpsDrZkhB=q?x&#>7a-_awam9=spy7~$*WYCWHu6DRR)LZXe z^_?BOz3rMwg)_9?704DF zQD~R;C2Nxv=md`8N^5)~K%H#d7L+DPd)Dsy&kW*cREY-OWXCN zO;X$l-v0uyxME_Trvri*QVorx12_fzaN=%&x0MzG|mvZ#M?4l0=$O z%HbV2-4K_;CY) z>6nJIeDP|mgnhED1fK#qaF%&GhYeXB#zCn&#AKC=n}`+SNsHdYIF4gtMWWT;{U31X)0GI<&w1y^dT|)Xbr?6u@?A~@Se_OG<+k1y zF_`=@U*%-7eZ9zPu{bWN{=F5>kOO&tMpyYFPn>2=+VNcTt2bx1w0|j4zrILU!x0>8 z>vb!mP3OWM9#XaT1~jX7I9EAPJd5KNa3x6SQsap3leqB5>Ezo|~;T1jr#VkFbW z!#=?2IFYEG*@E{GAo=V5{I={h4A1#Ser0Mr)>0y;#z^}j*tW#`{BWRkeuoHu-D$qN z@Wb_;u1BGZR35>N{y4v$3gg5sx6UpjIm-8=<(n6XtG7?-*Q!sA<#r8A)D;qLy(mQL zy`@*|i{`RpkCWWz?0&j>$$9S1HD~bU*10%a>rzTf=^_f`5fRuqub-$n616LY@2h^; zn!3@QU*0TV`&aq!ZYXEusYAMdK{16%J;`B5&)ojHQiV2;J9>zkgn9-K4*a+se`Qu; zB%Wqe*%Z~W(I023;^@k366*psIi{JR)p_n*;WH{-7y%JjBbszra;~D&>)VPZ^q}cg zkJE08LrrX6i0of>ZSKFMwX@keX zkvwd}Vpn$rc}YSQgmtH_r`N-KZ6xH|MkEMu@ELmg#@Esb;&&JBAB$5CvezY5Zgf9R zPJB8y>-+rKp}&32If#gg0`lv>ipCgoWaeDCa__y~Qg$id zo+TqkL`W!fI{naEZS{HP`HgJW(K$H29Nn;IoL6DPGD|I#Y1`yG*q!&CCU4fuS(~0q zr6SMEZLo_va$&b}^7JCa@H7Gu3M8P-t6D5cyhVl$cy?!^r{+6<4RJnmeA5>*E`P>l zb=u2wdT})y7vQaErpafzy2(VCo(|sqP2GZhD#Y^SI25y|b)o=!ux$yw( z2@Ko)xN6CnKZjR>z1t`!$eb8KRa@>(AFzq_I>{zYqHDUGIA(|XeWGuV^;Q#4@`2fC zr_)xfj9Z&7V9UIJ+`hA2Ovh}0TX`yuP?h=Spe>Apm~Ya~c?xEBsvkS69;#iJ^``Wy z=_lEl0#(G?OOxN^V^Qy4+M|D_hs}I7TiJ)(Iz3!g&bTHXR-36E<8@aEF<#4Py}fn| zB4DC7roQV&O!o)EVfTyMkateI)tJ&*&lg58kZ^2%ovA*5TN6~sZm#!S8i~szI4@24 z_{^}5CHDTjgS#)GJXnQLmJ7>)GjAx?o519l7Z)AgW8PlO-AK&r@fOUz;CW+Q+)n80 zicXPUo5dlugfDqemWk`_8m6RdV+m<~ufV=X*x6ad zOQ2@Y-l-XXmh(q)Ja0D1jkDy(>Ui5GD27*qy$-JJD?os%E7_IxG>Od?_-sQfhLK8F zRpD7!xzYiYA4`t8U}wwVrmUjquf>zjy*>C*kE=l+IatS+bjr=Z^d|G$u{sG0e@zd#$LC%dj@e3&Pa+|oS@pEK zFUEG;X?K_vE}8q*r}>$@3$sL|gr*l*{5e)I_I6}M5Ny^geF-|u?zq)8`Qh-zuI zbTsM>!(P!+vcg5wf75mLt2pBi)-T8wuN!rHdO=)p8r>Cg^;ikrrEurIwxU{`*`8X# z%Bd|3?gq=_<#xe1(u#B*(j9So82sM$5BLe6hkNqOxUmrw%}JlL{SscOZiJB+atM`w z)YAF<5vg5B?gd{`51VDoMz18ApH4Ni_La=%_?j9oF}LonHBv;Nxsq*la3Rgz&ggP6 z@nC|Ust-HuZtkNwx4F&tn>n*$yG=g8PbyxG>>&BiT(g3Cs%Cy@0CB#&Lz!J)%WIua zgO49_`I&@yMfK8G+_`9D1kU1De#u0Cb9RmGOU@KmMr(#{R!n<;?YvJ=FbfxnK9SB~ z#6b*P4S8D}<&~qtrHjuGJt{kX{`(kreGRN) z`D+8oJ$xAZRtox!!LQ)nn|lF#rR7;3vO?BqDxaYSInjN-^hG$+F8DJfo{6DTgLC_3D22F(-`)?JvT__JgdpGfy!bEwIrN&m>x#<5`1cz+`E7J{ zvTxD5l;ID_hXpp%UG&`VW_7>4tb3@Hl%suVZS~s4Q65}gqe4R@CTw4C1%?Miz373= zKcC2fg_9-<*;LNnBJsdcJ}d8k=a2i_UY_%q4S~g3Sf~6%N{&K3n}WW=@3a-@qZon> zw`>70y3ZEz^}6dG0p}Ly*SPbZ^T0bJC)_w7*)Nm&ysA2e@1flN#Lxt0Uc;CGwtZ4} 
zr%O>aZ%8UFGVbyaV7VOmIeZOIDZhLE+}|>8a|Z0o2w%5yUO9YkOA_OM)fudL;a7^= z>Fz;QJBg#XI=S5$Pdj@%BS?1C5CsqtD;Oz2sDw*vCj(r(jk3or8pZEm*#KQI7+vs^*%&C5VqwVi(gssD^5qIxR zJ1dw+Kx3MFN{1ttb^2rPURZjGJuJy*-o;&Pv*jLJpX%DN^(^dj*p6&c$#sF<8xBjr zQ-v;T&fYROCi4zDlobb&{8Bs*Qb(}m!yCz^ci@`m$rtuf)xdykf4KUxz73j9oi~Le zp7XJ~tmBHUmCPl7Z|^SB<43#i3(2Och*Mr+%RR|4kRYkJc*LJa(S=Y;|^Q<^O z(hnt>5A{=hF~`P?DAYZi18Hx-9)^^pW|ohDdx>XjK08Ow3+$uk?kEh-9#NJ9I8(?n zcfo+35*Lfuu}LwL=!Dok9r!@EJQA1{ssTDp-9$8e3*~En9e5G6UH2u=4j1A*u++UG zWZ^l0FwU?E^tFe?UcDOSFQu)6Lo9wHhGwKP>c&S*#W9 zOqfi`gJPzFiAu>^R60HR+Bw1$wCwLqjwrWY4cw{iH0O0GJEZeqn&@vXg}LyPx-n=< zJDkC~-#vDJFGLxbhr0(_W+=^bQ55C-wzZU32U$&$9d1`IcuA<0YWNww)}oCDzi-)69eAuISUylA?2Aw%Eb*POfeHMmYA(oSIkY zsb41!{;b02q$OtG6oA6n^V=VzMue%M?`)ZEP4N06y6 zvPD^cJ>;9cq^s2hUZsbMY22e_dMO>!DuI+{_Y@o7=c|}rikzUFY-S-)%+iU0|3B5R z;GJ1|78$lHm-|Q~f~R>p9K&iYzcs{Z&%UW7i=tL5df(iu1WE)VJKwZGz>4k0fRHUC zT9Z9EmO_blQ-X3g4^^{v$L1dR2f`)~7!av{C_JB)?V(3mf)$-l#+u#H7sI%|qUXi) zk{V7E?c;r}U@`DQs3D;*j*(IAa`CKkGk)%Lj$G?oP;XpS?Z|vinE9qwTg|(Yh_zs1 z-QRS4mQ`9!9y{|}kQ*}_iP4v}D$`Y&$q+&L!fGvUi-fFQSQ)qSxQIi>ci7!Z*H+Je zTi<5yar)UEAyl}yt>JE#qOI&>LT$w8X&)fAmUhgpP9x_5)Wwn3hW?(5yCW4+yRjxYHOeb=Uk?aW>Ht~u04nYJh(FB;iZeMWA#u+uI`!1X|q+jyn5EAj>~0S zVuRRkR|k{UU#EB%s;73%(@o7Ul--^KUJjTg%1F$C(HKC=qE1*qmh7gpK{9Q`Yl}-4>=r{KQagu#i{6i> zMNs5!TJzd%7gk*zS2yPcpN}d(ysxZyn8k{z2B~4_E!@qj$S~9sMC}(NdB`W~bZTW5 zIf|ptmeRl-S)v-2fa$Qu#TEsc<;*^=U#?FbKuQWWQustcMl&~;Cuw1xUYpl;Yc4LxM9*N6`6%)&8}%-smRf{>IB*RMh;R0F zF74h|FezDZ~hhg)4&O?OpZ9aO*zUweK8Bb29~hG?hYS4eIXP z2T=XoL$;F)ahIVTExTTu)+r`d$an@sN1PVE-W~yhY_d&jymG%D`TWt_v!lAq)UEfV zx6}w5j!U2f>*Zm|1YECwUEtKIH<2|wKmY=KsLB;p$t4xiu8@&-D1F*l6zX*D1$5w% zZQZSXTsB=vsk=DYy()P;syp^|u|(Rhz2jlmM1>E6QhkkKUKoqf?eZGD-0?v|ISTc= z$N2S{UwmVqbTh2vp$)hf^3q%HxI2V8bWpoXO10A~!bvV9TQ$3XQ%7%mwcNweyEE*j zd5rStUn#cj_a!pi*8@x=PYw9Yt20l}H$HJ@FA(!c_eCL$nVZj^PnC6$zen&{U>9W0 za~bEd48a?x4vz3!Jb@+3bxNp~IU;8X(p~k$(drojLvKduPIm{XaJ$^I)%_uMBS%7M zaP>U`3GMxUt5gerZh2g69B0|ymmKeJ_i)a^`qSy@7PjhT0ceJO-V%tl-ze0R_SV}C z59+Zz!eX`G%Sy8@yoJ#NQ0VEwjHvS^?ca$xE z)}xc}!zb_jU7wa%yCO?l;NM*i6{p-nnPMe@{RVcDl~Jq54=YJ(b9jo}hAYUwH5TBi zgYm%FsXNdE_K;VU-mtwV)KKV6&5%OjR55ZE?^tDWEXA9#TbQpT-qpK`eX@uuZV&W= z+}zKx+9EN3VXhN8x(`$o?M!DsyCDne2h>6=)t8yhkdyM`je5Hjn-_C`?4OV{&e{C- zsBVy)&fe|ZLjpnRF8+zChwW*nF6FT(LT(PEe_{>6I5P<0urfENl?;P+nY`5?F21h1 z$t*=k806XJ-KE27cBYlZYSeC7LcA~U^Kr*9_pxGs6n0abQS%d__z+k)B5ln)K2y$e zE5$ImtP_f}9Lrl0+3N+tLVY=3oj|)-U?6Z?iOToBt@2|tEabI_d{@od3ryYn~>zUzt}iV<#q>jzE5h8f02MZpT89# zKb7d4PX&K3Coa?XKitvJ`|KbF zJ;eHp*kZFUz@P`elhhvt$5&I*eJrhiQE7Y7F7-gi-p7J`x^+OJ>{VD9tGenwA$}0b z&&k>KN3royXyC^k{Iapb^P0ofV^W1TrjG^}Y5FB`P`PumJ(?+sNFYf0p8t;id6e6KqmhqGlI(C*6G98533mw&YjojqFsi<%$n# zbma;sUSS8m1$Gp;l=d@1)A$@%{oB9*Pf-r}gR4V4;TMh(?V}ao&P061!mv64ZITio zKa;#1cyba>8E$U_8Hi}cbl#FVop(94$7DbuSe`Y6XF0H;YuVC|8u z{78^sQFI`hUW$3(;->p2kKuIhX1I?reD)Il-895ZMnn-^!4+=P`O;H6Pp8DgZuN_i z2ht7g>jU%fgLeCcg!_$uF8TvEfms*0FyL)|P%?kvKT@C^KIpj*p7M*V)Q&<DVvc7KXgrsZDUb74M}g*;NQ-SE4*%r;`BZf9fSFqOiEmEkX9? 
z@mL^0>o%q-0M~b$aBx6_yY?5o;2IHMVn+oxuE_>^LX<#L@fiz#ut+riolv>IF_S+i zTp&-d5(eTBL*v62bqUv5VJ4gKn@hKEEX1S*-j}fy0q&@%n#~(t`RC{Ki~)loi)lfX zK*C|w2Si}t8n8*ze{fq)(g<_Hxxw#bteL|&4 zuq+eEV<<)$&<;=iD?K4s0xw2=JS-95ub%fx4p7GL z&x3Cm7ENHvG#4%TtpV-*CeE|_1*Vl-J|f8pBD4}QkUH2UCs@folPKV#ml zKL}`C7shb!7us*SMvNa&j-eqH5mkG@N4+bMxMk>wTewGn#^&``No-;L8zS&$+1k6# z_$tK}*5`Q0*Mbidc{eO=cj=~h3UMp zXqnFYe=^;#X$<_q3Qlv~4{j*=X|D-C_%naJ)7f`?1ghpA&ruL72rkM>lDrjMDZmqk zFXZ)IWs+;%#43{Dq}q~oYniCg6vgHNpRwrqNOaqW1&1p?;AWr13+tb+4Q>GwQVVkF z+=fpdGg<$^9}^h$0j~uU>Jt6NlLMU7sQB^>e|U8~Bl2{o%a>#@xmtbW z74UqFrCGk;~~6TJiOSjdEfT5 z$(72}xd?xp%OscmZ_Z;n*S|lv_3Dpv0RnY87fh#BZ@FI12u{v4rh7X4{ke40c{==D zf7gH`!t44Du2>&*rI0(OdMS{1>4bOs8BgUB2y-5N0BGiE+|{uj_>CBy z)&dGpy?-I}Mz@2(EC*uf4L))=5r}Re;zFgCNqasODM5zZE0_Zk>><6|K@whm?%f?4 z`J9|;q!Drc%bAF$F{f9Vt@2*HV3)Pze~p(qGFVs0i)e5WdPNr^QS^+lle|oGrHh`@ zxVrzkn;65fmIRduB@i(+?Jy>DTP&#{yvm&w=E=$3jyD1nxD+Y>gSLurP7~^N!t_t` z2#{kKGshI<9i`r(t;t+h;cO)#QNFm!K<5v(h-NQjf0aC3 zSn>hR5713y2C|9UOlar*XUB&v9ZAaY3o~F-;w&p(EXO)R2G3)Gz}Z! z>G2SGNDEeQ3*oO8tju7^hmrQXe-;IDP=H45g8>dd^M@VuqH*pK9l_!oW77ZDIUX6% zu2-1bWl8pc#KZmE@hb;d_QD*~g*&p+=A1rsw=(}%u0_oM_C0={yX6M9&JwO~LI=fv zU=Xzlnf9-U;}0rtj=A)MSN@G$`$3NV#vcDOwo$R}_~nE*m9V#QmBF&ne{pgBjEi}4 z=L^*d2tZ{$GO1ZijHev^a^=A#8hkp+lnSuTi&>-=tghv#J%ghwJVwz*aM#H}LU)bRH)^^c;Pt!`V>sO*`;{ArHAAL?Me~ADq{%OzwNN3zU=^t z;$%qv*ZMWZef(KB{6jk=e^~oYYptoxl3&RBX)Fas{~MYMAe|Tz3S2 zMQCCJ&LxPDz3TvB+k$n5y&D6TUvrFrUH1j7_qXZKJ+tKRJ=4L_fBLug44(^3>ihru zd-m5c6Q-^av7p%)XUXXR%20NE2h&A?ANvl^-x2huiF}IV!fBCv+$E5AFM1042-_*b z0~}W;dtI7r87LKi{z6S((M!|w{{P+cAiDprfBld4uO1-!?`Hx{xCXck_+2K!JmlcG zy&zn4)nkA_3_GXje}8{2Fxqglcd;*Sa1E}hMd4nn@{{kUAf zpgKmN2A0J@p(61Docd3{25ujlKuU{awbM7PZSr>sIzRQ#WB%{!`_e@28*>0C5EuMd zvFekP<-Z&MzWc_%P?jmdqrYSC`5!(H)3fvw3;E!qC){Q_f9;e0=xc6ZPd`ui?;`!g zy0B*h4nDnNF8x7uPM^2?`##C9B;Xot*wk;a$k~T|=*Sa>7ieAH`?q}|5c7QN4k>~y zw?J@L9`>Ao93XbmH%IHI?{c7-IRVzHz~D;D5Iar8Z*myA_K6G6rN15P)~R~~0#o6A zBr-+4P!+2He{LLWyhiayM)|k>0G!QTt{Zw)L9V4I;TzVd*rbU$yM_Mtp-AZK-@Yc= zFc*dTnp#&_|K+gyq+$6*VZVczg-Vs+j)j^8(aP_2cVA8GZn($-Bl2^92TLjQH&L zI!0HtKl}6F{#En-WluDLCq&o`a*p#jU#=Q=hZ6>HF+stg1GbYF&vwnxc1Uu=Z; zgt=EK{J^M_&#%ju(j!M47U3`z4yPygO!UqYfAK2R+VtuRR3xZr!c0>mLCO}~Cw?&P z1A+0K_A{{L#eU|qzDLc&H!k@bAuH&3?mn?Sc*_5d-+iC`&-L!GNW*xGeT2jaCs~N- zD9C{>)ot~SEhffI5Mj=y>Ru1K9NR5j!)LC=yd%17K}gYMAt8RXxmY#`G_U;|KBV1J ze;H-?_f!0FN)7#rE2j^7B!6PcG}g8*A5X_ge;qf) zeb2DKp-tYpn+`DU(>m{O-%lQuv}wNYM=no3KKNhXZ#wR5pTY)36KjxLm*MBQZ@FxO zn9T>ZDaxNX&PHtiir=WHyh4&}+FyO+e>b0fRS1kS+n0VHXP8BO4|a5I&&=BT3NDMSeI-sF=enEXsI-4Yn+e-<$q zIs4J0PH7v=Bhx6U(@sUedeZX{4<9Bjotp9a}jG2KkKy5e2Pj~v>ReL(-@OafANt|yG^k< zNMxqC-GACn4oFXyxL59=N6ErbrLcGig(Lj)dnY5#U`6_Y?}k0h39^MH>;mIIYA0kQ zdG{t94$z`*(O0IwP0kzArtsboi|~Le=E_UJ?|Dy~azOvwPOxBaC(C(Qk^V2W6VO^e z?evY$;n|YsHs+Pj@4kIZf57COD64>yjyqP;w_*-sZ?k;m?BO|RhVW2l;=gW><;`rOBfFuitI8eI(gZ9N7 zfEeWp`n~BhOqZP9at0xCc%vQN8!8N<9iIVA>szFbrS4z94~QnQf0n)uQ&FD6b3c&vx22yS#eDcJRHAPVEY8iQQQwJmI`J{cijPzJ4$fyD@<&w&p&^U28Sho zoC|p9D*o<@3*N7$%7U0bFjH)==Ic~EVK)LaHexxp!!s2K?e0lXko$FmSO_YG$Wu6C zBv>0Dv1sV}+&wpo_ z44C3jkz-*UBa8KfmbkUZY~yFCLSBex0WE+f#e`r@vGGL`9dB~z@%e@<0!`^nTob#u zfF4g}h>Yp)&rn}Wlk^-vQ|K?>pTUoi;ERgMBG(cgfA0B6zQ_1H@Hg*o8;yK>_Gn3`2%POJ~xYr_wnf`ztH45sDrur z(Me!YgIoMp`~LjlH0@)*DYeT#-ZxZt*oNXZim2($t(+XeubgxM5rY#DF@}9mnzuw$ zQ52Q02KrGhVAAF2MFX{+e$mUe>_to0IsUqsf3Oz0dlx!D(g}y8;nPGeM)1;!4LKh< zkEGtDOJ@YP5a>oaxp0lA^Xy1+G)-rRWwNbRo>jE;gSD_Cf!iAo0KSAA0S2 zLi9nN=0z1Sub51E*7JcI3}?5bg-dWN7P~}Ea##@oqorSxLxmd+{LCEDrwP%GkqbV_ zf7qhf@}IbB9>}{1=Lig?2m~*%o4}*;Rtp692o-+>D5eC=vV>!RT?Zam26oU7fnA4G zWacrz09rr=Lec@eC(wbR1`iL6?-RPX6_twy^!S8uNAXO=hkAeUY 
zj@NIjRX%@*bD8jq5^#!>u1V1B8118_5nw8noktOaH-?+Y??J zIPM9#{{6dt`n#!}zoXB5A;JZ;lu^X2I3JQ7$8L_{Q3utjQ5z3-@*@$ANToz`#cD9L!9vI)3E|ffS`x` zV(UNuqRmwS-}4I4xj;u2lCA@he+}fL347-+f^_F$?c}O+#Yd$VA}7G50xyX)^@M(e z$r;f2zwz=DQXaTC{(}2EbbnZ*jsZf5X6Z!k$ADEQ;oY2$g{*)`9Gk%K-@Q64_Z#j(s2MtPo{y z`2BlW1J}RCegD1&Z1?oKJpaC~Ojtm{kfNv@)`TL;Pp*t|mvILG8$Wd8riVw~kh5WyMp8D@}e^0;I_b)bk`aSwT zkoBkO`n3t+{;z#!`mBG4b)GV-6WSjy;9fcpgsFko{PbTTZ29T+Dgx>cY9UI`J>0kH znUbdastO{|xip4RG%axBcxsRW{Isj`!9gpGk7;9{;{RAtUqe>%c{dU-bLqpYZGIpZN9dZ~S`y zkN9=rAMoqLFZ}v?Li+xJd^i3?zHfmd{vzM=f5L?S4cZ=a4$$^f+dRD=yggE(Wi1X0 zDi>P4ef%KAQAdOx^FgZv>FLTJw7NdwFu&+^U`l1_`#KPwf3EvQvrlOIFM1t_Qd#}J zKDBT9MYE%Q#V`6DXj0kuzRmz04oqy!R%9w+JdEO<> zYr=>-dNY0A@?ZZZ{0p-Ucp!fN7vde)0)^4Ej^Tk8g)RrMKjDRc6GgpT#P7U$o95=} z?>`7~ul{#<^#2Lu^Mq=i)*J~wOJ-C&17c-=5ZNDG`{z8AssF0edhzeki_D$G&BmCYgoFm3c27MK4O^f&ifC{{}<8!8$S;ZS!A+Q zwDI4x;a`0as-bC&|L<|=!oME(w}|y=5Ak2V-!xBr5dI;~nX;3xc)D0G=5Ray0|Rcg zt<+3N^nW~7cus4}e>xVJVyS(U!0$WX=VfY}|Mq&OzJ__NpT>Ca=iYyxzvSn>A90_0 zf2>7+`U%#Hz-CWa^Bni!4R`tRe9X%P0%S&&srZ{QKtz*Mc_PedZVRS3I*a)~9%0u!b^m&KptL{dIjGtTT}L ze_^2g4^F#%VE^zNjjxa8-{9|o_U8F$f5Ogx)~(Yz+Y4b^xxl#I;B{c2|LyD3ItJ$< zY%s1*KLY_SRTHiq1*;f0s}RpR#_0y@=-}t`CoFsU#=0NxCj-fS5U?(uQ02+=J=Vk% zs{Hdktj;D>`R9B8iz@$oAAFBbr|-k>@t^Ob@A04SEtYdxlL7RV%`$3z3kNbr-{~U*P+6Qg^MYjK-&A-P1ZH_35H?83iu$2*>*$EIOVO9RK|u>mMBd{T}@n$A7=ay`JJWUlco32opN~2gUwH z$NwPPao?uWhVOBY@Adzn*uU5Re}iI&<@|L0Uu661d;LGheK6(ypUCw7Hy=lJ;-$^Iug{nNK6)cOa9 ze){z53AH}q(1mF&@I|;!ICSlUEdT4+4^kUe+Lmiq1Hc0 zdGa@w{ewftz5F8FKREP!TGM~9>>qTx_#4aqITrWyi*WxOTYS!W^+!93%5NbH zk9oHM&RoQ^ukV0hGay#^e`D?V?>qiQzmxp|t}zp8y?1{h*s<@NyviqB`wQ`}QYn9q^`<({ z>lfMHpau~Idce6S*uVTnw*$)_M>RhCe+SF{-=Wr%e?_hT^*T~ee+fVQ^*T`N$t33d z2h=)Di2p#X|HJD*ts^)1*Uul+`uzW|Q0o<(nDH~-?{ARi>U2IhKOo&c2x%ag;q5b_ zoAJ3o8DkE@^?wl4|B6h<`7frQfjWNvAhxf(hU=eD-MGHLL1+Wb+)VEe1p5C&tp7j6 z`oHTR)ARB#Nbf9tpXJ49rr{s8p&r~iE37wrW-J%W*+elgvb>GMDTqH!^ze(ulb zbKjqzKM3~^!aSIsSBST8-ku*n2w*GqYqmB=X$2+ z2lxHIJU<*i_3No$e9-69zes&f=P{k%{cU=m z&-33+?}Ow20f7(X{pY+tD0sZzukiz)=S=q6hnmX=-s=U;l~264(vC5Evm^l*f1Dj^RMm|9Til{P z1svaG&OMX-^ZU0Y$#Y?@pz4vMk6YeIw#NHFA1i#l#QOXDoocjr&HD9P0)Nfv^)K>W ze?yh@{XFbCt^VhI&-(25>lJJPeqKj%_2+f0B7R~p(4CV2n^lme}c3R|3un1eS=Q)gS0=1QvvobqI+J7TM7UF zhK`aRR%EFAH&vC#1AXfhkg`@_pHOi5szU$1e|Z#O0#Yye;oHwFWUMU4eiui=H{F3NIm5|843)Wb?lE}$4LIe1Qxuw@Oc( zXnyV6ed54-m3;Nw-pR3>uLGjx&YOAo}>2y@yN_w-{ zfA_|Dc;cc9ZO0vmAKaaq# zYWQq$r005yIi7mIDjTZiA3KUEt(k5Q$5*&D8cNek{;0a{^U&$V zQ}?b3bWx91brOav7n}Wi3)c6{hQab?e@C9p!l-me&>&3X{bA48&Y%Tw#ew_vqDh7q zVOuiI;oxOII~)QHbb<>xb}RUyIUSkNrx^i*CG*Hwq&=fy+ddm(-gQO^)l{ft6R(us zmJ6WTmMU{(TZeX?<~>u|MfE=PVsT&r=~HjQ<6a)*_{47-+S`Tox|jGsC)2s$f8M#h z;1bO`+Z^H&NVsMZRN{CDR(?FI=lp6TWQ^4AHjh$yjk8yKXF8)Hs#UAfLv+rUrw;p5 z*w~pI@a`w-mI}{JvgpFcbzzhn*$~2wUo8TPGZyzMx-lyuDG84CH|^sD$-!vC)wR@G zGB2Z7UGLA_V_$SydxEJ+b4s3me`6Gn`=-3zccg{MtWNV;p4{tpVYE$H--2}q1fV+%9H}~b99zd^0XHTqRg{%^QQS;(2R(T6 z<*YZ?n_i7i-1&6|GC6&*xpARtKL;IT`|y}hzK*`c>|CkZ4~jZxnAI`7e@SG8qSix0 zV`q4*w4FsaPoZJvBKMZ711tjhM{^eXXHS@W5-D6)|({UDMzMR59^boHgO0L1h~4?RZ@y^N#G-o%xhpB|FC#~FOK?aL#{51v$^m#CvOq2 zmb#n5Y;2yddrFK!?W5d$e;1~(g6Eeli0=OHwgjq9=P%dNtKg7&n{c;}n8z%iiTVq)I{kHS zUg7)wu37L<$?F-`qw~agE5`Y*IX8cf%-TtuiUHj9mU72(of;t8e+p-IRs^;F^qiv3 z$>^l4Pfr*QM1JCwXI$N)F605jC@KsOXAGTH2;z6IdVB9ECnoS#&lj+0s>feunt;O6 z6HygMo#gZ5uqN)$y=rWY3c+sr+Max8k-K5+Nr3U*Ug4G6D{_9U=<_)*WwVmKP8M(I zU~reuLjzwAe8JYaf12I;O*^{!6Q1$f$mxJFHzv1cLBTCou+YxnH~i`#3+q5swuZbc z0F;oUX#N801g?1Quvay=U#NgY`LYdx5Go=DRh8d;n^SLlzBIIoXwp~J?8p1=;``2H zyI#T{6-3upz1#}?PUK;&3GVu3f-#7f)Z#lH=?Me>zi~Z;N(0@4GhHb7pjt 
z=9{&7lsa%Lu1x67jnx_4LC0@==IMIlOA+em@nSuDKeQC7gO*_xQe2dO-8&L(UbfJQ}noxPD?s)t#4`P!Z$=%T`aGsr3iy5e~OjFHy#fG;N^u4*17|2l|pQ3 zC(G^#ugd1+zN2wp(c|j^OI%c>sz*@S!Fn|`)K`q#?h=lmsBu^4Mp5nJ z5X{*TOuJ$P`M!(b*y~?-@?2}$(hBo*Dc_&gVnyx8=t;<2yL8AbKQ8CU4JnM+&Qt<4 zeII7oe{YySu{lFjc@#!5rP(3e8d$TM)g;f=#D;<6F>svo4IN1j_}DIBLj`;%nZ2vS zER@vyPA4{pd2qcRtw(ZyUPJF;gcKILhh^d2cI(t$h6|Q0<`=gNjW~12YrnoMFC_fM zL^@(yGQxzCRct7lfysz?Huvj&98=-h-QO=Sw{NR?6PwaqEyKn6wQBm3N~klH zSomts8o)?ty40%l*g$6nLIc4b}HVkht ze@WEnP&b#QreQ`t-?!;?<+Q=l_r!BkY&I?6x`kjL`UU$qYum96R~rugoaBiYjtfCK z!`JzI(btEC%Fb2enw&02Sk2D27vyZ!XDW!#qap0sB)cZd_;?p*` zunGRsUBY}9zL)W>5HG8?Gmh;&%CROaUL$<-w8v?kDo@HSq-wkI^Rr08wMh`@f1;qD zEhPa3kJQe4twX!{%6-kVwP6ZxI$B+; zI~>$Wd)@I@@;EvyxvrDvX)yys;ks@&hfHt1d=+XOI2tF&OgJf{Jm0QP8088<_HB zAO`jsL7r!2lxq_?6OeU(O^*A4J|!65_T$|5nS)R7jXQv`21h%G`(0D!qkZm_`GQdt zw&|uMk>pTOJTl}`bsogne+T4zxDp#m6~`0Yt!Ji^zV~yOaAby9nOQpQ>At7eo=qWO zoEgvSnuQ;Awp4XsM~*1GEAGT&_I&r*qZ8jU9QkA-KN8(2jB#WTx2j$byVINOnYf|3 ztGa&m6L_LZ<)^&lK-c3i3i?fGFb3o3KZ+cUP4Q#jo1WR4KtH#uEAk2CFf zIIv_q)787NMnEOJf4KQ&YiVb;xk+DyCRQqpho zV;;8-B@qCdJ@7TXJ;$WjKC-(|ztk5!En{apI^O$gnC4Q>=~$4L5-E=;p<6h#tg#Nc z?6)w(zkw5-yeVR-(s+O~q$Qx6PR{RYm%95I3Rf5EY^5}*e{-A@9Nf-~7j98$UU%j- zJa@-A^ISOUTr&eR7>orZ(PJ5>+#oz>Fi@?J{@l>^gmfAL+Iydlh0yvO?kpQh^DTve zYHX@ht_dQzx|0U;6XvAvz(!Zo|9 z@@98LR%Oqfe>gI6UOBhqtp#k93$_yz>24|Xo{aR&bSSuG(;fZRnIgK~T{y_@R=vUT ziDNjS#*wVO+#ZhP2}O<9_HY%+4Em*f&@4c))%~eW9u3P#Yi*8&k?bbcam6`{K*peV zSQpOabs(4H*1is8D$G!h7&dTwYz}Y{uk`nVBX&WNf74q&xzfc8j09#prYn2Xm=)X3 z6t(N9S>F-a09)EI?kCDr2_zzzW$OSEAE76s+v%MLc7Do=8{_*)NS||Uqs44IBrEZ@ z_xe|m@ENzepak+Lt=eWjF0Uw^8&Jx2cF**Y*diaLw3&2X>{ih8DL3^EzV>;c1*yY| zJ4ZiAfBUOSh}vLIH7uKP>@?0b@*v#Fg})Y>1;K5JS9~cB=+AKw0|wW`{^eSd?t>fv zl$X{pIwiPx17qbNCNoTpJ!(pQ%%RKimD*Xjgbi@bRKuOjOt#^i#{@}CxAh}%wQIA< zYUz0g$7i#k7>CsKEm7;y`9||?bTakbdByJGfA`F&g*VT*2zeN(zeI0iHfBS|BKiA# zl?QP8Nd5HCl5qV9<-mWoQLrPxfYeTy7z*HruVCuZCyqbTC=7YuB=ve$@5`k(9%)on zD97wYAM2dc+{=+(zY9S9T^63mWwYB5OL>=&9HZ8AjwaZUBoOyp*2O-i?X9y}fqHs& ze}YJ!1@Y3$?*5I2TCQ}k25lR5XFO|+daK*GfOL@JYOOBS*~LAI?B;^; z^27oiJQt+59dk=saTFC|C><1!I<2?+{0!dUDQw@->J(}V2dGO8F@f#7+d0O4mmP0c z8pM7@o5N#tAUDniTbRdvCW4S_GIMfIf6DAWA>-12L;(8kcC*>#;LI)du)mzzb2Vel zvjK9^{zlCo^ZjFiO8y;JncSs3NjLUI-1rO4+`^Lc2q!!rN4k5SlIU*Or>m1=d&AfU z)4-n-cFO&W4nwbG?{h;q%&@kU%z3i# z{bK|I)zz+G{%4Vwle>>*rnX?Y=M200Y}Ir-B5SIBwkCnCPa_lblk5Aoc;%yleb(05 zQDrteZ2Eg$#o=RjJ>4sN;bEiae_nw5mCa`r71{kJ+3{mVcBMg|iU+GlmwDI*QWaLg z!n-){Xy)v=W)w%Jwr+>}7$e_mJ5y|=x@ z^gi^!B?cExJ?Yu~O#2EOcPpb{44L!cEekFTO#s!~5;p6>Ia1-2`3wD7zL2w$rc>$B_-&PTa~=Dpw#_41;)`b4ItJ z$Gs{gPQdn@5wWQi&4Jm;Ru>cwG!lD#o(?1O{HVfjGa;wpc)L#+e~$xPY!+o=aJbzp z_v!9;lB()OW^LEo_xPlT-EAIh$!7N~+S|797p6@-y=@DJMM9-&##(NchrYZ&%fnkb zpV?*cAONA`{eyl}H7IQzd3bG~i!fZzUYi7U@f}%qO~L{HdLL4DNV;|$RvXj4o7K%( zGl%Kc9aIq}!xsR!f7^~q?vZG8v)OVq9r$w$OXbj-()+>r`_r?^76&tOsBHO$<&3!4Em=-Y^wv&K@L-VTb=xWCSbqwe9mb($Z2 zc|7q(*Egw*8jfKlN|q4r;+4o<$F()on165_-%$DBj zy=7v4xPZTatp^SG3PMwH z1C>vuf9zFt28i@UgtYCRmJfERJi582T;80xvqodh)%hiyNkO-)Hc@KaFZ=UC<|5gA zJ)tGB*Ctl(mvI*IL%f26x51QYe0M>MU)D0#s8yedq2pT}s(H?41ovb0Zeg8^*_g_y zsdO4v)4RosAh;{%p$_=jMXe$RHV$6@9d@jgO5u{yRGPE+dXWfsQ)HewO}W^1s(_-q(>50iY{7xHA@CULEO z11>Nr^)0?OObNP+uZyy;2lz58Uq9eWg$D;Sv%Pn{_g2sC;k7<6gCp%zS?`k>Ygq@JQ)a?xn|xnr5uDfh8@=Z z(q4Y96^ClE*zzRhOd$7b%-V)yXB7PMp629IY_3Oaq@vjvY2B+%F5s-**^rr!O5M%m zg{AKH-iy0J5~_Cr8)mG zcHRCIU;bd%Q5_X=zp?Ac5KqOsli1GkPo<^kzp?Ab&zt`pc71dGFR|Ro9yC3R_uUs#+Oy}e>B3AB2s}6 zMq8`W%SAA2XBaI_u{O`f+C`-{siuiNL$R5v`se~=a{+%De1u-Xko0o0{FzF#**Ul2FNfv#sTi&wA9nTCJ~ zIe+qRYc&Q-7})A77)h!Reg;B?nLVqD7LQsb9+u!)?iPik?bv>|lyCK50G)`SJGR$# 
zs*m%>f|}`T8h&1$$r5*XD+X-+ie=!2%u!ocqOTnEfv%X~V$J4Te<+9#qjB?_rtj}! z_l}TX70Q?6me>1%_2PPif^ImJIgiuzw$huHJJp7sYhFPaNwVIoNTXs29%X-q<~q;I zemL6X@n+u;@rDNDW?7>guI5;Y-yMu&DdZ@GAUSG4mUb_Vsn#Edj3BZVU z-t4|rvh%iZUC`A_aOfb2tkkgRHvOeeF9&+d?x6bk|i1eJI5Rr6dM7w^f`=wo$ zJtjMEl-^B}@?pn&3}yGLA#2JC&>__~m(V^%dNN-e;U~Jr!{k3aWx&*E>iB#8T1GBZrhFQ;;z-t07vWDk_^#5f>>1QF(W&> z1zU|cK6V~`>Iu%G85uOpejo0O8_HhxcmINNRuK3WfBE}%zD*I3aG}|-j;$0fDq|{J zI%hhHg2vb_PgkI4=8q#VraSHQx~w13=1Fm5CLB}-K+rb0yNkW`_J?cM^ve2Pk9QQZ zhYu&?!P9p7G)ZIXc$+Hi2=hAI2}|no>Olk9oS(r(E0Xel!4@S>-D_7p^(Ht)ynWL* zG~vEQf9Gknphao!(VM%bz;hRI&Ae;m&$$8uGj>_-_kP#yABXPAlJ9;^>Am*o+#m;a z8l*dRp1-AQdS{MUIfRQeY|!;85}X+LrYz?MBhpPdH>&ue0!Nt`s?^8PGx3zyl_xMa zWt+2fdfx_=S7@d=zV;i<^)}-EaM~1gC=Ar}R*i>hwgO_bMV(jFeSdKN#au&m@N$uyn2`<{K)@}r3QyE=SanBn-I`G;tog&VSX zEOiyqhNHiR&gZNGi$TJE64rPcT&9)#PKGGnm_y-FVM)xNI%!kW^c4v8eQ)>Udz5EuYuDmV5w#~5MON;d)s9h|e~~*S-Dp_ibKL$X;bXpNLp?VrBu=fL~D?7D1wra!?I^(s?NfhcRTs! z=)bkcS_6G*=^IOZvDsDLA~G^{c=_)feD1WKOH~l}EY~IHg?GC)s&@iDB01L(e{U>` zK7$rzSiG#}4SB{xC9jM45}qEp2A2>Mp5IPi9@|}$%>xveDv$ydZFwJQO+un)Bh^{m za}Q_U^)N*29Kp7(C(h#Z^MI%@*tj&%{o*oywMna8#A>SaaE8A6b@* z^y{pqPJS~!o$ziwSQ2(I3aJgZe|AnfjbLBLdsRs5B27d!ygeHFq$31~aG8DcY|ro) z<%WQ$s;^?^sz=E>_lU`241VvZX^WU`9ySPQZ=Bm(@aE6zeBHkn1?9OOxe7orp5IL* zzbce-mn(+qW_JK#nufhl8oUkR!su1@3S4Dy#3EUVzDY}w^H zAtXBDDZ`3V$Fj6F@zMu2J*KD9cTR0kF(d?HY4LX8P~e^%Cq2JthS45P{LrL97+&n_ zNZPhWUtx9V3;|ItPjVLtsJnm8jOT7iKX2MCzpky5xvwksEe)4zf9Ky9SxOBJ^-Sgm z)345g4_5|@yDz~Hiq#ckQ1Q8Q#_@D_#(kYqy8~nlofFSbgM`TC^`Wr0Wm3u3g%v_S z95wN#3HxJyTe)^I#Iuo)z%x>ZFy`RkFUGVmM2EN%C@^7-_n)*e;VtzF@UOHTq1q>2sVYBtxG99R{fsp|W>U-7he~%r6MAr)v4#Y5<(<&XA zqxIxo?b$y$FbxpRagK;ScVBH+BvYGmcJ;)2*&;JX(#zFhJG(9D6Gbnh5i4}>e27?C zuI`DMkj&~TouAJq8*P{C7e-yVIw(+Qf3a9K*nWMPlP>+Oglvqsv*5)L2|xAoCiGIsf447T2@}r{AsRjOF$TSt3pKq)&QsaB z&qO>|jR7xIrJ6gZcXjIySXnc=FDp>9?pGN{L9K<UPGnmbA+KyKrdLQ@-znm#(e*BOze?yymAT){xqw&l7kq z7z$r^;&}xh-+wRx3w@Ba{#z3;!C5u=e`zIr&-M=!unE)cFhh*$0=V9FIS3FEj4|JEM-0`CGgWr>5NzO^g)P*A=T|%rQ%F9@ah}9Yxgl&6^DiqsBk6fA2Q2 z2K+5~*i1wA-SC)$rtU>1>T-Yrf_VQ2>#e_bw-NqUs64g<<*9?&w3js(P)j&mR}j)8 zd_!5%W~He1VpU|aL|ukpa^h3gK~f7_A|3^~brUFmAli2V4Upg& zC;a*#pBa*DEAIAyD&(v`r%@qqKE%X!#rk+$zznPs+ZK`{O_3jf9S^{;F;t7 z08l`$zj4`KJii7BUtf%|Rz960;Odn9q-VsceBQ+EHS5y*xDq%ZUv5Y8*@&#@pUC zigJ4$b+)%`Ccn3Xfz9Nk23|s=78WDdwM5;64lPRiiG0 zu~3(jOWWgh{;pGR{TN)07~BmQwCD6vABcJN*t%776Q0HL#h=v@xe4c4PtN(h9dG3O zTSSo@!5_h8Q)uyg2=?wV1pR-zPv=N=ujE~LPqlrJFNXtvS-6)4#D9vdJ}>AuXw`Nf z2(UJSOrV@u&xei#PzwH~V}>_(CP#=cGzzC(s(9ma?;2!swgg;;4SaujO@eL@E9|uL zTmJmIZZT9n?=qwnkM=lk=2voebD#3f;7?}m?Jk4cq;A3RfoHs6Kl!i`dJ5+SX!b+T zUW`?`Hd}g5E)H}!+<%aM=s&8c?+@5ZwMOJk;q?hJ>+8F%`Y0RVAoKP52nZ~Mqg65pAMj<7G z_igo{vh6Uwj&i@)?bpx!dR}BgHlWz2WZB)t-7WgAn48~Pa=wtjlYD{A=d^z+sh?w&C_J&HbZBMDC{4ELlZ8#XhZLm zlG&KSvtQ;gj@*@-t1~=>7Mn_#HrWWP&n10i{n-ISIS<`wO5g|t@$eZ^+AR6_I@Xhh`o9AyCT>&5PhLDv&!hABy4teUsdJkt>Hga zRXY~G`QfE(vA|&$eQ-?+Wwx$$N1M^tMWDT4I(|Vum{%dMo>4DWSbI;qALY{_?8^FZ zHT>nvHpg4czi$_b{BSs`+rk|+Us##&su+dd(ek`Z1hStE3O^;pKRTvhHf8#`(RuK*S22gqSdLk9gpc&-*To^O$koy&UZ9iAe+W5ey{xxEz*Ko9MBI-(BG)$XX$$1Jd0^&VM~)qg#^t^m5~6u}j;ED#ZCfkyBZ--mYw5LP>6 z$AjFiOMcdZv;zdo8fWd@BA%w+Uf-5(P< zED|HG<0Tj_oK7VMr5@HI#;@f*(l57?#FFn4L2V-kPx*PB->q?Z-0Y;z1=ktcXMbBz zU-98m9c<;4PWnFeajq6+X9c#I1@$Q0jhfyZUoaqB62~lI*9)hHlr7yn?y_cc zqJ)*rp(3_ND_gI$>jTA^Gjb+f&kUx972k6wTb-NH^<=&shh9k6B`1v%ON|h%!2y(X zbAo6>^^!dwwSR9-(xF#N&O90#vwykGZnrcJ9`uuLm=*bYKRQYbHDp zsxUmA+ZoFr)GOHC8b2C>*Y4&91v^jp+{XD!(V8n3laf6c$eJOA#W-uP*s5nZph+%s zlzoAKAB8G;0zd(>cF*E5@Q3Z=gCJVQ6ffQG}wUd10Z3Wo~uCZ5c}|dVhHb%N%um zc)#5<39*{oD!mv227_$gXMOp}fg(RSt?#j3n`3c(Yh4e-!zF-bR|O=^BYmV#>n7Gx 
zI!@V^xwks_rTdGKZeFZ7hunO7j5{KSvLpuqA$CeNIAfGjcge`zqO%l}2$&5N()BxY z<1WXM-dZXB*k`Na$YtsArhnx@e3FSZENj@Pgx)l0{p$D}*5!5KQ74@Jp15Yz0Pg+zwG7VecILu;@y*>f z{CxpK^2?=jUM?-4GJl6&%83Gp+en+;NNRc5FVluRmU%;DG)&v)%lTnJXhp56#ddjJ zn)~M=eO62l4MN$v23%{t@kM zz?%7A(cVz#{43hKg{$3fv^QdFo8dRwJKTKH-s`vfoX}|~lK%_s{ZRo-pO2`*Z?yOE zMG51S#lKIVFP=VV@93gLC@hEF$wx&H{-C|5tpB7!Kz|ecf%dkE$)d;r_x3m1TNCRm zq7%H>eWV1&C+q(k?OlIV2LF3#Z;ITeA^?&E6cf`O3k7tuUxRPUEy3=Wj7@M*hRsj`C!;>4Azu>o>NHP2Q*1b3T|mxY zQ4Y2S+0=ne@Oj4$qBLga`zhYyw;mf!G}!TA6!Bl48}`*VA0151416oOa>vIu5~ zx`0};%cCpk2bAash_v9H3d{lQ&@~Rmqc?q0tGGNfUGEBs1s=It*p^P$`N2O<*Eu18 zf&$SkP_uwj;Zn>b)A2CcMNJRv(2K+*{Cu)R7k|);xMQzrg2sFi+!I;__*QG!=Iqj+ z+Eo)!Z?YXH%P4;S*XMo^n8mXh5U@)whOaK8z4Z1i4-0==vTw9@8e16%YV$ z5zfOkFijG|$kY3|ct4)&r}z6H$Ug`e;7&bxlU<*@x3|zQc^KI#*PYyNiwUe!!9vQqF!tY@vU+ z0zKU~eQM_koi11tj{Y|S2{xC1T`!O=zp!$Dq0uK~$Ao=g#2 zf5L*(sUr21?Ubx11D`33;lp+-#ImYfy`(UY4TI2U2+2kNM?0cXsE`Lv$GN2*hA$JE74CvVv0y3z3bB{8xnz-l?c(@RFFi(i{?6wkihCQs1 zQA#%O;RU8r--ZD&(Z|Yy)kCHg7Jt026-^UiPuwow!e|3Sr2|hkiKjG@==B*-03$sO zTxcylcR$w;v-QgE(N-VSR#KxhS|E!YSh-f5p>OD}21p8UQ0aETKkka&+gjAWU|(*X z?dnHeR$CI=PsolB{$WCafAJ{4v4tPhHdbX5YWr9J{*7h(AW%M7=jqs7hJS5yp&0KW z1q&QAAJ4&{1ctRb?mIXR%n>^WIkh9Elf>O7l&Bu~vk6(Dh_I%1;3l!`p>WomwCQC%5DxNx(_3=d_G|N9K_6GDB$6zX#W z_yjylUu;MRMO?;!6atH}Cx7mzFM|gF%t@{RL18Wm-;%spx<-*1wxc@?<{J22#u?WC z6Ov;R+6f=*#-Es>FJf^*P)``5e?q2zvC|)v^MrFB51cQ^jpwKB7W{9hdR$=PfCz_S zXdmi^a?%2SHE2?F0&@Rf+Au^Gk$+KZiC_IqTeMH$!2%t~C(Q%q;(vp&%?a5Rw07b$Y=MCFzC{Vf@B`>mHZ^<6}gWh;P=4N@XngRtR=MDCkX!!=QT~` z|9oB_w+JE$ebNcd&!iTvN^iHjwb_c>W5gckJ?0T`m~RCnMM0Ft7F&#x{&k z(|s^?*j`64vBP{XSdim<#;W+Iy`hDN%>09$#_zx5;(vq(`k4=Yj+}?E`IQS(V0Ng`1xh4r z*CGFOdi`@8>9r3aek52V*%o;hp*HmS;u3~U zA5bX<3V#T{a9|&#iPMNSA%_ueL}5LiT!Pm=>#YCb>rWJ{6F=jc@`HVq3yVNO^ABq1 zJBCjE_y@!GMH7DC6XVx!tStJ%2fY~I#!!U$(;iU*%xe83ZZN;RnNfSonT2!JPywJz zptE8PCJnlRR3YeB`ALn@BCy9^V^Snu0qE0^p2Vj(gD+ax)V1!Xp93*7P0qpp z^t}OdY{Hkt*du($Dj2$;4e>pmZ>&2%^ZkSnp2oO8=lp5k@_FWekbGtOUBBhn)BIoj zJb&^94_v1ks5!L(E-W=?cNN$9vW8WmjcCh(IVf)&Gwn(Gl`puk=Jw@AD=s<2N3r4e z&uMGN7oXzJL?$gZS2>65P;!l&HKUAC19rhLJZ?;{H%jY0!1dK04O>i$0lh+yW`_FL>{;NY1V1Lf6 zAP|K0UIOnGls3UIO?Bm0hI(e;rEy=m#AFG{NiglQ1? z(aXp^BPDl54kdl-3dgO5 z*nyn54{!yiw&MbqIpvpD5elL9KYz6?6WWd;1YspTKdzgwM*bp#YpiV%YWd!u&pij~ z>}Nj+&_^O`LmO#1Bfi^KAnO+BJSK#_AbOQ0O2R4y;t#o3a7XJ^fc693IuZSRjXFd3 z0Iaem@Rgzf53cggf_2l$6mde@5>D=B{4$x`7_syDqJk%f*B_Me4|eni$A9}yczn^& z$Yo%!31>GIF1_qvojn0?h)u8CKrlOe$8cC{Nrhl#YJOg#dRM;i`W|~cM8PwBzLj8| zto;Aj`;&H6v8-(a{XsTB!K!f@QVIxCEA$kl5D*mwVOk-r_sgY2?l; za_bhQn!uc6j&a|TRsCr@fPc0n;}-P0KjYxxKX=e0Lbr}}+amgqzp2(g>csEQKGzP< za{&!5a_f{(W?-y}F+ik1M1^sbHq~s^E@!ZUph}CYv-OIM55xfSF5!tR6yH6cclOw& z5Dq~(f=$$dm~97hbD`xY@1Sk$mT#sUg`ox%?R%of?%K7&FAU~D#($DSKY1apk?b{~ zC(%#-dX^BaG1;FVEi2xWYwhzHX54<)#t-9mT-N}6;(k+OR06j5XZtm~ocwwFH!A&d z4@^|A$A{ClT=~O(v(Ubi?Kh_)m4i?9z5R(yfvNe~{?&f8jail-gUCqdJ*K@~b8=s= zeVv?k(k#B?QPdER!hb#@9!16=Hj6OZ+yu6#jrx%@u*_E?-mjbm=_eCQuZuC0t#l;5 zhZ$J0?nh=WkB9MukRblSQA3>RH-+$d)qj37o^F8H{fb9%n?Ej>8gbi0CcS)=Bg{#K+cDzRVz3nNT@10*rQp*aiwK$VmqpImUj+sIDQ`4)z<* zZ^p1O4F*XL&f}LB9P5TAmTN;0_Eubt+}s-XG1yZ8J_TyyxPy8#UY@#@hWG92h;s|e z0m{BK5!g{GkAHEP=Od??0#8bcw)EB3WGU{zqV3{-BEX(8TtA<<-*1Z7&%K|tsqZlu zjtlN@zn&RH8UKls{x+URKV?OZ6U1>uMM{3_)7L3iH>93i>J3UHq9dvcEY&%^dAn8S z=4j@1nws|mSdqlyMQoVJLQBy4&u>=vwxPoi})IkroC4IHrlIivB9$# z&qj|EF2HczoC5Lb*_qXW8uWsGdhl+0AT6#adj+0JV${bC^vNqb2IL8@=L8z?S$ID( zDBiP?L`qGNt))Zw8&&x4M8eQ$zZhpPq(`zH6ACSm!&7Jm$kX^AA2+mr#2?8xa)`&J zD;!4^ZGRH|oH6=z+O!HDx^P4M@jWY(fCws_S6$K zanRZ)in3yu(3x2FD)Tt%lyiL6Hn$S}osPt!y+5{3bm2{6Uq|`rm*V<6#`)=|{3viO z#IDij9^z@x$9(!Iq>YmJ65mS=ZuWaaW7R? 
zSby#-uJ`p>qDAYBo%5`5XJq84A}8V+C3(bS9_v-;!}$o2B-z%0B(=(o^=|$ISz)zu zOPYC#7N04{(Dw*OSD&U+3pAfa7Rxr_X!<6vmjKJBq1@28MXf%gLQQe8NnH<6s??6tN-_e}11f`Q*uLxaIEZ z-VzDeX2giZ6;y7xIdTVLf8?U00Q|Dv`8UM3P4rFBACPX)DX-TpeI#=tM>!Y3*(Cdf zXQVr_th+sii+-sGA7#TOr3xzRlJz7R=VY2K2!(q|`r zW_$4!Y~sh!OP;yIPw+kl!K(sjcamN#5H3CPGv4*eAnIXmMO4QOQ5~P!VR@ixsQGs` zHsbVf8MXmu5|x09Y?nlWzn1x^kD-)9tR9nT3OzM1AoejtU4Lw}L|y!OkFh*D zhYFEh@X!g9u|4)!1xg*+$SsG$QKCBD1FfG(QxN|?-~XjL=6nHJ;7}c3&|#&f3+(^D zQ5_@XiD>TsTh%e1z~ld+>X^)QlXmz~9V>sMI?jMzI4BvLwQqf+j`I5|t3)k15|sF` z2A}}&z4}kzQ!?RAzJK>o7^|b?zT&^WN5NW*yz4QAiUJmInq^IS@e))DgGn~0(UZ9| z{`DlClIb`zcgiyu&eH$n4{jPYk;3>s=&;TzaWEMp6w(HIEq7O-pqWD{zEmV3RGmU(2K5#>a>L#3!qx)?4ZiB&Kn|sQ=#hAT4m=&Q0+ol z45Vu6aaIcTDV8NlKlEX6=f{l7w`TW#X1GmFTkUQPyH<(GlZ}(8Xr`_7SSwjR_sSLoQ69G zx128pF#w1-jzkaqrRAlelO_6Dd~eV*V&;kHUCD9&sKIWiNnerQ`_k3k21T*)Q4CK4 z^SRGW*Kj)4pl%dn(q2WSK3Wl>EB`^rv{ahid!_YJCr+)0GWILYF|*E{p+=Wg4B=l_@hZAiczZwus)*a z9W>bdr!A~-z3D_5$3styeft}ou<`f5>9i0q;D0=Qe&^S4)LhGrEpN6uAnT#JeGY#m5gUWJ&PU0`iY`Zpa%spmM02WD2NfUvFQ8I z4v9MUtNkE9_?K>0*$|EB_M`blUZR3-FP62@jxJc%*$`zdG`JyDPk>e$p<@uuri*{K zcYop&=l%1UjP1kc?$_tgydh4IBf)Ni*;b;<+(8h=pU7= zANtp}^IvI9Nneeqtf7s=IYUkrwh70Fw0~a^$WY9Mj4j^|YEFUDki*l2CD)We&MVp- zRFTNLB(W0omWf{1s)pn04f@la{AblJ^mpRwqY@?xH89?ZikPU#KC0o6sCGY}6K$IH z`5fvo?xPxxh-&xqIZ>s7Xh&4U(2emQ)p1NzyTj*v{YTyK>JPeM6p?=EhWS70hJU3$ z=!VfAKf2-PpA^UC?~3DyDDnrzF&=Aw(Bl4El`}au<8PhwpmN@b9uI0&_J;DDD0BHi zWA{-J6P?;eHGCnO*^f$?XzTQkiWu5ABn*(xOMH&GY4SNazQIRDOjOSA;qx0lS3aMU z#mY9(W!i^UckUhHf++v5BH77 zKV9B{vfcR7+8aY#|61Pj|IL2jSU}bH@AvcbIrM>~b4@yTqB$r(L1+1>)_+N`b3wGs zf3FJ1y#vP!%QG9E-J96HEw=Ta>Ws;?9P~sxOSF856=cwgX54yE;}2TYQp>(Iqf&~# z)bK3EX9|4xq7Uj2zB?#3zm>J(A2hVMU*q4PHGLLY#^PF8EwT$)A9)7-={wFZtt{kd z`1fB*W2j6?AA>wsq1DqxSbtNX#0RuK@eVsr9iE$fPM*#9{-E47WE`9Sw~AAuCMA82 z4C^9l>^ZvMG*QCth@SYf9mtd;8rZ+q8n4Lx1lzGizm446iKf~k_iNIx_>1kva(p;w zl-1p!Q5MMGzZCtyZ%<0@MNme5tDlK#82tz*_@#~QTheZpzb{j-Du3L&5#joSsuc?0 zj~a7O0Dm;iO7S^Y=no*!x5>~}(bxPanZG|H$JWDp>t|UG$A)u!_48c9>;r~580}xa z+t$Co&h;OqHF@r1yP>!IeY?R2{Lw7~rG~*Vj`3fq_kXoP5#x$~r2oZz>mMnF$r$3_ z{vOsl9hBAhd~c0YM}Hop?}TmHIlmO17W#(U3<}^sE^EnS(zpEMvNrCm+xJT&`!ifG z6*)Hl)&9KD*7O&mF+>pxjpuL-KiA*)dB=GY&wmH z_8b4JYna@3{_V9gTpx;Z*9I*&`a(p@&3Q&jwA{lrY-8WhAAcG8kAHdnkbS{1g|ZvR zX`%GEW-*?$anBfjn>=@*<^0*dL1S9|u8$=u(-mefFn;(}K`TTh|1Y{{XHdg>-+Fh_ z&n60Y7oSfw^p~9BditBs$-1!};(W!B_bxft4$%{%FN6E0jcXnEX_@q+zt@lL+v0pe zE3UOJ>DP!~Dt}+1iu1RKB0rAC>Rt^|#7* zPzncMD&LQK_)F#cQ3`*ne23+ZUn<{^diYD_`&sT=un5^5iDx5> z-+vr~^a)Y9B|e^&nfzi%ghv0X^3z;Q%g_n^f6^YXuxvt~k+$OuIyL z4@%xav5WIJoX0@{JhZECWi)8S_s?w=LMSbl+s3{mGs zpZ4?l-EW04+97U(_@qb_Kx?1B6?5)<9fmMst@X6a{LFy_`j?d z%HBWy?&rM!tx|i~pFb*=|8@JYJoW=hXYr%@#os|f{vP)0563$o+l%c&x__1Z^*tJW zQgU9ZFLgKC1EwO0Y8dOodT@Mj90%1fxgULP=kM(g`3Tr=gyP!&8@2HdMMJ7q zZ;^AYp(fElXB3hhmVO(V&L) zTM3MQ(T@^X_@xZCekp^0DS^M0zEA>xD}Ry2^R4{N_5Mc*EPN|}p$z{1evkCEA7$|O z`hlMLTE9ViUrJ%3{QcT)q6Ge>4E|CQlh41l51-?Z{V&zP%>S_(cz?yBy6=NCUjy-n zd0ptM|A89V{!I;BjjQbPr3N0e8~irT{tgfLGq=WZSaZ!$&UE{9-O@^K+j$lU$z!S2 z>e%pB@dPtLMu$Y2kzr_Q3< zrSWhASQ9l@&~6(oHgaATqOsjx!i^P`OmkN!KrZg%1B0B{bq{#NZHC0}$y;El88e++ zPgjEmadSdTd%Ca{fQ{id`K_}c!a2JO1NrtZI zXoV`az zXNi}133R5o!Aak%G!T;g2sGLWq%8UF?X&sZ$(x%QEv*MUIWrsVa(x*YGeMhBR6&kla9KcDfyLAi< zLAqWshuINQnAX;wkzD07>_#@f1olIpT*R*SB)vl=_e(wM>1$_f0ee>8%QS=TqD-c} zexp{?QJR|aczGE&5DYI=jP@6KHuX1VDO2YAs;>Q4!Ll^$Bh&8nF4vx9BDT$su7H$` z5s5t7oPTSqS5cNT+rycTI;~QG>Ukh=JOP3EJRM)_gWRd3UA%uCSGBod4~HY1YK}Tq zGLgv#xw`>IQm zqFe2%Z30Mh1>uTWz4nXs`)MfgCcWR(=bmGmOXg>uvQX6dG3VJV&#gww>+9ABjs?uptWql__?l&_51ZSA8WbgyQMSZ7J2Nj zdJNX^yP0p{7om^t*x|X$bHT8n>c=e8`xkI}@9HIaT+Z^9>j0)a1S?H?`NuO4q^5FN 
z-Sy+_y)l#{|7!L~4WCe>eA48`E#KOyJ%3#qV?OVzt!OmYGhpzSpnSPq0S?YV7FCPN zcK41wmF*1x{zvO(uIxhT(i^Mbo>M(|J*OSqu1=}u)=h1h8&aqDGYrI^Q%ptb_giVM ztTa!x8C+HU+Plt0I$4W!ZlZZ;c~zj!6nh(%KCYeQ7RRTzswGT5_sc%OWR$;`dw=S= zFY~Q(8#U3c9*O!ZO&fzoMME0*JGDpQVaeNYuPRcBG|B{7Xjg|rGowI(&NpFu@YA*{ z9;&lF-3tl&>#jJwmeRyBronR2mCCAyL457sKSEA)nDQ*opajF3ky0dTa_JOC~k>6bX(d7m0Pga%qXo#e_NiN z!r}#hU3ytK`56O74}`h6HM8;30Ovl*~9SwYUM~q<;-DU)bdGUq<^3fBLPmH z>XETIs%}NT032EcxiN3D=c?+o*Lk$0Z-U-Ln%!-RDdUz~Xcv5{v#Y|;ocrDDqS?lI zZbi4007MJJ+p`GIw@AxY{qrm@OcCxg>vr7gMx@s^Yugh*w<1jv8>d7|l%nh5ac{j1 zTL|g8y!v_j8Y^v`9y2~lntyG24qw50`#itmFl>v+EjGzDWGZSG1*bk-?ksLn%TDKB@}H;ZIDJ7v{wC)#$Pg^2&t1cx*nPC*AwUBo_(;?Ja?%Rs z=?pJNer!TzXUs0hxphlZ;i-W2Ouk&8_QRFJTWD0*@qHt?IhS@lHx*`=3;DJYY54?WMa&qSXy^6Lx!r;+f%AL3cc~nBE62F$s!Z>4O!+(EOStJ{iGBsC6ph!N z$ChJDy$ISY+tAMR@IUE`(cIrPTA z(o;%Z>X{8IInzv(M+0{t3l|?vZ|0$%(=aEFS$fH@E9!8C5yV~#%o!Ntav`vRo`*)k z=<{WLKfhM>gDRPLw4mO_d^Lr61@j!qnLF#d9zdaN>3_u@$Db#z0#kIgJy8txI@#RT z%1&13iFSXSP<|U4<2B{L%RD)^ll5d5KW2B3?_(ytw?ea0Zn@>7f{pMlELk6LoJ5r2XlH`mU^@D7fG$-|-e>&(Pp6iLjI zjHF_dTTD)UY=BRaC{dkmqrl7KOL~5A{gkOrw{qs1i)UA~+i*Va%6PrrHg|f;+$D+9 z?ZRO$CM|AWr$;>5o{fdDMqfLO&YSZBvq{aar{?JTz1Wjx#!$-g!JXcY86};M%QsM% z^nY`kjp^-m(Pkrw9c^}rV8SCzm$hR(msFFN>-&=0G}YwD&c>y=pvMifV>>shXZ*2| zmY#3|$7{4&Y`x>nNvG30=C<~`{>pLj>8vge>}d`>+4R|Tc*-;BvSiou+ta;gGDuSi zFn}}^bK6KR(wi*lH>+B{pDu`*eiq)0Q-9;J`K}vnw9W{|bZ(8`cX|JS+LIbz8*?s> zu7I~MCoU9-x8k^V?v(D8J#$emnsjEJtQu9cSbCS#Z^{#PxPf z>hX+UBLlEKZLa3w(LG(Cu>!k@Ra%qk99s(|O(0xHZ~Nt(<~1nyhb;}mV3Ops7k^~- zk9Zr6FxK(iPVyV=V03FFte5rv&2JQO`hMZOFqC?Nmz3L3Q+-yamv`E&yCrww##?&C z8k_AEDJ@RdZs)JuU@!V{t3(0;-ff(0X6Cc;_Fe*TKEE#L`O@4Yz&(%N^Q|VrhIhQ3 zpX2c&?%K0^h;Pr?vZH`J^zL$RSbxv?;auDzmOWjk(H-W8`50E88x_XX=s@>T)^9!N zHYrr#JNxWL-)=>Ic=AzNt)~%+Nc4p=r_k+MoiB@B7MYu|XG`Id6V!9zwPNe$^4&`l zr+A83nfE|=OUg3Cx1L;3i!LDJHD(!5>D}TzJ-|p7X(rM}W zzBL*&EeGIW1&LQtH9cN0*`lPVels@_4;lN#RGE3=j9$j0B!A5GQ1jnfH1APC3C8uN zJ{9lPG(K6m?HsOH|Mj{N-?Eb*fO}V~1Z10}Ip20@U-W4F){5)+sQXX8xWvXzc%8LP zy=xERk#l0lbj8xQYkw~a0{x1jJrMNxu;=#LQe1KYx8G=&t#k7Pdvq0j#+4lJWF1hA z{ah}rF4?yoEn6cIF)xjq+(R=*a-V#cgoS5H&~hL6wLW3tUvlIuT8e>w04}rNw&=}H zX+eKry83?K}#<{MM7wuYj9eF zFjGW31DxEbPQq@sNuunrr%LmxDmf<$-p%Dw=Rjkwj#&uP6zKLI<>w2}mbtr9R=dls zfgkC7d{GS}7tCFa^{b(emi{QuZ0T)pU2|>iwoyGkLHo;6uUym3kZT0+YO^W-d}0gx z1sv4ZWOKhnr+=21;g6DJFOW3HOt&le3yc{Gm&5ttKARhG^4`NqadQ`_Seo!}2nA0w z|Ak`h9p=Lzxw7YJb1gY?w*?quTHO^jBL?6?3<*P`P~XCO)KC7>{OSvs#X(515V5yLuT?&yqP# zpJs@^AMEb^dK?~yY;+G77etbtlGZwFSf=#rqxY+?Kj<}!au)e2;BE- za_sNRk(N%x$rA*;x8J|w_hlPvw?m~CyZac@wST9IAey?s_%I#$mxGov``67s;M0x8 zWMUAWn}?kTuoMer|KjX1qO8Z)3>`3e6`$%&J-<{ahRCR?FfZ3Y*sF4Tu| zY%A8nYPi?hyZZqG3GQYK_?4FxbT+}WaIc}(zX=5Tp!Dx*w7#4j*4k&Y>&`7NhxXN^ zCx3~ys`a@aP5Kx$T}Gr*-4O+Z)e$iJov|#mhYRF$1b!! 
zxk!n*U3#cli$dXQtYP+TtdXQYZia`{CRVJ4DJ&=b>($STqBt7clr$$c?Jd{xY1cW- zkGB03POGW8-(OGjl~L;qqWG_70-84zcYkj5*xz#C#MfIZV4zBw_9{jy5M1rw!f3-G zkN2Fmucg}6%|(eWUe3*wphM^N5^ljJ_QUU)_n8pT>;N~`9^E@Z+{(+ZQJMXzALGloq&OZ{JLC@%_e*6#-yg6M>)Oc%pV7Nn9@D7jhB& z6?MlGb9=RCBE942Nf;acMcLV%Q-4Ba4&(ldX^SQd1N6A0rBc2$;KONL)bqEWFpv1O zPe$faEig7`=W~RtGAqV7G`wG(%rY`nQpSkkaE|b%?ule$csw*wr&{mQNP7uy_Ig`os(!`k%D%W*ilG576Ai9E~k zF6+&@aaX#jqmL>-9~P|s%JF&{uU88+!MBS_8XTSGu=Ej^knuc&{?D0}J6EX_xqU4H z&aL@1H;Ff!ynX!~b3;)Il@=#vVCAg>1L`U3#gT z9BsmQ0?4&_`Pyyu2%bv>6T|7dSF|lPJ~aE1>cReCuC{R&Rmky`&4fbr)m-KQ-k|Q4 z9Ia=&`>eggL8w2|mFgWQ`cs~tple#^=OVo{({4O^yU!C9*^6Q{oqs^vJ)M{x#jIzK zQG1#))J9_#)_k&vQU5!o;`)8K`zjorMR?J;!~2Ew{jhpWy2E?J?#~b5 z47+41_SNHo6~BTLx9r#LSiFR&Gfb%tel*)`m{6CVS)urkX@6$C*K%`LI=f!_2w>^H zFft~N>OEI(Z!Z>$aVcphQB-w=Z;2rM%r55-#?sm}k&V_c>vL-vcO=pbY85yml3ZRX zC_F>ri~<0Doq>_O!sM5oCgQ-R`=#39Gy(;?wfuCojaDN|0zN(9rU}49xrL%gLxyzDX8=MxkhBqAFPB;` zgF+CIuLZWmT8}XsCCBfON#zdg-$~YvpoZ9jk6v-nuz&9#)s3A55;N}DI?b+o^}$}K z<2c)sM6JY4R`;foM@02PR5i^*+$u6x;DFwP=S={J)@ny$c!hC*%mCvs%4rfXN!>Q< ze8aZq{B;OGg+^A^W|{j<+O&{JzpC4`1hC3t7Lh)Fqx4NjqQ)F0(C7z!R6PmOld>>F zZceD_fns`>DkK>q9T02qS;*nw2D{So4sx!vxXX6Xu6Ns6yLi{Cw$~r={7o?nY$R%f zh`B2-cf51iRPj9fR9#Z>XiqJv=>xBzzj?)rQ@ngE)tH$#XH3_{bYh}sXz|{}(UB+e zmumeR8h;Ffx(`-UHCn>CCEM z2rFpWlWrpDEM(%HDy-i6sY_n^71^g3zE%-|3uPk@Cw4NGUBnTQzeu^b1Z*S+ALTCX ziqb7_npI9mQJHv{NnX))wXup~eL&LCG?_eme}9$Vj;n;H0A@r(9>$X#p!pa_{(t`0 znws4-CnfT$%RnglXlhnN#GAx!ht;Wn)YQly5YittHE3jip{Y3ynwKw4&3G@-aE5#| zHLK%p(9}qa%zBbwF$Q(NG&Ms+n}C3ZG4z+F#=!YR+Wd+q003%0mA}Xp_tDhYZYNGi zFghM0&ia3sriMhYNkAK;<=-?l{)mL>2TI(JrlxLm2q{RGHu-b?qo!s*ASM4tni|2T z#F}p`jN}Q#=%cA2I5YgnfN6GsGy`%D_`?B(3NRO^1MeQ-J3-8Gz=G9PgS%jZ<)&+d zez~N9qm_%Ms|M_N;OMwyx$-^Ul(@+WpaB3Ad=r2C!a-r9KbB219aFVQfz0a73D$#X zY?=-uYL=LOkg>ihkk(BQjQxNSxB*SzaR+>b#j8P>NX>q{)MKGJ$!0787=LwhNXI3c1;oGW$T&zZG5oGai3 z$@aBmc`~2z3#aQK_5fuFL1+^+An19(=qi5#gDN+njYR4>K2o(97vTOv=ZxVjpa$+D zF5<345E$F3Z{UUZP?)QO8xEXuE>h;cqqNSUuB6?pHrN)_4v+}YN0Wn=L6i|T02&6> zm?pPtN6Wi7;YFyddzkyLj+m`JFSI)Z-{ z|53j{XM+&VfWRe)W}?RVpmKp~`rs75v5G0EXi#|xB{u^ad{DOtGThOLq5}FN@}i#! zgF;7?R0Mw4a7*)}MDmX)iX#7xnow`wRXLw*7M>Nz+Ioys zFhE=^SWNO6r;_y~3ECOTnib9>S_XgA%`K#`@Uh({S~#%b@PGTPw>9St)vOkh=d&8x z4nf!nvz{)v9O5wm?w^+3`Jcd}B@l z;`mm!5G0`q@QpzbhXgg5eqkYhDRjtr!8tht{?r0>y*!_hgEbtNRH{@PR7QVLbUC-W zTP;yk;JBpfbe}A?C}aUTPe-B(aDa(u7+i!jBV@a5YNDDzgg8K(+Q~THICf7>>Y35P z1jpbS0vSjOwP_$=En)2}RRnTI4K;5~`=p)UKufPGv;!K78`ZgR03vsiY2o>-aJkBt z@&yPuS`0cA|Krvkk^C4X^{hM;X&$*DVNsp z?33`C-%Tsimw+AZwsH!5?I%*I@IkUmpF@o{!y2c>GYQlii-q& zx@|in9#^xCd%i4FPvG~?{nB;^L7Z^A3rzYma{^?p8g$xv_3}{ik`#Y(lB`j91XLgj zM|uy4QgN-Ol|l;Spv;khNkt+4iW?OPkLM!L^DqbuIvk4u?i5(pC_|Aw)HqQ|mignp zx9C>@684w-zMkz9712i;iXPNz{b)cw%BBx0QwO=}3mrS4Oow(fV0M+>DsewzjHc1` zKA#KXn1b@;`Mr-P?QVYxQl-0rbl5s9(n|w>)fN}ALd4oiIG*qVxa72 zg%XNi#{&rZ!+nuFUkfpfP%ia@j{j&&E?x!Y261u|6`p3Q8X3u<|rMiIim>h&+E$sE|l_Z*A5D^Jygp zW|p~?d0aRE19OM!H+-~cFB4>?HCKFA#Ab6u@CxO=Rl{?abb)^e!kJ+A2@)0KVV##af{zAzpFCq2=+yxl5m&T5+g_>}rvyT~xi5f6 zbrMzMRtsB5F3ZN8jJRepqK>pGq@u~KRC3vHD`}4HQX8^e(B`aEqKg|4!$X^LMIfsQ zVi?yA;Ptw*%Baz%6+xy0FzNMp%tkfGbNU(Uo&g597I1(3r356$<8k>+*UM*&4wtn! 
z^T6q?NVWBMZx0V_9I@{ii?6@Qr7~12{bDbE<2v!TlMozYw0c>>b!FxpaNs3oRscd% zMd-8mD%tE;uZUlg0c95n*b{OM{=#sotx2cmRXkuFEKC8z^3u(u797bu!SCYQCo?(c4|WsKWfRpIsO($i z(BCC^KYX7Xk#fAR86xRU+J}UhA2I_pYH?rvuJZchADmBq)!>di5Gql{{=sCfE-Ou2 z4Vfov6#n5k^0R+me}>-q0z1%?F7Wq>u)^HW$zgv~`1rPZ3)B`28nheXNh;Wt<$1tj z{!ZN-j{9)D1_CQK0)G!r&cT88Nr(aYX!v(h&PmE#7htS#b25X5LQ0viSyu#m+1A(D zAln>%5jB^K72q|0TrMNaB}iAr)Xd1F7?avlvmUcFBMbt2M3Lw>oEoR)lGY}q;{9(k ze|~><1V@peA~}C2`XTNvaI_&c#RJ}?Fb+Tv&_F6yyB${oO$VGDa4$P~irrUMvVkZE z!5tDT4{!sbez7;dqyK>ZAZQ#QMW9;(?oubHDFOJzyoTxncMBfKB-eUl7E1(@AhiYH z2q3+~peQ%IGeLLZ?t|ArC81bc;@Fz#%Yc6`0tO}x?G}g?;4tUPRzja>jpbLlPH<7~ z%Eos@eDw${;6<@t8o~X6;|mNWvJHXD1nP1?7jC5@#GUI4Z;A2UW>pR7$uEE8 zBS94sv?10{&|gWd6LcUs&L8Xy542^#;orYdhD;)(Jfz>re0A5$~XsliU(TMK$?~a zeBXc>{W*7NUqEjlPwoA}7!_Ta4{j1Dos~-PIUls6O4`jgG7l^nWWNzCrqh1{NksPZ zZxCodm`Gq|l6;G%Az4|K5`4`MUK3cQf4vp}H2>f<2Lu=Frf7#g*0%%tQ5+Df5Nwij zw!-y>_LMmhafb6KSreeDicYxztDODFnb>an{MDnCD=Z; z!v+@0&j+lYKu{=&D%CPd#;||9NUk5C#;{)VgSW);cunw@m{$-SJe53xf#vXKCj+969T1YX1=LaoF$y8#iq!u}Iz<;jE{@Vn1_ZJAa{{oRm z!ZR)~slfbUox%*bAfSpq?bpZZ@VSZ!gRB)Zpj$<-*YP*n06DK{cUy$QX=Q~gRulwN#s8mOM>OmKbT9PZMYA{5;!GM`g~4M zJi!NZNidJy@HsF_qVoBipm*}&bM5^{lqE@l`$1V^rs4}_S^o)Tx%&gk(*8zSqRsz~ z&jgI+H+<&%8=r~#%|9X+|1Bnz92@To6^iE(FrhW7!nHY%5{G{ej!_}Fo)5+{BdDGa zrjlThln>^TV1+&y%L1RHl$?A{j<5ImoM1w;;d9L1Ato_=PL3}I@8tLpwAlxv`63v` zFPv?I=Om6titCv?n;;e<*w!7MwLtW4aL>UqE$(ZLHphRs@94w*DJ0S@r(yoAF>wpwOJ(Mr)1P;&60jJtyS=={pt^Bj)Y3%p- zYrn^2J>SdY+3?rqXo8){pYAmoGZ_`5TxOjv>}Xl2(nHQ&jWUu+>?Kt;I4fm>~=ExQp2+tpIv}p zwLOoV*X_mF)a3sOK96AU$nziTBG^@212v4 z5ge*7*Boj@B<1Ky=r0Og_!}Nhz;T`tyyPF3T>)G0FUtb?wwh}!%Jz}=Pp+5Y*!)-f zL)w4Zdxi)oK_UVXM~>m=`YXTBJI<4izUWswLBF2hDhVQzXMgaHSig*Cg+}@#zx~Gl z?i$8@=igr|12#R$s@s5kMZbd}UnQa%82b9a_o82l_e5<#kgR`o-Q#|TV>&!v&|V2D z8U0gWD{;RAk{$n^0{Qm6e*+>9_|e~xrUZYJhht9058s&40aN@hC{cB|UYu`qFzIIl z56qFzgF8M4Qtxj+-;i}T;(?=a=?F8 z0!KbzB0u=cFHGcTxo=G5upF@D119o=wfw?FewO>jL=MXVPmW_Gl0F=l46lJt!Rr*S z$utCcjlK_FfBqe{k!1OVyv{$%53kYxA?1)|4sECVAThs@ zlHe_p>+6HW9I%qnfPVa0o*wXwKg)kz(;p<}u>5I2KhnQrD48F`<$$3a+BLye4rs~q zfUP8mOM;fi6TA4%xD(g>vBq2pvZ_gJhCH)E3){rYce`6`V!ptGcUrneK41+f5BYhz4nE{1db9BFqB{Yj=#j; z7zSg|3T_)gSk@rhf6$g(4h&}X!C)rfyrseSvOA0m)pkH(&Je`MH3wUpYyh?Yjl=}* z68nO6Z?#9lQ7oR@$^?Bvtf%#J5ck0p`?r5ofg}9v4mtoY}AU824c0`Rjf1$Ik^zEe8eT_xl8Rz-2{qUFTB=WEI0^c}bC%>?S z1b2s7M1r9N#t(SGC&7Os{tcG02iceW`*nrcAT&2%7lADO!4cv(e`D_kydJ?=0v9;| z=&Jt$PY8V8U*RkN4!_y1o`1DH{(w!yOwxeSgH@I6=LZ*v21c0uX`-0py9{nzDx!x@gq z@x*?jk_H$~f?34(aD0aIKVTcN+-bm{4tU6+U47$7-2tabfAErj-#&)x1^7mMKAc~i z$3NjSu{@4jIpBXC$@Y@%BftBN$3#2C_lEO=^#j+4_DQ}++FSC$c|v%O_XCDAoTz1WWd=Q>~{@pnf49`@^x$2RDpl7B**u&oxr*NYJb35ngpYX zNA&**HuGox${Yzl^K*awXUHhR^MF7?$t8#6EQMST6q$tDC%Cf%v5dr`5P1{WX}&=2 z2~wv-uf8yn1T*=Ksnjt7`N2%GU!Nn-et2H}MaJHL2Or7) zALAoAGB1DoDVf(W{>FV86W;#-AL;&vkGx}a#sI`c@R6*f^Ar3y55);OA0W`_2`a^n zY}@BT`0%U9fj_F_w#SPZ`BV|{?&$DJ*Y071q^-9#a&O|H7Q+cQKKRCTw4<)OOY%bQ zx-HHZ{-b(Zp%GH8P|QvG>W+==PReqJM}Dh54t0Mcv)VC$@p{?udN>NAiLwo6v-vfe zEvR-a^WziKP|KS`Mf?shpoQwhvuuv*H=o>uF~+39O*5}Me`&oAssGF_Rqsw-uh&)Z z&)3<^m?Wn%p;4*X95jEkANjKbcb-KJ9d0|M%4DdId{L9ldlo_kk1Dg-9Nf83JBKDx zUoC&oEi|#8M#a{Q`*3m=nsBW0Pa`@YGzV(j`eC3p^>|zL-PYFSi#ZOmyICyKsd-(_ zr}5zxn`h3P;TEV(ZOHDB@3UC+0j3StVwI@LWv>B?a^wA(1W1zCO73P8@uoAMmyW>L zr?SzW?{n^f>!&CI+f~a+bFZm0*Coh+)wO>$ips(pE7j{$do(M161fm=(e_BB>N!JU zkMt`F$|_BB;<0Wzt?wFLKflJeBNP>0-J^hrzx<{NVak8aaWEny^K4l%#Uy0gP0MW-B*+As^4!AE5?#{YkI z3IlC;oEVSk*oo53^t52&u75wGR};jd*v5Jr`Y;;5Sd{Lb=fd{v>7$V$cKyPnZx@c^ zQC;tyH6&!LpTW3rcd22d{Beme^K&N}j&}67fN}ayITieT*T22PNt@m7C~*@5L3jPr z3zqyUJnnMA(4YBJs3uc6Y@VxLIOu=k`xS4Ib7dUwv!{DPSVDb-;~X^~n@lQS%szfq 
zrRwyVu{>$T%W^izA&!p4D_!}cNSVfy+M};o9|XeUvfpkdZ!Z=%*KzOoySuktHxp?d zWhvhtARN3eASDcHlYYxxqF>RV=;hg!W&VlUz;2t!D4eyfSCUb%;@3?FUaNmESC}{z zTWLKuOLlklJ%ii!+1~G5+M3TF_I&%WrwnbIi$rs+xSijm+MTuIv6ckMZGpIt{Om8+ zN9f)7C;hy;yM&oSKaw%|IAkWrG^y(CnBQPxS9|V#d4E4G7Ox=4=l1+@-WcW$H83S} zN?ou#=L`EGi~|ucIxVGeKP`XHl|Q*!=WG%Lsh%D~{)u^SRk@WD{!pAe(1HS1*ZN{t zPp+AXN#05<4)nQ>s-<+^>-YP!wQkQ~^P0s$c8ic~72d03K8}=2*gCqHzB!TETFgbO zZxbi&{AoV1{N0i_$DFxQ743FMl{{l(@CLAf@gA48;P-CU*tk%P-WX261;KHmorpuTFWb0oHkT?x6W(`q~ zItxJwuj{($PQPc@7=lW&{r7M zr=7DjysGZXaa?avP6yX?3q@9L(Tb+(NTFI)nALM~D^ieKm7zepL*9}@7x!IuC|-w~ zhKjL=lSxmR2(;@^I$opO@*W-g)BEgPF@-DH_w=S*C5?G5kC$c(*SvK-(U8O+C_oqV zZrhuWx8S#@dGCKI!fCVd&ab{OyWFJS@n*L=mX*`Jt)?17j5a-zlSXHs#cj z17DtRXG&gyK85nMsz{@I`&N)wxsknbgaQj)c(MxobL!|l?*+7T5DKPhDm~rF&B!Mm zrUs(vMDU(od!ct5|M})GS82Bk`p(!Mp2l?czGpI+f>g_Z!^yyKXake$x2sb22Yz_w?Oja?NF3s&?w7W7Bw;4UTzVDHg`eQt2 zX?dTnJ#<13uBLnRtr{y~E{UqUcDaOYOIE)Ig15A8qPxGzH)B(>uN$xy<$4ww;(gwy zijglgR^wE)gK6mFj{ZyJrqYnyWwtHrXAyuMeT z)41In?(;dIy}PuVyl)Q(3j5RPaol*Q;WD-(I`My|;<{Ar9sM}ThfHH%J+za|a&wc) z3Hq@U5NWI`bZ)A7^MI6?cHVrlH)EN~mU3fbrqvqtOYvS7TU1w{nb3RDne*rmGr@W? z*?~TCi;FicSIv?d&r+lcy2l3XOV==$^ETZlDVM$>c5=JbZeA%hi;#{)kIuORK(HSTc9fAwSrN#cUb`(x;~APW3sXIv^shImQUnx zJ(d*INe9!a$<Yqb!U9zYFdEY4Pk!>Xuw#+|xbX~yoa>3OudbMY&;=5~0P z%=FO_PmA-S4dzT41h5~=BR>au%=Mh-u@Vt=#jC~)@XI(N7T!ySKp zf!=?rtGu>u&*SP$BU`meyB)~~W)u}@qmq_YIxKklpCOeMSql zzDc53-6YQ|nCWYsZJNn$8Ucw_vUq<~OlwtMa;ztU7*uJ1NXW${HF9xmk>o&SsXkS; zK!eVLv&|jVT-MLSk#T0eE>jH_oy1$EB#pS|$&)t6<2G6ex z&gOWbn}Rr6j9;hA!8u;8)UkM!`~98sUPtCRo*f#_S?ueo+Sbp>(TQ?4NB!BBA4yls z0hBtJ8GiO~>y1(CCHX#laD!rpJF)%R%L?56ww!Yoi^ZY?Q6re(6LCTjvwWU8(g<@NZ?G zsN3x|SL1!OD9uQ}VIJ?s-IpM3UM*{nlH+JAuoGM^*_4xSl$;gSlXh=&!&amD0{K?8 zkwwRwj{?~X;fLh_YIbq-QHMvvL!F(0na(t2~XK#O-R9lK6E(*1` zpQh1I-PL1X^oPSbKQRfSJwfO0Vx0IVMjgC1>1Q&=cZoCL`iuj+rr#M+}0b}vj%xJ$epx0$m`mbKL0?E|k^=H0yQq(i$+KOc*OjTq*{$Z{O)tm?E0;135%C;JKE1#QcS6;xHWI7vh8=0x!crXaEZTLCx5JF z-g2tDRA;E0+iMk$mi1H$&Tg~nj{409NB)Ax#Ew5j&Z805=V^agmC`z$%(zG#r`A;`NM9+J(}$+Izu7O z9M6l0y+>3V+v2rZG#C|7l*}{cB%k3a4vRPaIxf4czBQ70K=Blyh8PB(L5k+=S9V1C zm0muy!|_JN%ytuAmU5o$>S!6!NA0qy*FHG!{%*?Wpv!;HmQ3$ic16{T$02%i&9mf_ z^D$o-lkL1IT#dWEsq<`Ct{=#DW2-~ajPCF7a6CUz&*~y6f8C_9z+b6;+%2EMnMO$p zIPTWL-I;yRDD>tQGMu0ljig_@MRIcj35X)f-zpA>Y} zTYelhaP4bt2Mj||GgK05`)oN;FO+PRyNt`H+yQ`1joMPE9#=(X}T|IvL?M3W@hp)p zt?6&JI{PYLZ*=4uzl%?ENrNTnCy`M5UdB%4$Z4u!rw#oo;fI*ao3JZ#tT zrF66VJy?wlit&~<>vZwEeFTg%y;9R6wiAEqw2dZs?)UAPKa`l=Iy2qINtlH=jV5XM zydp31pe=)UZ)Nn}R?}JQEqHU}>@8#R4lQZ9-(*YkPF>srX1;^RR)W6MP?uze?@*<0 zI%UQbuN|(Neq`PGvQYr}wq`x5(Dw3TF^Q9AJxjsAO2$ml9(G66iZy*nT~pShIu?JG zj&_=1++!%+<}!-!);kKQIyTg_6eSCgp=o`U@2@#CGIymlU$owO8A{V*^Ws68rN`w? zh|Ps;@ly}zqTm#6tL~y+8OaVYxO=weG&PQkPL0RAbBpo*Yq^vaC7R}!%TC{Isij

q9CCGnVTy%4ej=b%S1?XmmTNxFXxAR@@CO{CdUR-A# zpoyKei_cKqyfC;BXD!Xn+ZWw0E{|9J2;2e#wB!3`3zwHYKEbUa&=w1h3@f{)6nLj< z4@kqA+U>_rYVS8?25>cw2H!3*lG?ty>0G#s*3lKtgc>Y%xm+!KTQ_TTNV$I}!fobL z?I^+ET+nE{wB2#%^<>@oGi5hg*X?3H%OgFg=6)P+r)-=umzNn|C#UYRmFCW}ON7Vf z<}OF_badOSCs)=y&zVKjbF|BFR9S!%dS*`1mhS7~!THC0&d+cdN0$>@ZZBRly}@+< z1gLP4=@VQl!DCTRjy}lx6X}1zoWgO0Jn(GfQnh>Y8s|8hbQ}BT#-g%++z+w9`9b&M z#*MIQuEFRf6eG#W<6Tg3KJo~-iHyry~ z1;X*lCf(aK+At40S`pm@`7)EG zay8D&%70Oeu^8xTn24-=G#Wp6gwtS{k`N?KWK6`e)c7vlZ=*|N*<0QT2n2gt2f z%$jM)$TBAuvu zBc~G-6Yz$6?^F?W4wc$j+Ia$Aw=Xt*dy?{&zMZa0yOeh2ndW~5?;gH4)m?ZAJH?xE zhjj_AfugMM-5JQ})!vcUZ$>~WU2xIzpiB9kaCJLcsN9`>Y}7Y$hStt8iyZ{=eoavKVcX2F076y8Z`vEq^HZqtF1JA=ZE!z@;6KZRH?w%=j}KTY~IzE z8gFh6omk}B8o+WqSg3)+U8ojRXF|-3BB>~+fD^?q!`x%52TOp^kxcSQg{s;jTwW>u#=8<#_s%$td!TQc3FR~F@4~vF1OPHb|zk3$2>;j z{Iqv-8=>8i(i)mDp7j{l^ecZENEbnTUlEh^w)RB_-`q1Xa_CV)%GWnLvHVK4!kEpT zSr+N18^1j-dB;kE4ry6N_!g^)SQlFX92{wLRos*4S#!~|yP0-_0_es&j;0p)ztHm< zcavUk%O-z+#H=xyHfb-=&lz{Ar=Tld>OwGJs8#9$fTe1j(DyJnnEj5SvpCGN?KIEg zDI7|Ym$~?_@T=x}r|ysBzFCzpJjn;9+b^BVEq_nE)z0kb?#6^mp;~6959y? z$TqvPc@d4+u%@|HJ};v}U%BgAk5C+a=cXVtjRAj{iSDOMcyNMqxV|iIf%1Go$8e2x zWwbmxtr$6>G(WHRdDxGxQlL+PV}5VJyKTdeqioQ=sz(z13G49RK_Q<|wfr@&L-@e| z427%{+QAPB`4w*C@*9Or52(VFOvr7l*KzEI-za1WvxNT!g=~;=(Z5C^p9avxzegcg zq`-gAWbQA?ikIIg@Iti568%TeKPII#o`O5wXCFOnP2`|uod&g+PpQ7Y! zEm0_shdJVm_~km1$WE5Ln9o<9n>c@NCFCU|X3E@WjE!_9S7pp!uObBrJELczn(e%Y z6ltk3*<}|bAh=O_yWJ*mN-9?h4*d>*&!7p|!}I<=Ek#R2Z4d%=ZwQ_a9c{|B&!#-= z(+ldO!;<~3IIo+(0bj^2iMu}9UC57DHxQ$G#wU8)Gm zw0L;6md&0odq&UDP4Kl{Lq8<_y!X%VDpzef1vyU8pT4iEn{zGp)D`s<{l;11_B;wK z#MogqW1ZwipM-WF(g`$=>9c>HZkIG{RJ(L}nibC}s&tmd`hqUZ{Se+?E7q2m_AIl! zlle#{P_|Ueik@LO$jGCIyxH||zm{*28`t~GY}`DzoW3>o;Oj$1HhDscG7T?=Z){rj zgxQ)6<6)ZcnY+=2J_OTim{AP#Lg=U$xbhS|+3Fs*76?pAxp++LLf(H>iv>H1*LUw` z3TJsgnZ^*f*pf3nouL5QQ1vV+%ef3_W0=1Jh>kTo_6|tKaFdg6jdEPlUct^Et}>Gz z-gFx5qsQgGGA_HbM(uC@;v(H9NVs{1Y!Y{{|5%Q&f1PBp4(I9mwK?dA+if<#cu@GQ zLUy%@rl-k6(9F|r$*g}aQdq;S*E(zwqWaVp5T{U=Mo~w-Rw7P2!`n?2p&JKk7 zMaK@+v@m>KoM-nEvfxsjwit+%nBVfw9VQLc9JVAD$V3W0DLs)j$7AX`Xl#9|CoOt6 z)IRMm%e4S&IGsMmP(W}i zM7JfOQ62vz^zY-XBGYH@ut@QcobE?Gy_(ak&U;ao&{2{@9B>5GantOhfN}TF<{?OM z+g;`%cb}DXZts;i5D_C`*N@$ceq}yq9jR-~OaQ=nHDrq zf=xR|)VHIy*X4iP0oQsSy4%MzLiEDen_I+x9_hUl%6z}I?nPju1A1;uq>^*B%BQ2p zvCso!e4T@cTpjc-EPKR^d*$%vpxgn?F7@kCD(8wJXe+aSZEyTyw4Pn|{((;SHw^fc zcE50M=ZUiFdLV3;<~7>Q&Nm~S<#y*bB>aYpDxu+$@ZOXcupnL1!)x5Ji4?5|=H3K6VEvdER zDfM$t;SQa}+G77Aavqnp?8@bd>CYL|^}{j;1S52}He^ zKDTQ2d`y4F%DuFA#Zj7LJT{kDNo!{dRlXjAj#G}u86^_Y>#EX^*h!JWv+nvb&L>uo z&KifXaqS1^JO$1AK3N)CxeuKQ4gci%wVoV9?J4t-bQ*mtt-RMgmmwiaz$U z&Hf}W53VnI!ITqxmN#&jjA&^$xt@K2)=yJ#z}J87QJdBfEvrf#ko ztZ+Qz@_zKA{vMX^fU(rsXd29q@isn6N!6YDF=&vgGG||>WQTr9b1dE)aJ>Efo}V{k zaZeZNNs}PiwCX*kBy!&#O|E-%F|}oSUU<#5f5x0Hp$PX;-Y(nbF+b<}b^b)#Yol>x zZ3TbxkX@(?xQy@K-90&(t4@}5R@eJ*ZXD8Dd(A84Ze(J9>{4xP74vNeJ*9XU+s z+OO9FJ}&A#5%nTQZfx`@7~AHBzl+6m&lKOb!Dg)#0-*)!}EV0 zy_zQqA`pI=S(Ur|4+}F!$O}!TH^gLV3-puaB!y6%# zj&tAKjcYp1U$L*z&SN!%z-POQ+)>4xe=Hpj zNKZRtPoePIL~pgW5ygnt4{dVw0uYKUWiFqXAi6$^YI8~RJ~GA{gqnXsKPdwMbCbvU zUO%+Hyx%8>nV)zj(4!pvpn0mC-^An0p50vSw$-NmvLNmv_x8?M{v7u8tZi|rp|h8t zq&MRyLBG9Kz5)*6)O*a>tTJo${t8P>iTDsL3T)}J^O)U9qQcrm^k6ONIy;NS9C40I zuGpikpDiv_aocsqM16leJKu3mnNl(xFye90VglVeo>bb`SSb_Uelx}(7NY7PHXf+< znIKQiQN^ivKhuALEmHUU0CBd2|!yTKyDRZc;cR6PpK+ zls$%L8r6SdKYbe3wPGnXV7aXnR{VI6}Ds8spKaPeO#etJm3~Aab#m zWBnKE9(_z+4w3DIL8FmxlqO{@-dago#UZiy58xV`Bpj=YRc#6T)i&pXWF4fW*4YE2(k}^?|}SN&RlX3 z*VaWJ;k@Ue4Dmxk>%taFL}Ru{V^N@s&LyNViJO7e*DRhB9@CDE45u{tE_F#uNZn+> z>4dIrDU9>KR0uMPdvUvw64?Rv>X7b4E|y{8VCgkFy3%xl~soU-tkJ?GG>Jm=;bp1A~}xp>_%{y{vVhRJm{q1)~H 
[base85-encoded binary delta data; not human-readable, omitted]
delta 935424
[base85-encoded binary delta data; not human-readable, omitted]
zID%_g(wqyN(&W7@F_cO$?fpHeuNA9m3ROv zX9}(uT$*B}j6sxMFhd7y}M zrPn)=DP258GWrLidX4~Cdy34=Z1;F@xkk6oHU_2nOC%02@GQ=zfuGz0Q;h1BE5<1L zLU65>_AT=yRcH))mUl@D#Qw%wUsm6~Gpl`$HGc-0_A*@T(^uE(`5RWC9E!xyc@s?OtE^kXf_)0`c!^ zsDC?GtY;oQx%-*c`h&_TV5!0iA|2DQ zljwtdR&C$UK;MIX?eAAp&f3l3{Ut+<3INTwwFvzB@ysL65g;nMPI-E<&~DE(qcs!TB2U-G7wGN6y6BUD@6t1+`Y31c0~<9MdcFjyCJWo z)*5S17g}>?VF9Q;ybMk2hw9PZhJa}awMb4Bb5}L2>bByfV;rrldv`n^$A4}9)W@q8 ztGegmLpvAAtdAx z*^4|aT`zMU?wJ{FMZ4!GPq_s7Nj^OqvEreG0#W8`h}(i=|F-4G~PXd~Rlj zp_KIMH*dG>m)i-#$kjVhd=2XA(VCAjBHPY=_UeT@SdR0}omW~Mv5NG56Mg{Y&IF)B zt8*|EUGe&UF1f}P&VN&!-t0pcjSl7d`*8&x_6O(Ts`|=rI=XQc5;}b^PhS&~R#5Hi zM>0vPBG<}!dPDpNF97h%=OIp6y)Bv?Gy)FNIPY(=Q1~wm^v+*|sPcrdlSbrcO0n)3(-N%3PJmByw z{s0wx!(R3_)O|L6ZRY&iea@k*6q#l}f>HIPXN|pfRn7yQtrtmC#(Xy17;Jh@;^?5)oY> zMg|no9@Q3l5r1|MN6VIzdqx@D)A25F;e`fIU!>XIgd*GnCrFdql_QSVtO@K+UH<#u z|D^(?DEt1~zb>z;qW|^Z|6}^J{{{kE_Mg?O|L*?&?tkUkc;ngsD9M9%y^E6g_s7{B zMUnmIF9{uga{u|u|Kkq{%f$VP|4;t&%Z}&2zjwdBFn|Aw>~9j|f1bPmf0mUy{!ID8 z&s?_zeyP7-S;6s!KW=}2vcy}1u@j2v4?Y!mWuo{uyle zC-KB%i+m-{8tDDv-<0(2RF*8V;fh4_A+=S?4T+QtbB&F#t#CeDIB>rxNe-j_???bBvb{UnGF1Trt_;|a&->JC$ z{j1OV*NV6INm|o?zJAm{pWox}9~otPHTdFY4}V4EhIjgq@zlJp)bF(Fs#7@UT=lf--bzc zGk?aKj*E5&cJ#W3ES4Yy*@208Y^kX36DHWtNi{e;XYU~BdH$?oWcnNqhK%y6y*Pf7 zGX2{AfQzTEPjjYD(ge3=>2{tQ`bu>F$Q4ob9>oi zadm$Gc8F<5_@^$r@z;({Vgb8x^qyp*!he;2(#YQ}L^aHY5Nx(FbJ^aa=^W9}^Ti53 znMJcQ^lG`v9L4h8V?6{$p0y}@ihf-RMlCZ%pl%$}QCuFG_FjUYK&o@=!t5<_9vcE2y0B1rq!orq9D`fPb#7 zz#_!B5G%cB$Tz?+pBOKx=?b(t1M;L*i{6*IZ2?cv+5&aww=SD+*^Xg0QN<7zpEiF& z-$I|?MU^9;5PJK!PsJ$xiKQeTKWiQ7ujSqvDcwykf*2j;?aw~7b869q?)7Vbt!JZD z;a<0#1`Q@QA+3angJsf8GzEXaDu0?^$|uZecO45Y$<>;{D<4AmWoZ`a@Er9!IjS!0 zH!6TmeVo6_)Vo{L5d2m3u_oF?{}M-(5pa z?Gev~O zdqM^O3xe=f2H<3|eq!>`{eQ<7R6M{os`%<=g19joaPx36-GbKJisFp4<)qEqIFYeF zH&Y+qfM?eC!rT10UfE8mD_PDf^K_=%vZH4|XnM0xA4TW*uIYF%diNi+H)P5#t<+z$eAq5e%D$ z*VXdGN-`iS*Drw2yWj&>ji+~!%k(#I|Jhv6icHdzLsis|thF3$>na##35n4)xLj~~ z77;RhU6x^Y``r$J!hh~`Q!>Qi0Q!8lvk@uh{&s3;+DDRiVCA!PySoTAG%#|H^rJb~ zn4WBsst);=drludKsXs&CL5)=u(NFruTD5iVVmi+%%z3X=fx#uoPHiMlC=6uYQ-!- z$-%9yP(|x-Vet@VkmM;&U`yQF;~>o2#(DPx7Ut3cqK5}_-G2w!Vxz!Q`GAJr0^CZ4 znaLk?Z51mb8+d@2`24|Tcj4K+eVPa{$%8-)D!zuj@P6N{0@zcNx?R7mH@04xrF-L8 zp4JV;(kMA7kH9{@k9?)zID&;#p$;9IG^>qQmLGm=o%K~@ORr#gUU%k+8$$0bI>-35 zqI)A=t|1*60Drl0M#QT6u`+i`9X;+_Ouv#{@nJkXRWY`F{z6}#sNk6blXaf&s`&Ec z@+()xtzjl)sBw?rcF=l2oE>1e;kS7lC5rXN<(g?be|7`q5~Z|Bca$T;dG^CQ>;!vW z&M^(aw6gp4s(r7GM4llou=ABW?qTYuz3+~XPH(UIsDI9N^jm8AyqxSvN`9I2xSYZo ztWfB*vt5(T9ju7fo^avdRA~V@pq#T-}Q|1 znfBGctbdurGZie#X|*o?(boGXkw9$Ujcf3haW5yN>w9n907F2$zutcErEsx_rm;?l z%RA3@eoRsc>;5JNcUD1P&Ftd@ALCkC;i zZ>K)?0g-z&S8?YvZ@Eh~eVBin_j|*u^h2d^GcN&(JfDBG!#kr8>5^oq5fDTE@Dav{ z!g~yWmIL0LZzv5Oi0~Qnr_>4M6{R`Oew*gw2Ocsq7mrRNfXOjITeZ#m-nXDnH-@;y zT&A0|Eqp&)Q@Y z9?H_wCO3a4s>4FR$b5*4UdZSivu!tKyTsnEWA@o4IiSF0u~KS9)Uk6;w|vF+cHVbSNc)br1}Fze`&>tGr3FP!&pDv;A|c|^oO6n#4o1lYJFLof z$#}ezU3A6k^-@r}-mV92;pWszQ%oz~N27#9KtO+^o8IMw$FbSls_A7{sJi&=N zL_=Yzd{yQ#?Y_`JS(*3Ce@MGwFsnyk{`}n7PFHO_Kr)hT>1kf6L13P#kAaC*ws_Oi zun>Q0I1Yw09Y`=5k;Leh@(1uT(7)<6?RHuqH$nb^DrUgs2%djUv0>T8a#z{QNuV#| zYkfTF)*|jUMR7t$2Zn^NM@psb*mgf$od)9&Xx}T{eKePSrtu3%@>_RoN5uqsjJffj z(_cX7iUb9=Xt&tTsYe`$BdG+O5yPw-(hq?qm3yU@C(v|`aK(DMLIxN^i9OcS z(~qt=?{I6nd&{q2Z|>u_88Vk%fa$dGL<`m(65^wvpMO3xQ_kUlG|2Uc@oRtgQ(}KP zyHEYL8&tQKZ%=aJ14yN4v;A?)kP-*#Nj?}M>>>8uk&o&aH-DkzYZbQ~R6xKEAdD`y zEDTfEpG+OIhYP_*eMAU9YU3}|wPDftNQTl-E}Y5(H?LRIjvnCM^U5_xN~Pq?h(Rji zstcIE9lH8$vs($f}aFseAnXzu=*M`-__)342tj ziK2IaZ$Abu=a*>eM=n0{B@pB>RE-3PN(q#EjgDrx0pu+XKRih0ej-3TJ^**=#U|gn z7|ETmM*G2k^xBTm^;s&wB7XfdGqJ&BzF>@H;Q>_VjmPh?gIC=z-MTc%&$u 
z7xnFNw7=;;7a=fsca-88#}EJM>g^8brB~&|pJOk4ors4o5C{WBGIEb_u)&DWoR=XE z_g=Q^`ttKX=^vcl_Z!<(drW^ir&@9TYeWRI|U#t&4G6c<-edRgqRXfg*4OFZQj!)q|MwBa!iI~0%%$`SZM z=$l9nj_P{e;SX!k5DGI;M3w(gWzX^Ytvz z6_VWZeBD&g|`#J{n zOwW=+K!*46#DlkhflSn)>$6AgC2ZX7)n~=6gY*JE7ay#Q3qkWdPn3!>O$Xn7$smsH zhm+ycpFtkkkf*?~&TfCPx?=y=a*}az-2vH173t}dSN z!z_(IF7s5k9YPGA`2r88C0`D(u&CU%nsOqxo=_*Spd~-jqP=4L&Cx;m1oIuMtn&yz z!-9k5WsUi#e0hM;!FBocdN5Df6?Tby0-q4!>i|ehOQ7~EiR6DCPm5aJtob>RT?%Ad<<+eALF}4eb z!SidM{#f6Rnfrk80KO8@$NN35msKEf$m0_jSa#)dpncYQUFKE*e%*S>qY0iq=U=h8 zi8^4GVU*1xm5qNG4_U4#JcwEX;lBl;{a{S<}a2Yr!G_@ZQDXH!1z3jlNf zpzrj31?k<-K?)rZzZwDV!7osSWow!^`oahson3&@U+;JSeeQMuf%L%&bg+ne!E*St zf4iA^xOt;-1CCM<);HS7``{163Cfo!@m-|I>|M4>@lbzr!~mHv3a37ZVPg?973iyE zfd^^I$&Oa&0`L2LG!B;sd*vZ%Lgx%k&nh$>WgJ#=X4{z*^>3t)bCHVZ7tyavm|700 zJbrq?SsUVHBXt~l?fZcd0#z~ai0i;H=?R9^(_;=<*bbCibq;Hu&l8Gfi6jmA(v*bP z9<+|zYnp#L&c%Ms19}kEVrkaZ*WU=-yn=oOZ2~nC?&lorn7z-RIC^}1pLd2hhW!4R z;>lUz<>2ziZnvA|9Rqv-*mWnnhg!^I(3j-Q4~JoAze`^S@-mU|m&x4F`S8+43;M+6Rwa?67*#e|DqK?p^z}+OhB+kGzCb+yw{#$D0*Z zCB*eUi!!F;!2WEHeL9j=M<38q*8v5ro&h0eswKfOgs8d+Sf{%lw<9GE5$?c7JhM zfVW@xj<=>w(Efb*D6}!r^Rd)-hxe;Lefs{;j+1aNXgjB)kX>KR4?zU~PM(tIzWslM zGOxyT`d8yv&N@4GbT&@aX9qj^F7SV1>fS?ljh5#SAN{?rI@eaN5KvF5Lj=AQDDlyj zkvju#YWjT;>6+i*bwfYGIY}=zVdn4=?zxT}D7I zadxBck#bn%YDMWFdK!mp7*TbGW0!w!5{5}FIzmUp2rn`st+N>OsxTrVMlI5=xW9Nl zMX|G4`8oIpRU&K2Se@~94(84t=cd@Kw+CPZx>qs&BcRPg-`dJ^!9!h}&57T=kY69a zql!@*b;OaB;Cfm&@PNmu zsOA+b{D%R!9G#*3O+Xsror;(BzVmLa+_E+_k-UlEo&xe@H$C%v!f1e_Q7F1<##3|?exR*FE;~AP%vV1H~ z>YRnEHmq!dMMb6o!zC^f7@MBn-sItSaBc)Zi=k%h{A2bbR-c9{lKDixt&VY=YH#Rp zOErVS*r2^0?*XF;oj9MLq=9$Ek7fsfOXjqzXwllc&p3Yr+K9P%nRmCHo;zD_}U=T(v zcFzPFp7#XzdpxMbYf*=LmZ3ZPJD%kZ2vFhR zF5k!GPZsQ9*F_^|q)#@*yQmT#HZ}?{YM})kOpq9 zjwRUZd8{p^oK8U6T&Z;lPZj?CzOb6BYrT#uugdMhVY;F=yFYcnZlZ;IDP9HtZiYMd zgx-J2T|K^>)#=QxGyDDUcps1bHa4{|?%6ss#QQ!`MNjo~&4@)KM}e`NUq?9sO`*Wh ze-^h(jJHlkXD-@t?+oyJEv5KUC)7O3cn1SecptV3yV{KS5MrwtT_^n4B42-Lj`5H-A-R(v3 zt%${9bdzt#S&caT@e2;$sCItU?k6H_5`K?<5`v$E;qnhCo;DY!0ZO880;>|+;2NPF z%bc*$b9WyK4;!f+^QbM?A&fk0+`iV@Kc{!w(*5p8 zxL~1-V)@mgdVR`HOdI=75$Xd=%=>?xC%EzMMjakf#;T$LeSq02D$@Kd9YT)w09`dl zVK@Gqz9?YJhX*ybAMD++`HWuKV(h~hlBn!)ujJ{!v; zgB%(8&XkZYd9^zT6S(XAlpBA9VZTo%*GnS0HZRa-#1p*(OAZ zVQUH&zj)US{Tw`&`F{8e`q?}_BqC5<_wzZ(m-U#@=)heT>s%~n<<aRi8O5f9rKK3rpinlb5A+Wi`x|UA^QA|C zG4hcxQx`>K)R{98s&8KWhB81QbzoySM|pkPlmB@s(^FvCF=~I3^ZT5Agj6%$WkvK9 zW_y$Ti>4-EVIxp|Q!2Cm-3I&=Ma#aq1%6F*S1t2CTwxZvG-x8IyIchwL8|OXxMfHE>^WH(bPVBvs9ct7DCtWzC z%@w%a^-x4x06dLwY>tn_JW<*&Y*!Sscem$3i~KK=aj zdjgSXAMW?+`nsO3_WkhXh+Bc(m#aiMuMEXb1b&z3cd!cfcyrO1XTV@MmfA6UzoZ$L z81aS4Z4Q5EYPI#d$I&eg?CkSxygtN&6q26`=1W-gWp{xRyy3p*L4_}-jqiIaS-9a% zP;Pz5jL85ix{P-AOKA4boE4+;jOn_|uv`RmVMC80OF) z4*OfeZ;6H?Gm31lgW#n3H2V*eWO{y%Js}N=ZRUR|x^f+Ozpy>aad-yXuik356KvgO|lKDrw2D<{HQ(I%w2o)u1 z3MK{RRM@`nPrs(JNqJNn%ZB|dypBb2Guib>l*>^Ux3b&E%xS(QW1odx zW!`^iVSz=PxFKAMADsD}xo1}bDZz%!Y&ydQM^ws5hrdL9-;QjCs+k?V*L1>8*agWJ zfTB@Ni~2+fVMgvHyR;~A5zh#VPQM~SmihfjPyPI&;jZ`7Lu^vqsjBYhaDaa_+3=JD z=(+3f&+wB+(fW3OR)Lr@ShUE}VaOi8Q9FOiFnzZDhuvM{L|;GQ@9)zbNW$i0RC)GS zsiKjqPHDbZPO%qT320?Y+wJ|m{aE@-o`D4m*8A7AUeP$l3F3pnaPyI_SBM@ZU2?`n z;-W1RO+IaRzW0oVhiNJYpcVD}I|SN`LE9O19vh*BJMd~HWYj=mzpgx7n9hx_xL1D_ zii3F=vJEhiD2?yk@s75i(E?V&`KLq<(#5qtMPM(QZ);W@?uP9?=Y~@CFx;DG_}M-J z&IIh*OuPAZ$jWJE$gZ!N>P=m~=t{qZHlCGZv@VaNf2*U*+UxcbU!fGbU~^S+ z@m$;Pn?>;M9b!p%xXAr|VGSEQDnDQ9Yolgbxby8ZiN5N|`^>i0GUwKt1dD$U-k-Pi z9ZT3zBaXS5p%Q{ozp2qF#8@*?XbrxJB-if%82l6iM#o3~ezx(4RR9p-RLqT6Fvvd& z-~yBqWk>dA9X{_nkp-ssYNgUHct9f|W)HuhWz_CcTX){x{qj~jQ(aqo*y%lSRyvm7 z@>fxKm!OPn(EP{8qk-d_IGKMtNJKpJ`>CHn9azrSFGkaaLAz+~ur+mbKHoUwj-)4w 
zF~fdhf(QShKlwbPaM8m<_k9&%dkXz@39ty6~?wg8C~PTxWJ3SSZxfcjxqv2e`P%zo%y z6N8yc-4C~+JfQe=2#>nY5MF;PD*rX}O6~Edw^qNFkxjH-_cZ#wK7>6Ce&%fbqER1$ z1!I#vbH~GxDI?;HLy|@X?_?r2v|_j@WIh!9uCD2I=WsO6KAG7lAsvE_*bfnecEOLz#p6|*S*cH`5#@MI@RhcoF? z5YyK^N#UjR0TF$Fhf38>X7gOGulI-OUbljL4#BO*1_-rKy7`G!N%qax`>X1QrAL=# z9D&^zTt5X)Dpm8&9&)i8=_8*FN(G`+8;-#{8Y#DD{RUE%sIPyUOV{)oyp_5d5lFx` z;rA}MvEL#+Li@*&t13`V+qSXS%qZ3jUuHXy%HQR=J4t>UR0Zm->X{6Gj? z?JdXj&Zv&NFJF$13B@+{gP>1a`0k4t1?SYQ#MhFdN)f&%+$dp9yh3sktdTP7jlekF z`vbl6i^J5eI?#W#K0nBTdb083p3-1>bpVqF}S{SLwq_-;L`Fkeqb(~}g3 zaV(3(u&}cfZ^hA6foZ4&?(e2F+7*_1Oj-fs@m0%S@N6lDh-RdCxUDrPYG{p99j%qO zmb>TI>E$#fz^d=+_{P2@i8OznNWnV`ru01EZCprzs?dM0bnPF7B*@Pa1wSqQImx4D zG0P95FoT>_gAU9E`>*Z6pa_=^u4bg5A2s2?zD@w=xK?KM{;#w*-4%% zjU)2X#7G{J8=IDvt0n&?ackk3U)fL%ew!iS$0N9E)4}sms5T>bVAXdz zg$a`_#?t9e>tKuP*@F7dK~|*a9cJmpjLL6sQMP|--BhSgdiYhc`zCb33a-yoT&pWV zY4^p$8W@uG1EFO8SZ9YQJjvu86;4R|%$O&^RhHU!xwfS3oa{?w4%%~lBReZH!scL? zj#_9~+ml>mDl%6?4*LOhZTQ$B-|AD#w!6-eF`FxdNPK-3E&#VaW7yEs54qfT@8Fvv zlw*G!_ghs2=oE)6dr=BrM_wDSv0Al8*=R|2oRxcm-;XlW!r_baA9iLq7~c|juJ>HD zhgGMfq;XgMy)+Vy=C_9;%|5z-nuOUcedUqdG8uz)4u0liiCdp}@x${cJkqM4`fBe_ z4kF+Ux4U-)c7j(6;_?DLK71!qZHrGu4H|#e7au(5?c;k@;rm}Lzs}2?<~zamKJgB8 z^Ts@ljia=ILfAD;-Ax0L%H`Mm^&k6dq!ultAuMsieB@7JvOb4yEt`2bM&v6p{q$9H z&>$fDV`Ta9la2jw5ea1kz#*HMcUX%Mwnf{)$VX@0iPOyjZZsH$k+qjHy_VaF7yC=hAX-XVq`j2_3&$&Ae9Q|HmKil*BTq5^)3lW-x0@=Gfr{wL~#5K zNX=Dd6wP^&nhYA%%;R2oL^sU6mpFedQRGaPO@F!TK28QG^?ZBQtvz2nB9E%Z&ivlL z9i5zA^BG`~{!@?m(H6aM;|@9~@}7!O8#srZ1h7$g29HUG2RqmqcrmHO=X%|D?3+xQ zjtau#qj)g#303rC^;kG#GHJQtA612-wS_Zh?LJBQ$rPvJ`^8zP27;UXh7f=%jAJ8NHYWn2&~X0P?-gpk}bu>#Uk?gc?$=RSA-2V>rQ;V#65!--yyz~R*G zmz}>)`S2|XdNri`c(LJXRjf#I>$HhTaBTq$`UvEHyW0Cn#-!>zHTA3d(%hUKYkgL= z8q%{0#{I##?}NjIoM5i;%;$dr3h_7Ho=2}0PFk>aEqu|XpR8c98qFoGg_t~rPv9-9 zH+-1DQv4NKp0w6|WI+^^}l+febj1w6db^{lcqTUA>#ECsadTwB}=gbJJ`06N-PGqsWo=!W3WF zLJhtabz?OwYT4xEgUBkcXAIHyU#=_ z#Xczd^+a9{V`j=g_?~|NhW~c3_=o`M4Rd=Anq!;Gt%gdtx0d%Oa$_5di~P}WhgCUU z%cp5+FaAU|ueX;)!P|L;nH^fMcU0mfc*muIPG0wp9#Zp;)vBmx8V|w&)$hoNaDB&k zA>a&nS160RzDtmeR2_7QyHG^W5IVrDna~MYJ~J^!_5Ma2)0BU?ao>(eFhf2t4ew)D zlPycq*L8F`+@EH-jr5O(f7ag>EyiFdKnY#zL?76pIamSAKvmum`_&{e_0Jsh+dt0l z6vhT3_kt1iD07g)$FVxEKj%q9M*SlWT5^wAzBP{e`H1xehO+iGji;Alh#@HsKOqOs zs6#~o(2wU%7tnvRBY?EMQs4CFN$hDEM5NTy&Qulu)XuH!k&ut14A403nD$Z4%?? zEWznb-m>j^N#E!6lH#0k4Ne%BzRI(f?RDWIhCtKxpRF2<^N9@)FVa(|gHCOrkQ}In zqXcJ$66NCFf5YTTgP`s{<=?Tb?AbHeb?4VU)kgr)tk1r&*%TCUcWvLnkv9Wqf#+U8 z!F7Lq$f!a9FBE>OD8$!~wruy1JRF72$PX)goNM)+YOb$;Q1M2ZQN zIHuc=;}3<`TUAwj*iev#*7&|3PiSA=2q?l|>3B#pM8%xVbWcPNGRYW*@6p5tFg74s zWcH^H=>!)%*YlITN54wn4Knf5Kjs=u!{dKxq8b4eVdd^->}s$6KA7lUBBcS29@_97 z;Vwv}PZQ%ef`Z5u>SI1r{nU){Q!(}^28mLUoCEYln%G8rONbxI_|Zf6zUS_bYA(;l zw^c9=zwHF&)QM%O6-6s}7ZZzkkNXt&ae0BR@xt=mei%)YCMI!{>W3z;WQs8-L`9 zx0&6o@IWlSNSm9I2Xa@8U~di?g&%+W8!*K=bHJ+g#=(wCK1_(I?INnpsBQ!OM)NzK ze(04jb~S<*<+-J>MG-cGI<{76$j9Hgj9cfmkubMFn@tG}62Uj&1vV~gZ51Di*$vU? 
zJs5{IF4e?d8Wy^p;Bwm!1w{^E33!hZVhsQho1a`CW37=NdHlnV;Bta#HIIL(GI0bM zELbO)T;(G5@n|!4o?9-jv?*lO=F*Cm+WvFN zZ#2QGG=1DX_a)=1@#}k{r1guKroh#Yjiy|PsE4XyhmOltHo>&5VUHF_6aX^pVM;H# zb9d4-$k(a7`wSW~hk2zz;+%iscELRH-gy7+R>ku^h%#5l@HC&Ee~$!84lD9Lj~CqZ znlH-dqCrTQ@f;_;hQcIE&tEv}Hpw!M;Q%rUB`+26{IoVX(%A1-w=HwPwLM0D3DV!Y z_@el7C4W`5RS_u2*LVu~^;ZeIcDjZ~0R7tH9BJ!)@=C%|A_Sif*86|;DPKS@zU6XB z5TJLzb1i4Xnm1SQe>1Q`RHb63@AkP^2j&!Rz4Q|_vW z&(E(eHyL|4Uq=i3SWI)Z9^qeNtnWn9HoYsLkFK~0kSvvd)l^;_kaE2W-q#oO8*-aA zW|{Tw@+80?L#H+JxIR8z8HpjRD{A}A1-|EN2KVEHgV`^_eUa1My^FLY__2*jiufL3Yghx8!A&s=WZyfz?M%MG{YdgO=`zEFE#+I$J z3lG;2vA$L6)WpoSA*tc+Ad;hT{IUq^V~AMO1q^4`Qyza=#(F-LKV;CB8EnA~o6tGB zQ|!kT(xZ3#wWkY?`TQ1cw=7|T8|-N2{kBAvam4H2>O=EB7HYbw`srThAWV{x)!#qz zDRO|B{#?1yzMQ^*GR-@#RJVs!HYwg3iBG|>D|CY`U_JbX^^Ng>;q-b*X0`2uMrgQ{16Yra|NyCaL%iH z)kLLT9e}$d<4cYuDUf2F+tDx}0ATl4M>JU#2`6^qcX>4B{r9-9=+ic5;=U1f3ZA_ui)OjMp5xNBaAb5hmY?__fPtPbFRUrq|6d>-eG{jI<2^;{Zr zU8C&)g7^na$o^BkKmL9p))#6Q*d~ z?GAsO#R=WK8y*kxoE zlUH{tovhE#4}>7q&R|8bk&b=+7%BQ`YO>)HhYt0bJ%cnF8tb%PsKX6RM0)BvNx$C%j@Wo>GW#~K!!E4Mb#ajtt%0Z6wk#>LA-uG0y)b{n(7Y|?V>E?0RoO+yK+1vS~;y>@mf%$1By&Ez#bMkA`gg4V% z1v`#)X@#)hrI7n?N`W=x)dup!!vVdAC+?hoKD2S|AK#4DzC@>TdR{@c?XCOa${Bx) z++t+&&F=cj9=M|#@Jg#f`JiFyq+havsHA4htbKo1)AmSL;LWu!{;ojDlfkC@`I6@JPY#Q zu)kJH>LcfP1PC%nzcQ@#@BBo=7NUP0-pV;@nbw|QAHqD_p&W(1E>LO^bRW)h6G!`z zW2Rotb9yaNCDf#LYT0DX+b399%jv@3@$PUX&2jRmXaTJPl-{J)0#)qc&6fmaLv=75 zI;%tC)gTdrs)EWa=(B}ef$cvHUneYCe%#~5Wq;@EbE5}_m%FR)Qr7T9X~chzffwkO z?=fgPfv+!d>-%2Vynx7%L@A=kv;RO7oMrPu;*0Bwo>(WCOq7lP!`YuTDa*Fqn&@9y zQCn4EMIh3MYi)tjH`0Ubi3li)^xdytBk%Y7=A2bgTXn~iPxfY}TwKk0j&Y1H#N>z$ zm^4=)qdyHub~iqBdx4>M9$A0!dG$no?lkLhig@SDq?a2ACg^I@;7f8dX7i6o)NEfF z4r3D_zEk&u$U)}bwm8DQ4*CtMCK5c915s>RfYQ*ho}TE!Unco1BMhMp6t4oSJ{7-p= zyEFQtS+ceF-Y+@^r%ZpMang=kKu5mwdsYL}H6yj{uf(^~QPf?ZIq^AFh}`?aEBZE! z6UAv;qDOaOhgjE>f8>7dT>43D{@?&ER;Ak`;_W-Sr`!Z!d&Q7A^iauZ=3{t?>E!@a zxb0dFsMW|nsF37RbVBvkecsGh?y=sU;q4WjOT6; zzmnb^XHQ108ztp>>jrHE|?(={C>q8$dG5I^xS?^FCW@rfC zS-nMLbSVP_HD>&DWOOQ55X`9?-v0LHQ9aYJtJbb2d!d2$|x-&~W|*cM?ZT4Y`S9 z{K~A=q8)$xNMG$$f$sApcn68Z&OL~PjA`us395j`dLj-@I}K%iI8h#x*0Jm~`*kis zjAYSi^aI(#{@Ms&Xy=3Fef_Y{@6V#rEqTWjUtGIq@X&n~1?%KhnB?!aC{258Q<|Jr zQ9)GW=Y3cTAC(jD6=zTQ^*P~1f19$U>FB_AkK=#E3vD)_Xnwry9d_`p{d;G(UF(6} z-!t@8eV&d)H!Gb!{93&=mvggC)VVwdNGr+sDkU<@dc=#LFRww@*I%l>pjrto8**k9 zJ^#>znr95D-bZ*(l6r1-a&tte?Q4(Za!-2yp7E!x^Q8D(1>?qNG5ov_H|DP*uE5

ZhHo@&HcDF@b#g( zw7sa0m-LkiJyq7ucdQZUZ;ZwBFn%&N;XF2P!q+w&FU;v4gyZ~ZPq@pHD|@;Pc#h12 zMlB)C>NqnDFCTVL1bUE}lHsJ*M<1y9QV)OYWxkbguY8!4L?Go%*2ZVL9S$S%uqRO< zR3Tz}?)}`fu6!V|rr?>h&(UtF>jGgNAaHF8yHtQ>doNtO2n>8X$p1%G9O<_SP;Be@KlMX{+3E8rh z7K1Y}H)%Jl(fSe<_E@RRRpRLRdF5xBzt5ZD$oA{L zdVHz#)|X=&x_KQ%j+Q|JS~THILacxHVZ10qqW5T?B6pUSQ3r2X=EM3r+WmUl^#@m= z?ITOv6BfNRQpwpNQ+H7D9s%wiuYShU-|}uC<87hUq8yKCa?V7Z1rBTNfVO#~m;2Im zC-~l4eUeiTcH1Raz4!c9Ed8RuGR`AJSO#PpasF4YneXDwlM{>BQM{Auzx#id>2<{8 z{HFkec%;TXec6ZyHJaH45SH7?&u8Nmn0N0(zz`H(-r#}(v&p!?n@>L^L#K+Sq${ih z@6&t$o%vheJnBK?j2=w^-EL5Xo`AJXYwwd55VnM-LE2ynU8Pxy(1A&2awNx;v9P zb+j~jDHrY8HcUvQ)g3}uTwM-?UT&Lqn1jq;?JZh!oII!#KKK1#?1z7NzQNF7NpTj; zy{$?1nqN9(FVGzr=#7sW&PNX+>D2d=OhJQZ=*9l_tx1zY7Pr5<=RCA%r*1k2`?0b) zY(+E754Dc%NA=EI`tiDS2%h4+Kt#*Ae!8exAviVaKsODy@iXawbHNCCGo9ND<(t)B zauBc*CL&m8^vQ0Dl@EXDO*+apdWmN(Y!Ew?)J9^LPZuZ5Y6`8doGGW(I>fD1JzEpL zlx!anE|PMq3zT5&81xqwDuy4bHpnenynqZ&*CxIGiCCiXPOn0#JSiy-4i>{4%0cdF zCO;e+`Ya!ZSlo-(S1!&!MP#3LSlZ9!1Q;-9TaUe3)`!eB?lFHoc1fgE3*(bA3MJRK zT8hM?SO4kd!Ur}lTJhVsajtNHfab%@#$Y6A9z&sJw|k0_L^ zk%Fq3bm`GkPBDxEKf%pXDbx6Zdg?S|8`9LjkHHxGf;@lP<12+*J^JFzRb7^~V%;JnM_@Qj~NoCg>LI=0|R zd=`otPp4scYVb~6{IB$7%u(p6r+OXJ8aXbi!QPnb)FNsyki_w=4T{b}=Z-o8DhYSa z(~h;!{YZb&-}|Z>4O67b(st{=`r3&WYBK8$SVV9i{7V84Tu3KtSfHitAufcOmm^MQxT3CqS!gASDNY?Du)d+%lOFu`=#ipx1Pe#^jIIcBQdpFIcz0*!g@3YH& zHzm$b&fo9qSPtif1Ta;R_31k@Aq%Jb^_#rt$1tsFZ3(GAj`&v@Okb3p{O~ z)t8@+5EhW+GlFR$1tGptDuQu=Ik57kY&&|@<3KEtcEdb?b;)8$LdhaVjG2o*BVHV^^G$Vcd!8yW%f&7f%(JKX#I(!77E)AVHa z#Oi-m#i*2C|0+9&mNR{zLj0wCo#{~*+O27!T5Se1wS&Ua+ZyQ+*AtCmMd|RP-GW&I zWX>0?2yT-I!$NKUX1t^H#0iPmAwVnlz{NI%4!XBV&<(1mK^I@cOYBD8LXa+eK^ ztN!?myHnFgEuAhQas6J_7%5|nLY0Yhl1hL5vc00qES{JpN)u0cf-M9P-Tricy_#+z zOW_ExYI99Lgs1LeYK+T1)=xj*$q=@5;(gd#jQ0ooVc_XO$*SL8%sk`i8%Fyfr>{>k z7G2XWUV=Dwy}u^DZ6k1St(AfLn@igpD92tG9a8_boZzS1&eVTY zH)9%x#yI_~~8(MKF?f|Q`VBS2sL7N-sSqW*u67)w8K;7nTT5Q~o(qx%X0}`aRL!_vZOU5PS#} z!ts#U;&RU)3-`Q&>I1V2KkmFxR5*Y67i#zDsEsT#*tKwY)LwW|y|Z}tywtKigH^PpaPc$UH_Q!0B*lD-fjFvo+Rj4g2 zLSpc^BEfoBqV&XtY~leX2Bh%3zPFEyZr_=^F2Vk(bD*h^ts8_5nW!;*NSuE@-i;tb zpxCc1c4g&RG|*lEonhmb^f!%o8(#f_Ea`Uw&S|37zWLVUyr}tuFi&~PMu+$9{_G}& ztd92DmVnI5b;WTzGkKVuaq^=TN}KBoZ8m3Tyf1$uq><8gMR`2Pu6k~5q@hn_#__>FA^uhJtNtobOJIwW>>hYs*A>u2ut;DlRg~nS0ri?BH`&| zvL{z=bp(7->S;Atz?dV#Sj5Nq3fIY3!3HPsDgsA z0J+*%c+D40gdgNQ`ZmSgGn%Ut;%w!>+Q=-e_fF7bAGJE zH}CyRDHz0FCx9OaIOHB_=nMry54$uK25v$RoX(jfD_vtC4EoU5~ zLlBnfq=C@ymK{&nAc%jLCzHID93)nHKfXH9Zc3OaKFy!X>PcS3Gc(@N(#xFO#~A5s z-l7RKzNTz%F)}l^vt4zm=LVdwTXhF+-OKZ-K0M2k!)94w(S(2S4NO7%>n}(WOHR;XKUgG6`ua?`>gQ058Jdw2JM5&6X;aPvj)nknj{bMB(J#_sOc-99${;~Kvw9gQ2tx{<&KRma!) 
z$DJhcM&&1LK?4wznb~?4nWG)Ir{ybr1FRI6Mo;6g7kz&SceT>{nv`;-?`7=${torN zd?55rT85rjbTK})ZL96Lb1uM_*rO0|JE)=g{vNFR+QMDBPn(xw)K{K<*f#xo1U#(_ zC`bO(n9~VC95>v#n*BSjt zhGw)t5l&5|-I;yv$aT@H zRlkGm!QEB}P^jk6q$qjLU)yC`OI&}K-FTB%Y!$9NvY(5oc{ZOXkV{Y1t27N*Uv6pj z+Nq^Yv$pOpCYv^1v+;Q!JxZk4eu`-PhA5_3;U|Be#SfRGFU-#;`fxaZ=zF(Bd^Amh zc-@&_g1b}ero;MjMC9x}j+#Xte~D62;k?V@;Otx;d^!rKqgw_Tl!x4f*M8WJ0>Crr zk+jNlG)K(Iwx$hIl9_%81GdaD#GK+bfNS}VpESmXZqc>pw3m7FVere#49Q*B>RLY{ zOA>!_WUiPDI81exaaRrzhL_W{fF%yDBXdW}T=Z9DtM&8zic{}rq+=D4GWyo>yXHwV zibL-JCwi8B?%7?8&DZzITKo4++mk3B&(2mBAO7@Jg>C@!(<0PQ2c&uXgQ%y8w&}HB zadzPOX6HSv&un0b3J~#(3hW8fE^BZ|SFL|{N|4P#{(7sy^TP(&1bxY3WB$tE_P@HW zcd!Y&CX8R^;74QPrF7TGrH%|^+V;cB!82=aLXp{@zPlMh|<1IhVk)s z7q1DT=0INQYriu9q)DRP>k$hs8hS}&YvIY*(gagmi0BZK6T?(n%=k=N6c@JbVYx%w znZ9?-V<#xrQ9$*lL4^AxeQuuyb+mu$D>_FxC;V3B(d>3|KilW))BYGgg!Y`^Vu(t+ zk)Hdlp&cim?K+YWJ$?F7r;=We({Rlabfg`x+_tb_*z-EA!60#V=AMBRfLZ^p!tZW3 z@((Khy=%Ir@)FJt4BI2@K^6Hxs@BAsGs|e_Kkmx0J4Ee8JlD4N$&2Nz4uyXN*2SBD zGb)4r!Xb*Cto)pco>+mERs^t1S4j#I#(5TpVmr~?NJ;9k~RBNXqMmY zi>Jul5gzTHDUHOz3is>_cHp7hd~g)GH6Eh;*qgb%i+>M4fJFsqfU-ZtZGIjB3T}UU<5>LOgfBv^ z*h)70CdlMGQ$55M>;A@U#p0oTKlAOgJesDiYLRX=T!)NIWUJ}@U1Jh`H+tvuVb9kv zC-AjJ98WqQXYNxzx>gyZ!GjgywHCU(dv$FCTXoyLv9_Yu#`h5p>*a2E5 zp00{lXhyzcE(`AC+jpF{F8~l=9j^DbJ3vqJA)tr0Vc=1c+qM2?STIpR<&~gg$1(FI z()-bm###b4053q$zbA2|Q5W03NS%1S7@u#YF=nrvir6ak9rBP1YM5-P#mapMtxpTiE z>|Q7?+6TG>o_Y^iJY9d56By(9is$2$)5XlQ9{`BGqyGlV><4%y0&Qo{9QmC)ZbY9& zA#j(Gz%xuW+*g_}sdxB)o)kO(-E_v!lahv{ohm~Q!B2>t3fDwr<(DUm$-E&XLv{nQ zxFUR?8epqdzJRhU-F0I^nz4=%`x*@B2|a^2y&K~kvcG<0kssNOly{PV~ti zF2UzFVzcaNWF z1~$W~HV`5MQ%zHUJ_tQH`Oy9Ls@n2JCthB3y5bv=i}AVfwZ0lH%9y+xQ69+Y?dCC3FEo#;$rx=FohMAVVUz(^z! zYyeCv+qFvDHpPN5)E|iHSh?}OTB#S1L}wVtk&@g&T7u6grwS!wo8$Q}oCl zjd%S%_M|m`un(UtRuMJ!JE9=&(w#la-9gHe1RaOC{%MBKGj*M>EtUtNB`hPyC=HM36IyJ7yAh5 zhfGGkO?Ua)(tR55 zyJItd-PuA`G4FqXz*X2L&yi8(UWr;USURj0^wZjD48901;DA0K)j8L)%k*}QhkzoR z4UIcMR44QBjzqX~2+YhV{8n5yl+XV4T8OC25@KE{kIF4qHCMl}mt#5~vDH z@hKk2lFYW^&?olUv~!UFHnXY|h{PgRX?ym6f`Zac?vS`*AVjPU2_+NrNRTiKUYh3uQH%Qe-tx^6<(+`CSvligm)drEtM zzb(W0l_B!XQQe^Ohb*ZIyiV-N{4#T{lFya#Ck~3C#&ict0U_Um452!usE{b9eJq@A zS73AQyG=}s9)UA1o@<;iKP;;UXA5HB`pM9;mM}XG2+?~aYa`L*RcHQ=79ww>6D>Fv zW9FZA-yea})MW;8$@g+7Pef36zP(w0rw=-?>NDkT9>B{~Z#rL@f7><&j+{mP^G*U= zhS1&qjRq&yhe}H1_(6kp;&^Y>x}s#t(`dv=Hyp>k?0c)NqoFa^H>)A>h%22PT9>ay zTqjf77Ad&wJ@YT;-Fph`(-(9E8N{&FA=>HeBw=a=$2Ch{!a{E1qP%nKrWd+@?>!th zdR>>$)pXeIR(AiE`7gVCo#saVPHzBS?A*PIt#axgRPRJ{{iug+d4G(UQl}x{&c&Z4 zq=(N9A0nH#(o;RMSWG5@4do61(nTLg@{pb3LFS@^Q-xRiWaP^;l}rb4awKnd5BFfL zh%{rsAGCi)fFXyzDKMLo;YlZTlF z`=1X&|8C~3-YI$f0!fld_I^|~pvxr`s+4^EQtZ=Ts?WC%1rfW(<(stE+Q4$}Jl-`Z zaol!O9|X9TXcBJU*tLf4^@Ed}wrckPO#?Mv-$=F%ikFh0V zBV{t?q~7ncr1Oe@TQ~Es+il23Q}1Bp`x*R9SC)qUgmACOp8=vNf1V}e6!T}Jvec}*_LlpR;h5vGk$%%ULe$d3F`X&8^MKY+LSG#RkE`=N z{?yI3gwjU6`15HwIuD+#C}Y+W4P@XbSsjm`s0qU;SiRSOWMG zlLK42_ILe#e#?j$Sc^{ka>`}SXsV=);ik6rXUl4r8e{`ug21=$ga_}bk0hkI~%us7YUevdt~@A$LllI$l5(S z>lcLX+I)0>(Wl~y>%64Q*?b_wlwo%lup2I(kOZDplJXzG%@hw%i`k?MqWSl;QSL`FblAezZh)537 z4Nk72;tbmf8+cb!^GZ+N3w76UckkYJPWMd3#v>|?q8xhVE$r(BCg7fr2s;u$e(hClMcG>=zEgo3%igB7ffm4?YZ!GVzBM?O? 
zfvlaGh%lhF5x5_nIPUJkRej-Vny-~KbpOZxwSpFc&@P+jFf^=FyIO5*Ev|xV>5lz2TPYcaSBVclAug!u3BvOtEy=!Dt_Uw_%%lkhf_F%5v%eLe_D1I?jqDuC(`fs`ss~ zcoR|?AMT0!reW6Oo;5oEQ=v5Z(7if3K0knh?CMdcGH$xQ;93Pdv{*S=p{K*41fTBn z{+M5}F+{}D|M^oPHsxg0Mx8yJ_?8K07Uku{-rCL@v+KXgfY0o4ea>( zbVOyy^2KR^0`staaCrDm%$@DmJ(fJs;*rJxs>vjH0Sjcwln<3s9E_V5XU-jmp#7&1 z3#E#M_!ZZmycRD=D}6og+3r^)_fV&q=gf0I+>_0Tp_S;RvcMtz#?Sk-0GZ)$JRdpe z@$k!P?}gpzbP!oi6>{{_PRIR!-)f4mY|UndqXHhqXwnW-UML)GQ3ZS&i*+QJ2k7~y z1Z@r->C9gBMg@IOr+W(I-GEb%)AO7s()QGlKLLuk<`ogSo4DF^p~B;blh7! zzfgM1ZP%qal|X9VK>DX_1538pVqjkD5vd;|rVRX+-R zCNhDvEt;qV*Su>U5Y1hGk?5~j$ThLCguTI|4vD@$Tcy}_ZTlf&F}qe(0!87%+>Y7> z$k(HfWr|>hj}Y%pXT;s)<8*n|KD?x%YI(}=Ue(&F9JD$CdVE{)SkTtw%Y~Fx%2pp+;UtW9EAH&3J9})wO=UaSkm2C7TTIFUWY7^ zd$h~P-*o?Ez7hg3{#Plxx8oG-a~>nH{s>3mL3hvzTs|->h@8VUu9*ZMFj)ORtrhq& zg!NG;(I@VrMK zn8?~ZQ7&$%Az3L@TCd}Vu%+cKYu_2xw0#J!2Sh7K;&PhhURo;%IAJk$%`2|HL?xX> zB_Dx|gi7>ype}&oA=rG3;$iUJnrsW(YTNF_lXz-tnFbPntR_FOU*Jh`>izf(_8O8z zk7z9**6bi+MFBlbpl+*8(BH>pt8~@)t`kss5sCc6jUwbM*|8&1Ltqnc_S%VEYeYW; zBSFya9)HIw&^~op^ZB~-pZZ^u^doz zj>|ym*9GiWJeinkOKt0#QqS(K{?7B0&dhRd;6$=Git;g1!^P7@9GeU?t=1g zZ(v0lHn6`SCZmqYf3812^<;(9(S27>GuyDD?2hrXppU4mFq~Dq;urU8zqrGoi5L8` zNctFmr^)Z)@ekVQl*d+MS-vi2xz{X*CZ+db3om>0uS}+?rI>wR$uEMGvKab5NR!S` z&k$+yMy`2HHSZ(v-E{x@_+f^;Rq$C5d?GD^Ep_#Gyxn2&4`b=h5&rqP#wy+R+)shB z>laF#nUcBxL7W^AWO_c*E^?fI+uK^E$5vH;C`t`K+M{c6ez=Z4bK3575YBsDnciLE zu^&;~aL>fdisRX7vx}VDKE&2XE^9nmGJZZlgsZpk)P*ka&9Un}8c15>>?$O@_X2EX zaiA&2t!Z%Yb{viR)Ql0cAy@muZ45yU%}VRcRdqh7A&RZOL+tG3r-JO#kjLA*n(p3z zo{lXO#`R;J9nWeeZM^s^;1Tp*w_S64Fa=+K50p;CNF|)gxbl(v_pC_bv7BLF){fw! zM@jw0Wtb5dOZO)Oyj9~UkVT|$T}UNGg#hM`>*^lcpW22*r8SlJMLwp89yfyX3%G>w zHZEMQEn%A|%GH#D@cI@KHX&<`8aft#HE`O!W793UPf*7dIo%N_>=9WqqSNSdEi~m= zot&$@pdujNe&=$uUM?r~5((z==`d$v0|H@#Bd{Th^5uQmV5I6O8+EIKT-D(Vh|cv5 zgTh)B=v!C(4UNiq6WJYgIq$C5$hdL5QGWVbwK#Pw81>U^pSr`g!A0Ul>T2tMa_Rd1 z5JUTOexui?`EqyGrpOA|a1t}%tQr!O!xfpMFJWY*^fJNVxA406|KZudhed5 zI<=*gBYJ224VsxV#a+1tO&v0S)DK4`!n@ZW0`VTan1Pir&Au%siG5fVcmLe)9%Oa`ZT?eB8vLE9FYp*OrX}j7ZDay6;WGJ zNAxlq1>+t4>L@7u!v28+>uz2+xQp!xHe zwT&9~r|ap-zWZ9y5D~ZE-JX#~?p+w@MD+1F?xR-xkkvc9u2QLFe?~{x*b~y+?sNWm zVcC%xPVf6pmGIPNy#cXfNw$u!=FH7x72Ou!r_EsZx7|B^PJJGb=>chf09~|~COD2w zjh*!=a97n8ktgDBQWCy@6A4vC#`M>SFGS)2bO^HCPW3P3zQ0}Ml|?L)?e*uos0MmL zYc~u7W7@3W)G@~pLujlfww?OgFLbvO&0avLeisvbULP3=xp440{2ZnK60lsnP6E>E zMyP*&e*IpnU;q7l=Sf5(v%LdY#8>+qA!!6XItieil$JA{_xMzQntHr%lb?)iKr-=k z&xK?Xz&HPW*N`&LVHPEPCC)Ctn=oi@M`ba;PsW0FWdN~qRdaJe9yTT z7&f~yrcNVu_1nNBB*BjBIgG#T5ggfeA5RM39fq!yFT|v+?Ga%YBZTVfLS;`0(63|} zSS2N%LAZNPNNUu7+hgUW;C!6h6OrH)3EE)t4Vfn*rbFfAb^bCFG)X1eRzL&G<|z13 z#;g3GaA2Jelmy!b!PR*Fav5fpsmE|OH!99v`;*wprn?gPzS-Qy}dvR37Ji)K|_>tb>|R; zgZMyeen@INHX~U7h1IeuWa_4phfVVrE*cwF@5vV82LctsXtc#S!PXtpK76jvMK29?~|Ea3b1f~^6e7<-RvE#cPxbSn>Dv_XHV4KHc(spXc_{I z0Q3@nFQ9CwOReogzX}y%k{-3CR0;`*0B8-pwgaX(3Dsr$TGP2XJJh(p_7PGe1)@?(6(xT@f_R{;Qveh;N9U*9QkTG`G!02i1l ziDFo&^r@wWC>b8TSc89zGEOLr zzqqxPeZzjRPK9#%=xC#*rqo&evHsp3ub_QBOqe*w@b%5m87yaCk3R}s7^8f2X>@W+wkAe4o5SdNYGNZRGs+n8iV37MT9?g1|DR=S(+J?kd&W}&|Ic`@u)o*(t|V7vxt!{ig~ zj(g=&IEP{Ls`mXWU%}4ZInddNW3@BK?#?+Uq8|4*dyW*e-mh5Qk<)T~+vf?Qqb~F3lApzbChu+bD-`m@x4V6P5=4xHlqubrHcPi}g82e91RIUH} z?L)UaAD_aL+f}01)9Xs=$8P%+)c30dWN(ce5hHuQzJvEl&E-9Fz02b}Ac6v(4U*rR z9HU7{!8e7+E+WC91$=s3koXeB@-h2;0t1vzBV`z^bjPuEclSS_JEtJX=_M@ zh!MA!Y%s*8+^JVEjWV$4i2aT;?#H#yCxj=8f6d$dnCdbE z@^U{AZ}i{x7nVT0D4##MCyv0Gvalx4D!$q$%>@3g)5fP$M3K}vR!5dSYMakawdN{? 
zyriP&7kQk${ja=|rH4ZN2V_~`2apowdPRGT5ydr|JS)!coaWYvkWZ<7MmGBIrEqZQA;?z4Y@qKt}pw&>&xb;*FFOWDrpNmSBT6 z-UpeeGNsqt41i^I~eF2(vY~C%y@UF?{8x{Kb7_ z(pHgS`jF6;&7b{$<<3Xr=(aaHKKCyaa>`+`lPEV*fO}BP1;O~BK6``+8*NUP6L)+( zh-vj8bzsySYBUvzxnRh4HDcD?c-jJ^xbn9ER`-F{4)~5T3&az96GL8t=V$1{Po^ zcC>KkE$M=Po-LyQSLrxuHWYmjpCWAM!O#nKPOhnL8t~tA;IKrP=Z0 zNcL14O19+^V&nF|-!R93!a3RVvWf`Es!z79f;-f-fRuZkUpV`J`7KWZ7=)h{9v|~KcBM!^KellW ztNpriIOQSEWqz-#?L7}*%h$hbe4bjM7kp#;bm~IX+{IB|?`~J_<@UK8vlQF5+lyzr z(-6vD4vA8qU7U}R_GI--fRRcb`!5ZW!PXw&^)w$9s}bU*oS}?x$5v=XX>I4dOb*?j z_mOOWzC?z+q~w|=x2O4}*PW7OFpL6g)IC3;TCC{xs)&ak9ye&wFW8Kp)f1GGZwhPG z15*P?GH^LebC&TV&=Qf7^W0Dak(@8rDA|>Mxgq-w;B{}Cgc^u-Q8Nze=NdNP4?sbg zM{=)=M=;el_x9azA?N1xlD`#9Nd{yb(KRH0`>=Z(wIxX!kzTih(TghPBK4snHpe7==}TNO;BYuf=CBv-np&zW(K&{fdT4+l2yJIvc< zs0o=VUf+;?GAUIZPu_ihMTXpV=-@_%fpsXtO;(ckbhXG60r^lDv(8fl@ic}w%occm zDytr2hRlX*w5-cMt)n~HPB^k@iQo{(%FC{YHQKYTXEZ`20ujbVCFJFk zdu6@!>$W&jW{^5z&3R15p&DEA(4woeL9=OCP*9Io^6kgMDm?AyA%IWyxkntTi~jS| z`w{~&pDmi5HHw%g$CiiN>1kfcb(9l-f~GIKOM#%s^>9*NBs*ZIkmE3R;=(S+;+ZBL z#fH`Br1C_C?Oq@yWWal`hQ5MLCLGh3S&}ajaodmr-kDyFXo!l{{i|acw<3k1bDdN0 z;`Cj*_0z8eK!TJ(j)O4IuwN58)n)cS++Tw3%i^yzhSEbYmERStMEe+H0{J2GY8YQ zVl(mwTO1uZf+A38-|VUST!_G>7EP*an5rmz`iG)*cRxe86JLu2lKONM{atAK2+E^x zVdWDm$rHS39k5xgft56Bp`a1$B+Ykj%do`HN~&JoD(P=K7iH1%ma4>mo#4*XcYuOu zXT}C}r+!*I&>j%77jy0XZok(7ezBi#B5&>klV5+gT3IgG+99+;JCQ2lji40BEN0X-L)709*RDgKOZcY_EXy+cGL)m^GC)nCz{fK=>;p(OaEY`4kFh}m9uV<;HmfpjiQEA86$z9=Kz{K`{(ctQ*@B#_yh znr?&O51trVD=(OTHY;Zc);?#VA?}ZKp!REAMIh?)Qhh-w zKMp8cf8_CSH)%ja)T2^mcHb>is-*sv0`mK}kcqSVz%gonoBF_PSzf2RCM1XZ@sS^6 za>G8*h8Cm>cKFk2igUc*Ej12|;5rv1M%}rxfr#hn9lE((oQS8v^1XJ^h-9pRriVC0 z68;G~2nR_MKOB@waAVxzF!lyFzk!+jo!IDVS=v$64)3jXZ8l}GBS_mGKrTkRCgB26 zY!__zdwHaP4`GjZo7$x+3g!%{D|V1Yun!hM=ilXw0^?+xWpkbri8Aj&e5PmP^sV0D z_v{c}9RsdSz$*z# zOs9K2q#vJ4ZAJq=x7qhVIIe`3lTJ=m^JW4ME1sf%lGveF50a2zk!1i(0|wS1>=4d& zW_t5!UL1CQz_yr+vAz1Z^}&B_t1H!aGAb#R{N0|h8+o6dG9?Xx<7!Vr$O0umcPiXe z-%;XS<*un^ob|5V8;EJX?n)qmhPKH+VG>4?rx;mt(N0fqu!$V_1c)``sRQT4I{J?h zAhuI~kW_j1M!JWR4iYmg7S|=Sa7>OLQS*yo(ukpG+{W=q?ir^kLJ`~Zn4H2YeA}vk^AkMkw6g>@dPwoaIqf&=MBnsI@#8V4KoYP>G3@SJU5+K$Z%P+_*binV$(Kr6yZ!)GbQX|* zY%e7eeWv`cJaMIo`L4#jQ1BLls>r}4EwJQGuXp0Q6MkM?<4??2zcagjdrF(w9-JfmmfKR1}{AD^O40GTsEaC|UB?DCu zS*uzg?;pU>YYMMn;O}6SeQVgULiUpn!7SFWGy%M4BO&S1_}9xr`y35I&joIOzdtdm zIm*2F@xX)ZECQ~sD$*{;_5<&bzX=4o?+$nPgdg;!+OLr|J8RTn4-4-mGINFpyfta$U6Qt(ufzj>jzfU#lvple*CE13i?+5CA z+rV-;9R;LEsoV+s^=oCW*zy|+4w!-gO4teK_+8Xdcjvk1*kBiXtCyjFHNhaeR%H+F zjkuFiA1cCQ2)_3K!3Fyba7JvlQuhXG*(p!HP}sN?I$#tMxFv?}4C^30WQRT&0Pz$m zSjL7obayJ&P9=49ZMz}OeC%9nikBBjXtJbSzxV5L%j~%Y^{vxBpXKO& zEq}LBT?=#+@2=+rae0w{BOpv<;4}hl_ON*LP_+Q;N@b067@vg*IOv0Ax#=j#yIY6z zZoaNXcyTfS&sA37ME$|{avUJo;yJ$~uyW}uS5HgsowJK&UJM)fSo1);uj#HKHftD3 zail%}epVXQ+07zvq3HG7y(Z#8faSR>PCdl58J?A$KHq-odtcIDl6hKw+IUedrGM#{u#*>sHHoDm|xa zOCUzQEs){`WR1+M#)_Zpvj-^dqYEgiz0|k;lzN|OA;G4UySJmL{Z%Xs6taAM!j+Mx zJJPjZ^Ugvh?-2qRS42^vyPf@05xp5&C(r;>r zL0{#c1lrm2(UFLy&FTj&x*)ZKx7BpJAD@UD zx3Rry=M!UR(=J?gZ&;ZOPDISzY7caA^7lt1Kvy~kqH8!1dvG_zU-R6|fV7&mE|8QK zlO*3h`g}A^$lM6qqNZJK;Vk{k*vxxhq|zboDofF%b;;-*59xF^q$9xnsfd8omjYAv zC1NgrWP%)!?WmBvKFQYMOQx} z$#^H%z4{Jw_hI^G_Bl}$EaZv@i3^Ud(NWyGrH4Oyyelr(Ptji0i~4p=;6mj+*8mWF z-FoJ6e*m@w1;bT>^*!&J%nw7Nds7Cr9W^9>;>V|?R8MWjBXoTC{3XDe;q%RRfzR9Rood4tjoSSkc{i7YKIS>HB2GXI zbmnbErlE4xty`f)zYcUC{dRiXpK<|b^zee_nr&NqGTg$xG8;|pcJJ(}N{{dz3ZN2y z+-OoqUMo9(ZoJDJ9>`;Pn{Y>vP}U5O6Zv$JcgQ|OO?aym0x)_`f?CZgk)v$t@Dok-VPf&FRagFYcYKdeHE^hZ>K=-R+dT#4 z$Vu+DbOD;gAn*O2gvKV>_g&x!MG5ll19RM#^Q}KyLMzF_nUK z)*#lNDy>QzD*Id6;o+o5LGo6gUa4+V;M=DUU#U<3rkT4t^{MWovLFZHdOLql3ZqfV 
zz>pfswBI}-Ik=`}T9~x+nQC*_ytH!4;&A?xvKkveI>u2S9ueer-Se`C>%^CTKB|t~ zKv8LHjNKL~rbB*QvR`WQL9>JiZ6^=rh9I!brl=>>ua z)pnNdQ9Z^!yuHMX44o^#=l}jWe=n#-IK2GteV#x6{ZHo4*57Njpb`G~U;hLZ;g38# z5-Q*EvdDvnu@%=zP%6pfZx5k=a};}q&)A%`&iRe!_GU!g!*4r_Cfv=2u@WH8@Gx$*j6QYZJPRgy!@Ac{!^#<9j@0# z-EozD_IWKXJ|{1+HFze^Yu0Pr{s%y!uDfvdoj(8?fR(rYdL1>U8q{WeHao`~4muhk zBK$svy6^XAQZkZi1aqG;NN4I-;$U8Wb8)%orZX?mMMjjtW3@MZXKoG>5}XmcEK<2d zl~q0t#XC){wWnTwK3x@mo`xc-y+S7(U%Kv!RfyQ;!WMaM+(^QH@;mv>D0cJMAq5VQ zGIZiDyWSBj{vlo}#|=@FlwqLB&QhfkPR*Pr(YQHebNTM=0?1i04!p^|Hj=TAUT&Gm zd61UwHYi!}oV`n;j$#G;ilza`1K`KUNW_Wu)zH%R5N5xv>#yQ}_Wo&c#89{dSJIU} zBIxRpTYls=crY$Ys&?>VJP?47L6dkBX_Ub#`48y|Ryz{_YcPe56uM+=haW4NrAHO+ zq}%sXoxwavmhcxuZI+tk1iPsQY8T7LC?nax8r@`2nlb!t{t0k2wYvS5w{f^`$>meQ z5bYud6~(@6N(kwHm;H@zUi>$D|HihJ1&hW7vZ4@p-$xP}^j*Z;c( z{{8o#8-PC42j6nG)Dd11T-C;AZYG6vUfspp(61!qv-Zbo%pO}XWivURyk*p)$jEiY3cmmhUB>qF2Tw7@KXR%4aQc$s|w)vxH_qj_-i z7Q~T>sMBvikwux;&lMpud(@q!r95A6EK*rGk3L-W=aZXlqt+)OyfNRb*fQ)B!PPcr zD7qJF{n`LSv?r3@r1K-YPd6Ejf*hxLK=fpBI_cg9W_h#%a_fn|Wk1WJ*bfsb?N+y5 z-8y|;bR`&nqDKHI!wW&wa@8Qg=yfJiWnYw4-B(%lH45$HtNepnbatbjnfp^tg2nI- zaIlYI_V;QSQHKzBHGU&N_kEJ^6#kAn=I~q(d(e!(%Snoe49eJbYOrYo+H0 zd87Gr|8}ee4)HxPy08nKWP2J0ZO+VwnC)8`_4gS=M|t0G2cf#j0^$~Tj(ThS6E^k) z_W(tIMtrqbzUFm<72vadTFv_=gJa{rgd&NA;{G-64tGII9UFy&o8^nTV!aoH-%Yk? z!!#D=yfc*s!Se{knssF0{0S*iAkxu5xpV1!=Jyt%Tr3qn{GpPB;)aPIW@iXe`M;kB zI?NU3YS9sF0kxt##U7Y3XAnOF$gYtl!{PXU_i}><|FEPn$MmE^5<+%9s5Y7uPE(xJ zXkRf3dq3}VTqaaJM_bn$_I!?BpYYWaC14PZeVD<-{2{f|=7ZnauNp9aznD%#t+}2L z#bN-sI%wa+-lbVb%zv7+Fa6u?xj)-CT2x<}nCXp(loOrjy_WCwR$bwNir7|kzpa^n zU0YSzefgXA<-xS(6Y^^s*;hncR}Hq2{{qkX>XKbfkl!RY!?OTI^C1K+a+|8Oh7|lS zg`V7=2pF`<;n~75g>cO-xpy4fdGzjOKZ?T*0g^AU2lT|m+<*GEN%57F-AleP>HUn0 zKOOe4Hu!DcEBV zIC`$9s(b;56rWp4I8@hV;7y3?0>cQ6Is_uDD{>7rRQSRA<#f~T^n+XkT#EFv5)(qq zcX6)Q#EBsNBbZDSndC0BGjamlIZgqYV6qqk7Y3>>G`=ia>Bsle^K4oIAkoIGNi|}OvnU5^E)T&wI@ru^C@RS zfJ2&{@=5LP4k)%qO7EdMbUzLo=f^ugI#oR}2(6oo&ET&Om_J*QU((|wIKq_w-tH0b z(Zh7;-e?)s{PFx=F4OL>P_Yq^j-GzQ(-ZDt_2WzTp4OhI#{Td6Eq~zgEeTX8qwN*NhNx&`GxgAeiz< zXFsTju7~LDU+&ti?}-b7N{S5*(ev9pkZ*WL^UD{O5@E6Lg-xV$Zv-*)Nz3njh)OoH zE#jPCsM{_X?CqQO{DB#Ns;IRlMZSg<7#w2#_1(~_*!IXX%V)Qun9x=`Q5Y5hyaom; zXg<9mFYhOHBvMgISb~C2Pp888%ic?nE+wcy2c{gkg&mG5tL8Lopy;{N=@+jaU^*(x zy$Er%Fz{bH(;U}`Bb1Fg)%VzxqL0#hZ=`$ zrQWvy#bLD*6G=xGNpw*AK)}ceRrby5_M- zK1XLpX}^DYJL}elZ&^(5-Tmji9wqbKn5$PlFzp1uOP8nh-7KF-l0(%j_S1NuTPf z)sQi;C8`)RmTuQxbE?k`OicbM{*BYSe(bceI*_qG$Ah;!6I&87(gxgF0dZI^3Y#;b z>npu~$J-;ER@kfbKS#!l_geoadayQ7H*G5+LwULchG2Lh0jdu05A2Z@m0E^=-HENE{ zW5qV)o_=bzUz$Rs8Q6f?Hjj@vv$Vb{<8llaR-0K79!ZPW#nDlsa5QM z$0HRnnvzeUsHJH105wBv_;uxJ`8I}g6K)iDOAiCLX0!VI{4~Xs(cPKz&#w6^YpL6HNbuS(H~Obi3EJ&{r$~FIkU$`j|8py(k<0b?0870zt}8u0u6s(* zS!}OA)bTy?DexM_`Ph9R#Yg>LL6XxCd;{#*5kst?$d*Wd2ViyB-PgETLHlSE8=*7jeB`;dYz45T584fXip5H= z3M6O$gxlsKidMXvqIl81H6Kcjsit1jV87^9uo*?w1}3wG^9^uAwzX%fh!s}@Mg9vo z&rT20*?dE@^xQyjsG}(9HuL3Y9c1D33lCta8oo64TY8_wK{RVrX0L;o)ra=6DbHf* zs?(L$WeX%|T}AhwYhIVMwIU^dA;stIn+ChL_4VO<-#ysJ{tA`EfQH}$sOdG{+h!LB z^dlBcI|Tu2IDz)rkH^~%vWnd2_!!ZHij6L~jdZXN%jwz>9Ca?--=EW7u_;!mm942^ zLq1=QY$2RZ?k__msA7J?TSy=^>%fXT-!2lr^CPCmsWK(PnVGUroc9-hS2G3o?>zdB zHK1J1rf`<%_)+nW`MY=l!gEWwFcQcpdA-@wGWPp-?edskGe`XeMd_Syi|yz{P`vuo z4yJh7LrR3eq?&U#+wn^*hTRH@jD9d`#=E-Cbmqp9dH29Zqg(`Xu0oSdw=aRW|DsXg zZUA2<2R#T$A&G|LgCN|04YwNUPz7KH;feXcP7#_suNd4tGCig zvVg6@lx^-#-pnoB4$F=yUrFa_H=k)mBRmR23E?`o1+3r^fQX|+n)}`FCB34FlxJ=& zEH4Zm2Nq`Mr(U>rA#6!N{sBuWypu#H}3D_aqCv+E!N2<=gfNn z1m}8eZrdb1mwt=sL-aKD%d$7GpRto@4Ri@v z^uIlVSKIyFvdeuTP?W9p75{i%vd?GOpdBrf+yeQIh1Gq)oDk1qj6?WTO%3Om)U773`)>_{Mz;}Y_e(Yhj+_n 
zsP~i=lt^vzWy*UL4u}S=kGso{xdMGVrX&5VKgXxtF)Oskz2|+-0&S}Ezk(UrE^N+D_T>#NaG z{zb#}HQ&rgj=jTKc<<#+dd>7+AEa`BWiL{(s7`Vwlm4KICnK_j25IBh!z9tlnXt2e z({mzO2#~z{y+~o;&6{U8Krx+nNn9g1txgcDd+&?ea0MIBwTbyTl-C20s=wiCWhkbpFkfzNP($)6-9s zjZ2Z^TLIX{#}SEG!<_U#-q(*wlB@^za93o+DI2B-xY)v6vBfS;KqJ!Sg=5b+Um50l z;3#xw>tQ?w9tD~L9J(WQU?5mpK=tQFLL2T$mKs-JA_A6@MJ}&lNVj< zYe?_V($PVui+_Lr0A)a$zZb4leC~X@odA5c`4|89_pNlG9`KxSg1V+=q|k!Bw{n!n zP%l=%5#;UvHqV; zD$Un_1>;#Lz@?we>JHN~nk^{v|9uiAf4kupUtmF5!Na)Rrhm7Ti>xZA5Ll2+@h*#Y z%-#Sy>hQza9`BeP7 zJbyprJk5r}UwR&Gy*n>E{3YOFf9e|ip!)4lB|@XS|497ft-c;#3ny1h9S3u}QI_x2 z+G>VE0a%^ufCqAmrQ)UtFSXWQ`272Ez}HhEUgL9~SU&!~W;4rMjmN{|0JX5vqBfg< z-yci(yf&+E^*@id3G0ot&L)zM9{3&E=rgOf;5;OB+a$j1AIh9Q@V0tce+rac0#TTK zRY3m+Z(B%2wh%*nK{5B7-)_1nh}s1kXVM`Rw|FR`+b4$WvPchvia(6Jl^3t>@_NsE zta`nIlvwv%JOsftk%uJ;vJ;?=EM9mQ6tY!(^0O!l3xDAY^>UdX3U-aoWi`)!VP0Vf z*fI9elmyNc=DrjE9uqP5f7+!-ahDdP&;HDN@%}DGHt)z4tT)2q{(h2!BZu(#>muUi z-=*&P7On>V@47tr=X}m&d}qTzYBHg&a7@cO$xNFzX^jrv8K5v|r+ja3iff7!S9-`9yt@ZgKv z-(zhWuD|H=f7kUbmA3$zfIxC^4e@am5wgM({EZC?;+?98P1lM}62#$IdoNVBo0N3pH0|(UB=2;N-C%HiMIzBJy*z@B$ zYJ36Pf&hr{Wp4-=PsX~x*dbExl#?O{z_K8fubovv;8h2A(4_A{+M6BM+e}R^1KAe2 zE6RR&EEvgV@r=LOMUVI3d@M8As}y>^;d%c1{N5l?1Cz?1e|zB_5Y~Rqxe#sM<#myV zc=Zow>HZ?)ooVMnedd_S0=Ws=BEp?ImaL=xWat751uH@{W$XRp# z#)2CjZb@R!w1VrWsF+Frdp_+8eSp81FN*V>*Khyk(_b@&B{hIdi)|*Jd+XiP|L#?^ zKmX=Ee>|W6o__()-G4t{xM%*v75`87=fCTNtRf263*+el!W#eg{Q=HgUv0g)A}{{e zz4q@~zznMf(%;tb?|R~z3)|n^`gh&_b1%yCwfmdrKqV0Vz5c(yjlcPSzWx97`TzAr zVq{*$5SI=f^26N2?|FqrODh~1&#@wuwEORSe@hVryoY+RoC_LR4DPQp=2P+NSAX}f zpau-)U?5AEzn_hlLNts$KVmb!A;k{BM-`crdWVloysIHN@|@i&C&n1S^})J;`-8`R zpZCQ7c`rI*fP3rTy=WB(fyBIw$k>q>IrBVM=X1&9`f>uK|MWBAT)xC(VB9HC=T^jV ze*zG<`t*ZE9XuImWaa#6*Zs^(JpZ?}YsBAt{eTG&5y<>!KEt_n;xF2oRO(KDf!AUb zH|am?DbCe@_nXJp>RTlLFLUx6N3jdGQ|H^>Lgijt(Y1SF^Y!l@;{UCa|KtAqw}$?G z&F$a1iMc2IH`n6h=JwxnFh2Ko{_{0Ue-Qs$AO33`{P#T$B-b8e1#^1vZ!G=&e)xWj zxBr^+k>sr|__?!5{*9-9zr$iue`^fHDum*wDnA|H#DFfVb2D*2>Ts5L|GCa8O5;DQ zku&Bz^~VGw!r-0U~Gs$ zDbVft@JdqBwfmJ)(fc}Eyi_s_SPGV-}}Gk6cyk5H@_pjfB8S(k3GY` zz1-jT{jYQUzjgj^Ui$mm|7DC_uvdFapbAPQy5Nf=Oa6c6qJLlew-&?I4lShXG_J7cYRQc=&3193DzKsNJ{+lnb))xUPvrysZ=FBS=2*%3{Ohv`)q1P`#1e>4m{{0!$O;W?F7+`s?UhHr(v!rvSf{F|$;@0%np5`lic zFBO3-x{4%Sdagj_OmuDrzQ*}Ck2uc%n=ykuVZq{q|21Yp-VreFol5*Ry?AzzT7dl` zzg%`)e#8mmZ?F5q{3Ko(|8Fc{?07v!jcQ>`gn<(%k^_49T)2Nre~mT~BR|gPnHPLs zHu*bhpg4K(=NcTZuUjC<3Ca0t2Y|x=eck@Im;ax0#to33M}%wnH*frX{O@}IjfMX| zYiHJ$yt-`dAH)HmsCby300EK^Ae8QCI(uow;y!5=;C(dp+m=&BgBA z_bw6to7 z?|P0`hL6D4y3uEy5SyO%{cW33GW1d(;=nWFPuhd}NK5GYf2p#Z*j7i6AoS(L27wpr zwLQMT<>wRj+U4(t0}hO>9o)I0?JYgnhS3SW2k&YSCax=k00&Q#79vP`?O35oB(X=Q zI3r%4CVSoI4nymkx$X#k@HA zf4Sb<@@`8!CvhG*8h%G&wNq~BsXr=mFJ&*r62lQwT=(_N{ug(BBX^g*=dP=B#J%^f zzU3A#-uzcn+aP?k=5Lyur}g}nPtex>hmY}J@o|A=f8z7j(VzCC#?KV~$7CwQ!>LI! 
z)+08Ii4F{HDK8O>wcrB8$okiOO-f1X|Sh7O?w zXUf>Mj$3TTKlrXo@}hjr^h4+Cdg6?Je_q5PrT_)Waj3UWjiu{$$sxri@E+uE)i@-| zen5!re+}DjcFkZ4DgJ_vFtJD27{6B9m{40yYLfvfcxEE6`48;SDJSps{OT8?)Vd}g z%Rg5O65SR)W%2pH_&y9)oR3C7i1?i-*TGrV_31X=^LPI|Z)3fW*tiLrj6Y+@=m%s$K}2sZB*uM1OF_q<5^_A7~?ay@G*mH z4tbwgt(SbRexH8VWbgUCZ?d+G`oMy#pUWeMaVwR_>eCxBg~K`GEbzJj>`HvnHU5pV^<_4c6-!nOP4`!hGrq7w64R&Y;X77yIPWf8$>ucBm;IyY$f8hFS zrjM!B0e#-?M_XppNt}8qFo0ioJeOQR+4us#5&JCrK>n_5^fd?cnc!07@w+y&^;KS> z@;6W+?7hnC@!`zFAa028_49RU3ogVGfA!vG=4e#E+Wyb`XW7nWW-Vc@5cBQqQ*&=6 z*{+jXmOQ(zWo_=>Soe2z-UFYDU#@|*%paM&!N-$2Q+LM>K9ko^f9!!_oIIQPA7JG1 zM;8H7Dpx)!e74e9+e}MwjBTam$>Yw zUEQ|Nd*hl%;@_2jw$8EcCh~jKfA1gh@Kei|tP8SGj_<DreqWYuvNB zZfZw!*UGcvz9uge7%gploc4o;ZiNYh58IXagx?RmtZ$_IL2co_;@?ej05hpst=CuV z=@&C*f7)8Wj6~wC1-#8yi@HxHO)y&c%2@kdhY^EXVWrgCN-jV{rDZEM92 z~i(bmJ5QnSy>@gxf(_bOXX=Iv16^HLDJ^Fp`Ue?5(#NpUxh0V6XL0MO^ zr?>m7{HVm@slsRq-4nfdina_d0s^~RUCH5s|DkcSiRoU ztTMKr?^G_2tpJn1f9o+KioIp+`THlINS@cERw#Z!3+JyY-#<}YR2W>#iJ7*dYtz~! zz)1T?*E;jJ@sZ$(!~qg(^&XcDh$r8|7keEojT2pKKd)GK!)M48cAnQVNus?T>e(N! za}Cp=!6J|K4`MQXyEiphFM6Ih`bK~1Cr1bnP0oY(*k;uae~1CaKL_3Ck(Nkq&t2^t z3m@I8bp6(Wsj2vQxu>y`3 zR(e%bB59@alns*HQp1mP)-(nS^R*Fn0UJoph5xDI^EFyi^?ZXdNqS~$ShYU*7r;d(>b8bPl zsZFri$I(ghYDCD0mFOzMPLY$=>m55+{+|WKM{rMNf6et|vZyP69?#rU*45yS;4_&V zG<;jMvaY`5WCU)60-vlqm^Rp9((I~r_6HSIZSNOvjhU6~iA8R4bkE9HC<;DN`{C~9 zk?!Q|!Du{wJRL(YB(c91V(z+k0Pp{LnvpH5lAo*c=7gG(7HLMN^%LR3b6>Ogxwmd4 zc2c|Ee^=J+iiM~c3cFh5yX~;Zov(E(<|m4sRQ_M(z?&c6-zXeJYG~qHix`32TYhf$ zY9nCx@iyF@YCPY4+#&WQ-EC}t z5;?=_vDzQuo2}8#JdI*(3>@2qj-JVjv)#t8)Ok$Zmh!6tr5Il{=Ui${%|8CX9jMFJ zAEv`#|9kz4JW()!e-#s9-~J(YJS&?v>?RVoi2owqEbCA%{)qi4VqCFZDQm6phbs=M ze}1=MsMnfPfj<yU|nH4sS1DD53OX1%xI)Z3xYL}N1 zj}{!M1zs(9CD+`)>pjieU1OiK4?8UC#z`$zFKDejQ}^pcU=}t&<%-UjZnExq528D2 z=(_Sxf5s*qTa#Q-#U_U21-2%&Fk+Kxf4w3&Dtm}fe`92yoT?UB^mOeFY^+!Pe5yQA zk20`>?a*7b8gIOhxEC^GV~op0J^UrU&;S$KeI0+~hKZzn>uRlLJZhaP&Py9GW<)BH zbbt+f?X-*QC&CT?uB&tZhH$C^$>&sU_a`QV-Dw&nepJ?$e%+y#R^m?o>)qjwe-wU9 zuxvh7HtRe`6T@3;FyJAA%g*L+?Ub6TZ*SsIFvy#YV0_ch+y@Lma-VaV`}9?fOumNC znm_dAS{HRRTk^O|ojshd+%IR zb!fpC%&hTf=!YESu(ABG-)M3wu&VUogQCw83zJas^w4VkcJBXsLoP zDqB+E)T$;eXRo~XWqsm`PnJ1wQCBVEAh9Wu5BG+0Zq@6GI#}Tgir+&Hqio6rGI?=! ziZd%v-#Snm7YE2WJHIQwvN<#UfzKHIsz&y-N+- z+aOR}N9t04h{>h8j=N0&3pW&f+VC2fZ3kL z`f}k`^dNAVp>=}L81=Va?1c0qS~*+iwarhf=b*>nCr#bkfVf}Pi&Pj{u zdGDwFGvnHPZOcsTbpF!Ovu$VXFY!I5p}B97;Z6G+ci6f>$6i|=7AZA=RoH(4SB#~~ zYe|W7;VB%|fBui^(;$qdCiU84|H)}@ycW*jZNJD{q;~Ot;w;3;W$df!?AJI>YUtP& zun2~%>eL-_SM0*8bpea!?{)E&@HbY(=PWyUDtnI&N9qPCj22|o|f-}`qW+zjp^y7hl z&Q`kQ3Li~!PjHQIY*9E9{AW*VtHs@I*nI}F+u&r>Os%m^PF4435+C0s7J8HWWAIXI zgLAe_)777bQOinM`dss?m72jYi4V=rv_)=E$EF3iwLMNu(2Bp}i3LYN)$ndLWQoJd z8npYVe;0Yk6(25YM#WwE&S&{|wfjYV`wvb-ad)xt)F#v&=5t=d>*fBDJNvbEI6qzA zM~P9qXb*=cx5z)H?Hm(b)cHO=KaShR2<$;V_Y;TMi`)&K2zvu>F|3hnYEjrS6vw2U zw5D%sU3|5?-ZS*GGo|Kj zZTXw<7y3S<$P>Rmzck*7K#=Hhb()K?LQNwWXP>P|yL8h1LiwKrUIaeP$eY41BNi~L ze~)=LQv5QXY6tfYe$W(tkR=$4+&T72-H{vCU)Bm$%yyADO<|F;@9f_-&s4Z0K34t- zmrd@Pis9tG8vJ*{?<8)AFDSefn3&|UJ8i9MOC`4q6Or$&?R~OQxFG^7V2{SwY8p<~ zB-d;Vu`9NogW121GIB-vSO?2xECs@5e`@WScAq>T;ZPwi_}rV;g1DVjeAVFW(*&H@ zXK<#8WpZa3Iw1c0wr4(te1DGqnr{w{Ha0GKflELMiJV@3ufUl;<3p-<6m;RBh0*eL zI8U`O>ocNwfDPedM@Dtw^}At1?`rm4hcgMUuxX`xv3qW{ax@i=PhZK>H$ttwe^+*I zd+0mMyZX+dM$8`Q#6WO2Zg;Ai z;K*UIZ-ot&ef+h@CzVr8XizfL{Z@53*(>}nxE22ViZ|FQE-Ux~@H?YOaIJEmi#%EG z?ojUMoOIM9hbNw@r7y*v?L#vUe{!&0$4thEfPq}(bNt_2pN$(fRCaiNN23bpB%~r! 
zYrqkaJj?1hBe2)Kmg#!5g!I9Mm|L5cma6lj)k4Ng^(g3eQA}yIx!%>u$Gz^K-X`1; z?EHA1kn6FhYuv3b98$k`nCf1K+^HAC>60AWN*X%+LwKcat!>0$QgY2@fB*LjWA$WL zk5F{D^G4n>@1JXqAvT%@4tPax)EzL;#Cv%Ykrm>tBqG?RY4x>LdG@*Mq1MvIS{pq* zV#hhPrvlfI*bG~LcSit@T|at3Q1z23vE6=J{a96+7wro*dKw@xX?em~#Xcp+l>s-Y z`UQNpiRR9!w$}XV3$X`$f7#e-;g4Et7oQ>Z3?Wu`sb|qEr|DJAZED+sPZGi@)t64Q z?j+PRa;vB*6laDyzB&t%f05(K{&QxocU@}EiWf3RMML=u;f@sirjlw8Ig;n`@f}ZBhBY-8VAwo#epzX;X6%Up9m{ zWYH}>=^gw^CK+)?jauvd^G=`7fACRLNhXOOz`2D#Ud9i; z{v`3kaMhpsipZ>+oE5lPMB%02iiR|$W@UW!ksm$vGg2Pg6W&DRhs zP?Ijsr^FJmtB0srEiYI_w@v}j^ zeGKXZab-REM}5OQ57)gDU#EXHN#0U0N zTa&AS^I?rOZI=tq_a*%S#OI;H`L-OEOuI)wr7bw$@7fdf25f_dk5|6rnsXI#qT-gZ zXY?ir-iKe1*A5kqM`Mcc)2LZo+>oAujo{SUX?|wf^<6J?CEs_j0mch^RMwZC?=(L! zsQoW}fA1kkPdbV~oYUN4U9i3U{EB)4F?e-Py?-7k<%=q&zQeY#NNXgq?f-jHy(uxYb)R$cL+Bc?xQzxgk7x;sy?r|n$U*jI}K&WE6 zERQb1)X}i(u7PTYAOfsIQgxMZWhPUljQj zce9Ak{>-7ZlT~7_!qLEPkTdaGg}+$}qfznru*8D+Jt~$Tfz?r0fqH#{n=kcG_@b7N ze+F_g>=dy;MtsD5IAz!+xH#A(@r84-c{%xTLVd*AV!MxHnr+X~D09L189~q2>!T0W zywWkmW{b@_%;09+V^_EXOtwm)VX6(LxzV{GHTBxRhD${?99g)^XXwZQ9m z=Y&~84^P8{DPRydRgDLeS?~3B)MQ6+f5_qMt9$AYAJ|$QtYbX=G6K#e{D(4@cku7J zo4c`{*n`#hp*hUqGDy4-%=IZXJTSR-m6osNE4DrG!c{q&R)Ib`PSX@-s~n|XbSuCm^UKlqO_w$Y^U(Gk`G>o%LnV+f4%ok zV3FdBbuf&yX?W6)p={)QLu;VIS+1{vi&Mms;NOx5!XYR=r)I4i!XLQtPT>j)Kcw`@ zC+;`(?-gUEjkEP+pqT{ERpM)EJc_SlYxfV&<}mK1hU@WK%3?jrur~Nn(s!si8{xyG zpAo2~?vA@2u}3k_4|3G_!oK1Ye-kR*F|pw@?c%+~s|MdjitfgHZzyAzL5+iQ;L0l?NraK*XXPja8-wXVVI zJG}o5jhzSMlIg=N=d3h9(ZSlz4ZH1!a9PFvNL+ZDwQwD(-l+0WyA$Rg=Hw)ho7+XB0-ibCfAJHhD1W5e5ds4k zn_vT+uLf^qiYN^65dAofARU;EmlpHQTZbK;>|hFHE@SfnuCdi}jkm?*M2+I? z_18rDoRuCEAk*=^*LUI9IL;_YzUqW|9=tnjP2vowQ;TikK*fhva3leU)1KBOsEEFL zk>^`$Mgk{M+X;_tf8s{n1a6xo$2Z}bjO)avoNJ27`0lytYlClWOl<0P^t)NK2mu0A zALX2XP(-HMTJZjkaB~&!PsQlmX*n}RUy%3=U!A#K4-O!<;aV#Y-q`I7T+d?RW9F0V zSyOxe3zt=AOE_gJ-cz`cI=_nFbnP<~`*o`sMS0{+i6YuIe_R0{5L@;c9s+oY-U(SM ze`_Earf-cb?`6qm01nW-L{(HWV$P3;*Lgfsxr*&QkB`5a3HO=QP{VK_j7!Zz(u6;F(K=xJxR>2YV^jUH(a4COtw$-X(FdTJu|N z4s1tpG|PCsxZ^4&zx9LBMWgnjN#0ptt^6IhB8oG(e`{~|euO>d+zXEZfhF36eK{2Q zz{0fKy7xS!?x5w_CsDQwSM@|p>zIKAuYBc{?HJPWMQ%hCo|<0H86aZLBf;*=H{oK3Vo2fW>ytk>6@< z;Q-E`e>725jhFbOBLO0PlslT^rJo3@WixNuv{6?EL;#5zZd0GrB`ye9qqV=*i3@te;oTi& zf7pL>t>pJ_w1ae#_bs9#`TY}Ym;C;#JyABnHKJ<_Ti_A=tt-aGx`E?dIk3v-WfiLN zuKnN+?(}&$d=9}_^r~?rry_PcMLrYqt54RE*(>#hd_NmJgr{>&FTdXg!f6X6Wu1^y z0BhN4U_hY;c987=vi$yqp5S_;a$Q3`f0z-^TUvir`P4pKV*=>H(@o^;fUU`UdW8FM z#<|_%aCJvbRf8=*7aT8gW|#I5Hl1k;P7@||$F~9&(l=i2n!?$dq4#DoH~eK>+cE0B zLv4mU#zr%eTvBQWaFEGE$y;Xgt=M~s$%!ne22DB|aSiX&TUDFdhp!(KD%X^2e+v7d zdd$FBysJIGeyy+{y7CQW-T%t_kp2^e0T%W|^`n*cZY7H}FJxSEs zQ9Sv`8&iAjcTnM^ryag1@n}nUnkS5;uA}@5E~5)J?ldOb+LU@h8{2BOPSFyM1DK*0 z(%(Zfuk$e=#wXV&-pB4TIB`jze~*&@@-gAWN7KacDapaH0OueVvHIB0raO!p9(^eq zf++*=XmS##vu1#HwkMFadL}(Nos0Br%RSHzFeVJ-M8HR|8^n8MEnQ;fu)sNpvu=K| z+0l)tuaBFMsD1K3%W#uD@udq%sZqh_PN@hR<^wsRTE4bf&G-v!QzZPQQG`LK}Y^xc|iEy}S7d5LJ4)QC9BNs9%H) z?rG9((QilY){~6_hZq`2e}r2!Ss~;y>E~`gO72=olB05v_qF+4rxzd;U>_U{m5v;| ztn?fH;-alx@GyWC-`%=3cYsI>_Di10pO}|k#S7xTv8llyhC3L3v@E~a=kB2TrcvFb zbhqaVQZJ!KB|KYI2c}nYUaZqM-g)I6{fLRm-YR_XkJ$Zcvu?e7f7iO_kKV28USECk zEk@GwDQ7UEb_#a4-GX0IyU6#;4x#OAJ4XnT9MquSikL_{p`{5<;j;d4mWgwuUgi3x ziOfgRtiyKb82WfHNjdrT{J~0+4k-!p7WiMi(Q3m#;%^F^D10P~(NE;u)b!WMdxx=# z1htFFE(*MZHe$ige>2nCwcscbV@rHR&JTAl@s_O4jd)ZSQpf&su6=mS`&gy$P0$Jr&$!<#P`d9vKRk%))Qt7pQ&D!I!}|&eVUKObLOI z4Q6W?&t?GELQlVFo=f3{59hw-KRwUT(E{t45j%r{0gy9`f8j~$dBOt57hzX^tUI}O z5yx(By>POp#J8$OCOiVEaiPIcV&z4Ty-#OB9-Tbm)JOOb+UYB-1+aMFv0IwtB9VJp zP_WV@FFca-G%VpoY00qia1v}9;Yx!_fZrY8(fdfP%R!SO#P{E5X1ny8?_DJyw-We+-iInobM#^uUh+@6;@`PHYEeAB}B 
zDn8b9|3Ki40u(*-;GgngE5z@#rgvl-`<~`HGv_d$=1p*Bo(;@Q?xdYCIUq^u)Zs3$ zk?sy1dkL@i6B`@q>uk;M0MmpgntD4p%#?>34oP^>eP^5 zfnZQ^HUu+N91ZfLvR6&roq+nmRp+4K0hD}q{uzvun+iuAez@WQ5hqH2Q%m=%gH})@ zH3Z90y_lQX{U$fo`kp#v-CGAN;4sNs!+C-b#jt;B=voy{aN10*b;GB2N1aLfJ6-yK zRQ$fHe^&NJ0Q{_(*c4szU+e~2E2`%G-J|`h$GeQVum1O!zK6@+$!mMz+%(=}s@cck z;Iw}W!;?RS*UB+?jf~n|WHpCkW4Ws+v2E9{>+DSnjP8_vYi}4m@qAzw!gb8X1|t!v z9S83IV_|F>oe2V^^ltDhiR1vC&g+&!ePeB)f3b$>KfD^^(AIsV;Zy!5wTe~9Y$peEhVO(x3m?8KB2V2O1!PLR63p8A&2 zL`fNao}9_td2>J`K5;m8mJ^L|n%(0L%f3MQ-y0|>4o-87#uql)ws9TsT>Ytu%?auI z&57IS(*u9GgA9Q2_g-Bt4$mIA6{@JP?^&b3>e}_3{J;~Q1N)c2V@1q~{S#;ve_Y|- zm3nk}cX%fHb>``HfAGa>Z(9j?rtDKu!;s#3*<17@uKSIxFZ4I~IXHDQ@2-d1NdB3)?ZNBk&-_hoaEMG^dT ziR)_~_ynvelljWz5@5_--T^+>ehk-sdNfnjRx&D|H9A61#D=x zM+Jv-Y!W-_9<{aGo`SW+;JoTAx3GGm5218>h?t5q5ek0;n^eXj_yjp0oCCruzGmsi zcm19QD*gCLzwM6v2n><9j=H|)4=Z&9!i{ha&eeDsYTIXOurI>d($1ruf0m##KUMb* z%o#&|`3MAi?BpbrP;=ndSj@SsyZIvMgN=A*w2^bqY&6R0*Yi#P`(snz`+%h4&(gxK z{Kz|`meg_!&dR-LZm{3@FBP|5_g;M1q`a&AoM*aW4<1kZjhfY8pOI&(en^jlJY%oh z;FwrYHEE295|uB^pE>_+|_hdBT6^_$Kl)#yRc*I>cuw)E>@mY3+=2$R2gztfG>-IeMZe=`u|eKT`kF`3yT zVqtZczVp5_wRk=+_ot}umV4lmzh3tsGyd3vYVFgA@|SMvy|l4??Gvh-DTP(P8~!_M z-Vw#R&}&6QYOSWmxso0(a8Gkntpib^=cjdK-8I!;aLr-i4Orh?r~==HCyK7YdLFpt z+QW5|IB=_KYLYL}e*;?Ll#SV0a7*o_LtkYk_?f7eCqyL;sg!fh$~Y73r=vTZy3fZQQ^q!i9k*m=HQ=3o*#Dqdq*gDthQ z6enfEo#QNTcO9c)IhUlhGWa>PrL@Z_FgEiCc`##oM1VKW!QTPt=FFcXsLlOPV*WS( z&O1`id+BQnU}&Gb-rKe<2J9!9we>p;gvvU)Rme^~2&fTL7D3&a{7E1QRdk$Dw> z5Ns>_m0^9z-+b(dHqHZDxWo9QcrEMs-Z0U#*~~kG^Sn9B59R>DD$dvj`U<%@KyEj6OYPqWq34(5J&X6*@w2Ou?~#oR_jb1b4r@H>!sC+YXd5t>8*% zG=1Nve{1ci#GbKt-}g~IH{f%r*a1>=EIfCQIH9G@nJ5_9tQ0&?;&1=3f`4b5X@X`? z)33f0XOpK=>hZKN?>f{2s>g=kr8e!rvp{EiMQoO!Nz^hCqerF1*P@#EcP1|U5B@m2 z{oJ#KMuGesL>g&!JI8{9P+PDz4LUDu5tJ(Ie?RN`rFG$F3SnC12L(??Z2sk#+th6Z zJCOb-i9xLZJ(a>wi!D-q+8;IOy`TH7Fz##$PR~`VOYu?u=HtKny!_XmsamRNGNp-w zt?I(}4&Z+;`fK_GH22hh1y@*mjgAk;4st2}(WwodIO|d$mEM2f>zdvxxrpuQM|jB+ zf2T=rne&8i5{VPyY-|tDakWw0!}7eq_r5(VHd@7_+&4b2p5~MG9r};q{td#Q-Ayg# z*nWZM$1yRc{M~rz=ACv5Z}!8|XC_)zrx@%6?nW3hx8zc((#KQ2PamyE`5AfcZfBTK zgXMkFNr4b&Lz`nqMVU$A+Uksc>*=sJe>YvNqEF>k<5K5Y&c3=6C02Ut0~Vfjzfr`U zmUU?P2m!{+*`d*+<^&fir++r`s9TN+^07_NOu2|URyIGOk=6B0XE+MHhcG-$mVVQw zA%p&-w|?y;^?Q&X@>cJZIrw71K8L}%=9{A?y-D*Pyn|Nj5_Rc^n)r3N8G7{be|`s7 zO>i#Z0X@%=x}ix3zj)=Ns{-|A)dX%!a89ncejoAit zPwg0!jFyGL5PRxynL^-BVixV=+vfA%b+-hxXC>{i}Y`MEE8 z9_cT(Pc7|8AJG2P2H)4=?OfJ7`i6buH^W@0wwgbL20xQM2FG9%|7Up1jsv zFl4CJPZn`XUpGEPau;geO>nvC-DvcBSe|R=xhdFk*6yyGS9{Ph>d{A;@HeWDHKXQt zC^gICp(*jLQ+MSKmv!PFf1HJD&M6#X;roA|EvZGS82*Mum$5v*tKbq!KaXHgaq#q( zXZ_(2;!NUEFRz;u;qdDH#wrDWZtu~Fvb>j();7cAp@To~tQ#G3t#w;EJrr8LGBpoP z68hzs(eC8bNlqX1e;M%onfo$1GSK|6ai}L!gJ*5Of61kNuIZP3YjSXx z`23jRptinFLRoX_DlFvjUSgnv>$LysvWD=V?&e1AGJo{gQ?dI-vwCUh3#sesDqjl} zmMH6Svp4gb9c08(tCJl^)SC@=xIcoI+YWqVvGX6=soPF+hr8FY*BN}HZY7bvclkrU zcHr+2EZzPN`2-pvf1)cW8j{ysS1>Z{)Y+f2rV;#47rnbZl}2mTroH_wxiDtoLBvV< zXIHgJqurtA#NTwC5WD{CdH5VZR@CXxnEtktS36Gz-w^E&!TgT+6p+5=YyT{o6P-6| zv{rTB;u*37JZWN{WZJ%<2>?EV?lZU_!)p`3wD(vfPj7gQfB41HA4*J8*e#y*i~b30 zD2*@kw-6p3YnCEJj^2yl#KTHheeg`{_-blW!rqUWWTUSVo{|21;vbFp$9T=*=_o+& zZ}S}K8JCaKA2E^elNF}QI$Yx!>2E6ddqv(oA^!Pm4qAP9s_wu&HCl`@J!zRYR9|Wz zG5QP>BjdTWe`yCUda60BA^e$v{LG#`srBu=^D@m?)2Izcyoj$$|5)bMt>@`De_|^i z;HYhS1{#gh#;3)G!SE{ju_T`m+!#Ee^CGiOeYMWNawT~d`8fR}H)5pl{Eo&k<=e|V zU1IDtb&~>PF4n8iAq*|{YxRvWFhY@MCVa}R>h7yTf6RQ#nRu`8GTTf(ov#O>XzThP zVZb`#JG7J<=<4B-HGr!RZXoNX?cW@~L(S!yBNlh&niJ};9`y{H?PZc=06vksFQXy+ zt<!Jex|K4ZY6^s`fNcQFY;(54dOD54mUX66qb6e?9=}EhVnl8%B4f{Vw~v6*ph_5-N`( zZkC=Qv$QdS$EdGsegpjM*f!SL6C_c68sYk3!nORy0^HgjIB+zA#uMk8{s%&R-s>QZ 
zpvlke!N%_#iP5P$dSEe2uz-n6ABi6A-?6suVRG1Y)IF;0jn$<_%+QA>$Q{Z~4mpIZ zf2*^hSMP83lXw)|sOViQFv7BrL7uVTFl|bm%M@*hALm!NxYSRIHAvBqeeL0|jkUHU ze%8Bb&*|jH&csLzBTPp;`+JbW3FL0H)@EhNeW%vOxsQ2y#se4clbobo-Mv?q^dM+6 z3i+XSGB!Thxj$pC9%K4`#~bM@K&gh-e>G#n++ly-lU@LWZXRmk?}G^cr;?6yMLhsL+^ zN8i}5;D*?B(gXJVI8wd7NfAu}-1yZ;aBnk|{(NJB^ky~5la%iHdDJ%t4_)4O=}$DC2fs=Wcz~V2rxv!h@V9%LIsWMX z1_2xy{9hNFW)H(0zwC-3;{4_fcl8KSzOfA(P{I#vun zp!mw&C~%XKBXsNM=9!9|zu=<%%=@pr6Y3AL7FQig;U!-@`my=^iYv$*0?AeIKi|5= z?0wQ2_LDAn3||j?`u~E?=FG%xY@YEtTaeE>m>rvrw+^+}6@t!i)VxQbLUA9Rb$pz) zA3xCd8PY}+6RJ@e>LgK*d6lyXMlb!cfXT1<~^-JZ^dRxja@juuVIizw8L=5yl*cIrJ*wh43B}FaNs6@Jg<0T;eJU|GMG|!Vmnee=UO9pp6yLJ9CTO z;olgy8eLh#Q<%>+<|)^@tS{Ax!Kj<4{xG%ny>qm?>D3i~OX7Wf6P_RO+hAbIS0WM> z;v=bdr+&J=!1IVJSKQd81Sf5f)CN+a~1!H()K!zmZJ9U7CxJvyR2r)><3(8W*g>&9NNRUC!o z5!;G}VBFqjF`66#48+4Bt%1T@0PBGsxh$1| zB(jps`@mkCMSZU9zZ0FmE6=3F7gQ{E<&pg4WtH_n)iii;2|UdK%)2Ko z<}M|Vo%JbVf9&KDU);Rjof8c=*Sn*S^IB_ItI=zhm;u@={8QkN;bPG*V}_Xa%jc5i$t`wtd&slAbUuV#>os2WPSW@YTE z>L`MFs59RnFN{*@Q!i~zQ9CPaQ5pBnQ>l##ucW4Qf2!SymHM!ag2{xvhkp82Yq_q?gvk3hbc^LOk6Cs4Ey0m;M%}Xe)m$R zhxzO>3RUuA(dB)5tJ1Qqd=h#qB*?7CuChVL*&JYille0SlKTXQPw6fpwy;hv=tM4; z{-W8Se~2C)dZD58tGmf7I>sNK3#n)Wxjo#^HVsg)TB>qx^GJI~rE6(U2 zVVcus;S*E1^p6~CNs-%VGkZrzh>o)RwfxT zn4hj32l`xC$?cq11URs zJe`o&_Z^}waVCd{A93Sd|Cc-uo=5kz<^TmhJtHS6KPLbD@Aw~24E`5Rihe5odx85~ zJS+U1@Hu1Nw)7xk=dvd8aj`Ced*y|!dIoa^)VbfF`{2-y%3xRFLvV(R8l}YSWq(G| z+kd0_$BKDB1^4ULJ6O~RSqG)hCcHyOZ)C0?GBy^@6g90E`g?TaZatq#yqlyB+U7%Z zJ$F-B8Z=D4(02-6>&}Pa0s!SWaneMjAglW6xCpGM)r+($aX7v7R-L-;6P!2Q9yMD* zu~O1cW1o1!LR`sR5=S+pr`Bl9Y|$dOb+|Q~ zdEeZ@u}td-={EYuvM?kKv@Mwa0A3J`$Y=ajo zLuXC!<|8Sy-5i2x`uFV?dcq^&ZJX<)9_B)qY@1xECnJws3`G3d;8_@~n zo(ArOzP{dKi!P_!FGo^mvhx>NXNk&WCP%@`Ex1BIH1WRYxG2mJyHR|vv@JJXMCLoq zHvXA9ewt4SB7-iN1gE{fwaJ+Sf#6yIM&2(Jn`w4t+Q-|kD zPk?El?aDl@kq=nzkLIskAAe04AFd#GjIYCmrhh!3A0U2KIbe?v9V+L}&$!7L)x7pN)T6l3*>Q{iBVt)UxN*<0|Sb^C;t z(a2kL;Px_iY+3zev>yFi)V&3lVx~j2QcvLKR-eY&u@<$0#=e!_SATju*Xu*x<9&4H zJyegPA8jcvkY-=#XJZc5tf|r8COA&1qf*9d#o5*v(K82^Cw7`sOGY0&;hdlej^+<+ zQ2LB1(^g+8i;0G;a7Jd58cQ55i$a}fLDR0t&kDaz`c8@%`5I?mW2!4(lUU)J*Z%0e z5>CbXSFJ>NBv%f&QGZL5(>Tr1&GjbM>4U7NS&7g*pVs`#_|z!BegYraV-GSE5dy|- zKUl+82DNAVFFmbyro<@J5zqFzwr-vMPA^E-T{4H6>E^v?{^6$0%-EpcP9Gt9HwHS7 zcOj#X#E;Bs$Edl2SCRIeF#3h*^*yaK7TKKyW2Ag&knswV!*?G zP<>$*BX*9UU;Gofob9Y5G_s|qmK+Sy0evDhwi8_h(cTgLUsM%#*?WbyOE5a{_iFVh zfA`F95*=5~kU1mt5r$S_pTK>7^edn_P}Gya?}%6MU1|mf_lrx*ya2q$;}iF%MZ+z2 z2OcAMdi9-#1%Lm5Zdz+9=dYqC&#n9YavkrbILw?tIHgHPG_Jb<%eKCH1Pir#ajS+N zJwvzdMhM9_(cB>J796TsGhWHRH3ZNK`*6iJ8rbyGCKO!W0;|A2h%eF#-AfgFwv}y# zf5IP9Jq1a7gDz!gXbbRJiLcT2L(@#fzOe^RinjRb9)G@XQ(=duW=~#D-1)CZCKw?- zs-aPlCd@rSJL2q8&%=Ka5PwNQN$3fY^q#67S9}TYjbv3vuZN6#WIn;WHxB9?jH^}p z)jgdF!a~d+3pc935bY*D4ULH-oau&sNSTB1^&n>jmAVVFcbIMW*+9Sou%d{u1Q(X` z^3&HaZ-4R{p9og|qYvew$v*7c*jmJto1&!*F2MgIQwqi?xnHfHpesb&iCo6J$vj5p zx4d-o;}raWJVSWQ(er?69}?-y-MUGDt-jWjKtkFVUs*T8dxWzu#Iy7@Hzuy-%G0~3r=EZ~6 z{J?FLM!Cf^t?BWQ`j2-fYf`*_22R627OpSKV`vSFUHqHZNKc0dX+ARd4qaKEZ}3#F zet$)^S@-T)J!kmp94>j290f8Lab#`Lt$q%+5N_Z$Q(vLQy63DbUvOpkorm;o-lV^W zGn};S&7rLYPrR1+MhCA?IARN?ulcwvmDr2lk#oWwTE}M5A9d3w+Wz=>&5R9fL`((|k?*y_S=MADEG|o`2o*pwczy&j#~zf_>Ick7#1-r>>g5>QMou zzi;osa})pPY0P81`W^9s_=ZC3=pWwK?_0txI+RF#YO8dw;NE z$yvNkm;w3*vD>=rYu(`bJ!Ws@UiYo`@PVERIiI#?p2uv!C?4a)yi0U%XBs#sI2W@x z17}lPp-=OI?nBwv11$c5rd_VRtNug5V^qz%===Wece$O&OC$FDupYFXvi>lfuchc)P*cm_y0TLGr(Od%EU(bOe8Yuk1^@^LV}v zlQUow>28TJ{p$6m*Qa6qikEn}Hk&{+89W7R!r2&B*6+P0!(N$%RAADQN>BRLfP6iEVwD5`hYfe%lJd@1jT z7iO2F=!0RayWx4fg^O$r9-sT^)8LZV`gb4u596fpX5S36-7rHRar7}WH+i4A8}!^Y 
zboT-Pp6%beVbVhTX@AFzOg%Y^npKLP@$rNC$U)AeFa3RN!0*Yk6Krk0O`n??y0Mw{0G6zd2iS)me|vE2Jvrm1wzFxaC(gEBch<(<+4;N)*r0~fC@IJk^H1(J%_+T2$8e%2` z|IHS?uSd}}lRAL(!LkkH>+(PNS;YHRqBq3O6#5~B#@=^*^^f%hD}S}cuKeDY>sgok zqO_yR{eK1DNc!*jVlWsk2KrlbU?f;e$?-Px(Amrv_#nfaQ*85mYM|&2rN??Jm70Hj z6EjoDd~9}uw3ZI(meps?zBW4c^b7jeL&?L)({fQ~huETL8}bMH$gDR#ZIhoTd+KR$ zQ#{e@YN0{h_k%s>tjOP0oU~-PS}xPFRUJ(K^aC;PV==v1Bc;-Oilgz|^ft%V0qO3-~ zOAor|SQ!INSRDFGTKWqdM$wCBMjfvMfAO{cpb`nbg7qZ6q&ncM_y}A@`fy8qMfOF` zwSVFnh4Dx*m!hW)`=5x0veJN&z0v&;=2%FLd+-G2-hMXor%L|y1hmA=DKw9X2L{!c z8AyHx0%Cyykhg6bTm4d7wOdWF8F(esDgS)0b+dep1an%o`kHqiwQWWoj-Cp*km6x% zh=XK?z?yyxg)7Nfi_$;WR(Jy6y{;0kH-A;!OAmG7-+8~)qp;H$&{ovspA&NGaP5Lc zlP`r5PrGR9@-yo`K7c+2W*2Gx_=5hYXkrkrfH^!xFK)1hubC6iOTOi6=2vB=eeI3$ z_j(HtL#_8UfB2W{Ek61?r;=I`+Mw&FTy3f=HjFsQ)SC8FFju_{!^N}?(UVq1H zy#X;D92lRtpQf|$Up@nKNBt?i0q9-%Hs|{LoRbf@MLezQ#kadH*n=khF{Mvp%~i}G zv4e^sVg{{qpHqJ#n76DsN3^l@=QO}A9+nhy)%#7&oPI|HcX^JP^)WoxK8CqEJ-9yje(1tiGGv#ikDqu zWTv?N^EpBfveG>>yG}o8#YzMT6+)?Y;wB}OK5wl2aIr~avO1?GCKd3cQWzkfJW$Z=}< zjNS&}=pYlq{AgHgMps^P+yD6!DkfX`GIL$p)DP3wA z+B(&{Copkp=9~_6iL}qA!7NSg`m#OTjF2LgnJd&wT;WsYqVcNJKS-}wCN)9e&!R?d z&}%DsdsKz?^d1{cpSmmfL8s_L5xYb2zQpC2-%{?|79I<80&aQAYk#3pU!J|XN8Xj_ zZJ)Cz=iuuJJX@pR6Mc-(Y*d%zSw3Y;-gosy@M?(x+C)c9okBiWR?K`SA|tO!@Js`t)AD|G9=j^ez8Xcp|+9>D;>d<#woMNJ{=B zH44$%6&{wVWvc#Y#ea{J*V12f<5YRV*D3Z?^bks$#(WB?Rs8$=_tNygf^F(_UJvY- zzF0Ttdw|DEJ={IhieLJiuW5h(<~QIz(3e4t8}L%T4`sP#JmE`;B=vS<+wVmVPK-I; zNe783&bFLoen+(h?=aZz(ED$*kSDU-0_#IpPR(FZcXUaQn}1qE^f=&+f7^;*c zGm7H$u9~j^tbdPfXH5Cl&my>^uqv->BhhAQ3m%wfkiZ$fF!6@DO6=jzWrn}vEn4s3 zx{ktiN1DDZ{7o6d7d@CiYaifD<;({mb}Fs28(&HM>$T_MXTEjSnNo3?niW)>=Ym5m z_~!RK)~f~&zjLv-@}%XN@W6zJ25E~$O&rlk+@ z#`X19#$o)s*8{fD{d$L%5&JdT=4 zJZ!9U@KPd@C1$c3tH-96JxiID z@SSQ8Q~RL#wclR$4)qbVa@83%-@%X(tViYFHGk_AB`kEMz~&s`%ZyGRDsVJ_MPeEct=VK`<7FaxJQl#Lj9h;2orI)FgI*Z^{=evi#_$ z%YVTq2KT5ck<7-PGSj$3M+%#qBg#s>}q>%iaD+bLRrUk&($$pGETC8LJc#eMy%(64nz z{YPYrMf-@k<7gDio&BzJ{NQzRMhbnsGJnTb+`v0D7K%QuOPnzymMy3;ww{5*5PDx10J`js&#oi+|;B zQKLeFiL*2BV^Rd0=I){AOwnrWTTcV&h4GoShd4L355ME~I-{5oqb~C$n$$hKuU0!m zxdW^t0rRqY4;mW8L-@|hpL|H%L@zaZ`4d$aaH-q>vWI<*bEV=u6-(W;*S_`h71#Nu zLn1id@7dn+`xHk<)eG4N(N8aDMt>(3Tn#k3Uu8Ce*BwL|HJXe#+-kQ>);K_z#7q

eXYdf3C3j6fuAZbOOGyjhjvUsl#@~1QcfX@5exHe`Hc)KY5-5y3yLudwJf+ zR+u#e<23756MxTKaqZko6K1BPFE|v<>@GQA4LfS>)={JG$AZa6^r^0)Nk(8gqCLK9 z`lq8iysLqidQL0dQ|oL1s(;o!a-U>4ZZdNY-HfmaFQ1u^oGgU{4Vo{4E706E(Ja*u zsOnTQh`m_1$X4_D#9s{OTjq-Y4{P;r#5mx`|K%FN$^PfqX8a$ojZ}3m)Csyv?Jd)& zQV1~yo`dp(S8fOQ>{e$LTQ}AVK1Z>J*LYEE=9NRbhJ(PoigwavhJQ_BRww9fCFIt7 z$;p@}0ZVl9aeCPK)&msRZZ)ExnQ&x{R=tt0o)>6#qI;oFmvCgJ_mxuvp3{5OGMDH< zYoeR|@=07Pyhw7BUeFzWTs$;qu(7=-LJo3g>q)R=G+x@<>AHUpbITX}8#@IS_mOuF zs$WLzRH@}~dk%(LTz}(UdOQTvkbSoKuT|RjHG*2VwHc#@0v#28KmKs_lvY2Ew&fo) zZTbG(^xbFrosnvc%<78aZ~{W+N4Yce>ZkCxscUT*-AoOiv2>zog#WW2muUHu$9lDo z`wF>UkET9Ldu}xPPNT&fG-J7wJ|od}Y%%#){QmP!{64Z+41Zf<3*mVeSVAP2=JyO` zv|&pwoYc_4$FDq4{tM@?oG+*9q@~@x)^V`yNuR#JarJY%OHRar^anUAx?nbKuk5CF6pgu zG;gSH4)ej9*?(2y+lKa`vwmCA59^1+xX#Q6;`4oE8oegf;$lDVANVr3U^&$semH(o zt2@>1cdQ-fq-BTO1nuKVu@nz$Th=|eglsqb_l z3nZ$(Q|LH+&yrO8s`#)seS8%g7xS@hxrwfNqg9`hv%b{qZfE{A@UeNP(E!5sj`EMMPJg~Z@lODg&PjSpB|8mcI z?ftlJG2jdJ2@O;9(A$=OS;BIXvh_OYB$hcJJ`Z z=zsR9IseLrDZbPX?vHSnOFqPpShKjZ#NjuMS;e0-L_f0V$5ngwHfMI2JM9JWTVE() z^~3pkRu_%Y>s00uiO=)f%L#GkYXH}S`8%Z5U9d6OmHQg_v^Am93crgQgK%x*ey@w3 z;Vbr_*%-jlI?cjUFczr)|>Qf7W$W9dR8`8qF;dBkPTXO{N9 zw?C682dWwDL7Gq|yvfqG8S|9q(#X2boEeg^8LXwJvmdkvgk?|+B> zDNfZ)H|cB0=lg-{O$`rzqy}#nd6q#07lYx!+4C|f*X@456m`Nlw z>xtMyZB0E=c*mQDXllreK$)!p9i~4d9>bq8H_+C3E&KG!Yl##6bT>>7ma|#R=m9pg zCFtQb|MMmPgDlIs3eTmqeK$=?buJ4$;-~Jjtn0sAL%|yheKXa|P;keUw%Jv4n*3e3 zPUMoMPbMOC zO>uV&N}mP#A?|TKB>uyGj>LWfZhUI?HNR0ag86q_*PBM!?^)8{=lQBtqp*L4@BYLx z6~9>3Xnt`i3+*b_I!v)~!`7KKY99yYw6dpnq!o?Pc=D~zP-;AZpREtgp84>)(C@~~ zpNocI8`S0yIf73qSbrl7qQ;W*Uj1Yy53#(=zF;;NZ0!ePetWif5Pcx&_m|#bI3q#u zfq2V{lQ0`1$Onpp3J-?-fbmWG!md6H`S`0#Y#@XuV(b+oD2bbDtu7oi=`&L^VyIwI zic@%^?RV6Xg88bt`OP-o^oR@Gtbf@HFW@%wt(?hzoqrDmhN$X7fAk$I%(2)P z)nj>^W2QJwQC^wu{KfW_xvYB7pE0;@{^~b-J2m(B?=;SCU;8J0v%mS8{ciJL5Q%$c zGO`YbF8heUaM;V~Se2SRY^-WW_GAZtQT9am-1PIy`eT32NBDMV zh_2`8Iuv^JQMEde^X-tu6};fGPlJ}|qQ5QPAyZAg5O+y^ysk0dS*t#OqBkq^jo_vz zElT2R>Pz&K1_~QxL?plWxWE2h>8p~ySLvZDc>bcT9)AHVzmiK53{|HdgF4z_b%x%7 zXshS%&vnt#7A;7>B@HqZb$9S}r5`Dlrjk#=8<-$o^5Or!@9u{t@sK-P&J+FBBXQ+I z|Lk*C-0ae4O^9uZ8SxUMGiRbZL_pGn zDVXlwj(XMM)n2Y1!Ypj{{@=-q+kZcYEMxA9c}( zifh{ex_@o^p`8?;XOGpF&*O4DWhTTze{XGHqcocM%y}nX8f){o;t>{0XLn#u0Ph%RkI;}a-IG`pG?hJ{D0OWqld4YXFt}ixuNQ3l=X%?C2J}e zKeIn>@%OcF0h?FOe7bUl<>$(NVbw<{`U3YqbIO9-TI?xz=%aT|+-df}oEsT%I`w9! 
zIO~SNa;-zFWM6tuhlh8IE)3g#UB0RP=nYfccf#Z+nK_W7c|hD}>eBlRSJ8Lpt>KY4 zbbsEn$zeL@=E%nXQD5uW4C>@(WH2u^GD5Jxn_H@eEdaHR0)Jlk8P1J|Dh6e?FcbrdxP|HRracXkigW^v10FYnyrcO3yjpuYA=b z5WVwWnNLI>z~|t}e3x7$snLtUoUT(eQhz_^AK|qeJFvmW_7*-Ab^_ZXx{qihXaW6a zP;9(%t_lR4ShuqdyOrc3IFxF4vBxc zDCj542m$)F5RnxAHK-0e+mnWb4V-LBj|#8TJmMVYeF*PWG;LUe2widP7Pisy(SPNX z|MGbtvgA^)-!+--=XR?{Yb1P#kon4;!PBRfeKEU}_f5}3h#sr-X1;(g;3qz`;8J-& z-@W=dtb9^-sD65SYxN|$k=#?M(St{0aj7j+^(G!f1CyS&(oQRTYKwdYHiP$rWQAy# z66ddSdTNue3Zvh$>I>r#t9NzLl79&ko48Yko7O+bw{&exU(4VrtG*wvf7kx?cVW*+ z9h?3Y;s0FN|3GPT5UbOF*P>^A!g&$>o`9Y%UG?A)bAmm{*LE^X!5uEX-xv5T!E?R3 zB<}LZvyUo993RDaR?{|)E4cH>+4{jt8hfeG5NJ>Q@xD#ttA3DP*|k5ZG=JZmN+sPq z-_h$uA0KtJv1^A%X7z)E>>09iio0vHAe09x=2raNXL-oKJP&G!vaeLQe)LyZbD7J{ z)o)i-pKkSuIS0e%54oj=n>m5Zod4_EcC>a{omT72iJPKxFvS-Ze1G1Dn!hHxcBXJ5 z-Ca3*WTf=Hhy!iuape8D^nWi4H`z$wqT~*kr{z~OU;m;lpMEIj)+!$8Sa_glB;$|Y zX2olzo*|D&y99b$#9z`oCH7Q$CdD>Sm~Ytn?zbxa{m;MGbbtJ9TlG?I6F8J`XSS*z zt4=(@N*;uh{Zu)t7~pztY0oSE?vM$je?BMm=KtV*Jb`il-N(7TlYjR_{Ps`pi5}?h z^{F0JU%#B~D*bEB^pYEKy+FTpgKh<7Zwf8m!tRkMaAt<>x zDtYhgmsdX{hi9%}vVRV*`PFc#&FBv{P8{VNYzi55DR)ir~f#9WzGB5gS)5STli70E#g>e zgfh>7x}xxgSFe*aq7R<;?UPsMz42&kht%Kh;<5GYzs7KCuzw{lwVq)!>red*xkZav zOo86(oY8LarAG{C-prZ3$6kLdj*fX;5a+-vT)+#qn3?9xhApYL^1FaO)jl8gvd^O1 zP{w-Eq<1}g%%l~q@1Ol{-yEBmxlQV9vQ}Ub#m6!yiyZAokD<)ZA!m9m-ZS>fmKpk@ z)zQ_b<8V7g_kXYY_z6hE*=R7sKZds}JS>X=Z^X=MjXJNEc3&Y5)~8rpxTzE$WX?AF z6<|3VnJ+Cd{E3J9;Jo|K&#SR}n#!5cn=4``sdG~Eh%UGDt)3ex6s4znJqMycqt1cg z()i|iT{P#_#}_mVzI#Bf=i%3Sp%3&6BP?eOmgj>rBY(KeVedZ~+X%9xvI|>kPs}&R zGYc-4ceIeg4SFm1jXG^^X-OWv|M(G&ij=*$n%~Y`4g8 z)qSdqZr^?VIo@Cgxbxr%=nbedVla!?Xq{;H{kj`!W3mMQX@Y_ocSqHcnT4yZo$XOK zMos3uNq1c&YnjbvR`09Y;_{&lk53Cih@#$f%g30DNdK=+d?jmtakZP? ziu6f{ep|={F2U69B5WaACjXDUH)~c_Th|2tN;O4ZWrkS+i0wI15k)Vp0)ov=g@^)z zBBCNx)PJvMU^D0Z*0N6Gm{Uqb$@4>3I2cH-nC~{t=ks- zFZHcrDk6gDsZ>(j@Ao@jDhdh+ii!$YssCOLpE=i>YrpT_r%p~%YvsvQL435uIn-6Jj9zVL*SaF^O0tfS#O&!;1EAr1 z(EJ@R4HqwJe18S2-5qI<-nm{$Qt`%W+{+-*p$(EPZf99pdCB8?98QU6tEd?Cx!~l+ z0GuE101ZjVOW&ZdsR{foe4P^ikz2u{r{%PFXe$~Q`XaKHOPTR>{ezJ1zS!;efXBY^ zV2@q(sw(hilRU~TZje6Qx5(@;+g%~mS-5_!bPM4)>wo=9G3~1LYRUQ5&vV} zr+Vaa1b;lDWloYHBLPlsLsTF(000OK8xb#_-bea=uDaI~1WmV4FvaC?urJR(c`XDN{i$5`k~|Yq|kL~uU6z|qVnlBPJaN`ML_~R%^Yz?fDaZsIyZc!%ZC28 zi3yT`zT>de%uBW(7t3P^X4cYYolRe1O4O)z4)O}Q62n+xQ$~JHFuL%G^ITjZ2LAho&gkgrL#yJ=4h(AI!h8aw3SSPJYg+y2ny z?tePU=$*TC+&(}aj{povTR0gpK90>eek59?3NbhN(}6l0e!ITdUV3Nz@C{3|LI*3$cOv9SKt#z$h3=kf9B-+!kaBzOj?SUN5$F)31B6SNyeP zn2E~`LE9YrhKMiZ;ufC(?sInx@*_~5k+zFUvrA?OJvSQS9b#xu8YDpc0%gsfKHiLcUM_J0>< z>EkvpE>mfBbjg=X(z=XT-mzI?kX&2ntMa+Gix2cDKu3N2AoUKjGhnM~*#WP$k0Z6* z-R?j&3)cHdb5A-@jjHk-mO#pH+=$D~=~2!V(IvOBUmFADXlj1XAmb~)5oEmA^lJ*7hBU6HHKJTa+ z^mokW_PU-QBUK;;yrb8)fH+C?VWJhXMJH#+?6u=&L3($SW(=8bLdJ@LqJKIF%MTe8 zZiEzddwWW_!?I>%bOwD!hnnW~$W^m#9ZE zl>L4r;LisKk9vfVxN4A7n7MLdrTQD>5Xt#jnSV9LsFlU)dv-rh|Cx z%W)}luR!K2vPZTi&0fs7On=_qojhM2PpAVP9!P~JOxvsk&N%_JY5K1(p0!+P~PA?>gq-z^da-8BJWalMK!^o`s+f|1Su4i<0#unjn&(l zm!t*3*jSG`MPW8e)9c>(d0@OL0sOm`+3oTD__&9Ph$u%|5pQ7j*MIh;K!oXJuT(5v zx1Q;J)~YsCmYl2Y@_i7F-t}m@G_m7?i{Kr4$XnU`9Ud3Z9tTq|kKDpKk2ckA=|o)S z%h&dHA5JOl9H`x*OcoF8sD`0!VkYvi;y`36o}&Pr+(mcP@4|7Vu1+Sl-3Q z##uH@e3Hgy2=~YBqkp+>LOZfIjkQkS)dXI_r7`S;IP=*!xx1@zEmy0PA# zESTp7if`*Ngr@zfSgZT#n3&{^fq+bKN8D;WZM;Aii4@U_et)@qpC!h!+k>9m9w+v? 
zar9Sn&PeKYMQ#<+;XxYR76GT2SaFr@Nn#hRwmzO;l!zpCDmtJ3JYJvIC0}(;C%Tb- zv;%B^>beFrJ@FpU)w`?tHK5n8L$e9M?qhYg%tL(Zeo_d@%J5A2MY%6q1dPX-Kkh2-H@TP}~+k z7fe7R{=ks}e(Q*;v*oa(^aSG6#SvBN*Y3Dh{Nralc7OiX*Z{(%A}%)WH+yBJy)Nnt zu*b6kQt0JWCgDlPu4)|Arf}4F0sYSH=V?I7l^giI?A3eVy`Z$QWP`P}kq4&5EgPHF z`9aw?uJr+?j6HK;l4SXYrXO#~x-CZD2W(+OQ@MgQE>b5M0q|kQGlTiadK$kYl?ie4 zBmeO?#DCZ`rg2W8g}w8mc?hhPSl>XET%0{hVZlyD$4^MimZZJ*ch zw|^h9<<+M?SUic$L0FT!j_`e6Z4qjpE0a@@gK(+swaNSo%0x_2@s(mh9QD{=-&o(j@vDY7@w;6> zkg0mGCo&Gy3&>jHH+KBn$7xSF*!Kpt{C_?!A^lorLyW=H2@n3UBu4;dleM;A0YP>s zArxYD%M!I;7Rg-6^?mmix1SRGie={=FR%rt<(Y-MdgV|R0^J+*_1X#alsg8|g*Ip! z+>;ZRbm>0yipAMD6xk+ytOnooM)$F@27siI&+7#$+ zm^8Y|;YFD)2kj}?pT!ls_kk|g9=&YztA!0m7|Lf`MEzSnrAh@ad$>$vMTbPVyq@9- zF5^nNQnFy>*~s`i-ILgRtXYbFT7Mbh=0y+f+03}uU&&&3JY9$-GpUphg~&UZ6qDALq@uc^J0c0_;p9K$8?WCvdB=QS%*O#p z>A3x%Z1Dq44^$&jVo6_!#xwhEWM;?XY4I@DdGiDbtJy|Inn(GgFRS;qMSqy%@$|3W z9{PnI1WBEK!3gb#9?Tbe-#kc1a;kic%&8x=^lA%$rh1I?7V7<`*W(IlY9wW*-Ile@ zZL0ULz1-A*`RuiYA;n{B-U{tjztd;!92F(LU&Qr+`fT+VRa}^!?!Bj_tH-sov5eET zW*QXaeYP48y_&t_70w7vJb&s_FSizxtif(NmrL;1dj+sSweOvq0uOLSPS(+Dg6Fzc zF$HS|EZv!VZPbxw>}E~NhawrOhfs+xN%}NzM;70Z`n<0OMkm@EctS9mP+p?{?UzH*IX2_-ksHsvnXfo983U|DAuCKOox9T$nxtIRB}M@SI>a44~; zqzl3*Kp&jp?(ySbynoqY63pJ530Qy2HZArjr;fIc+{J>iLqiBa>I?!G5sBiAr3rL+pB+slr#Dx&UO!|w>=2Gu<5s0m8x?B&wy zDWDgMxXkY-#3%ypn2apL9y*$GQ> zYe3X<%MjbWx@)^*d;Xk;>#!r8%OTj(7lyTv1dss79LIB8d_X~mGCANRGyDD;Y!tKg z{jtz*kPSZMu1me_SOJX}DVz+{?(L413l^)*a@o^*< zwX(45(W9323*kQN=9Ow%a1t@|25RTED?oHdzpEPW_?mf29>(}#Zs`N*HW!3d=NqDk z%|*Kdzrc~yAnxTod1$wv6Mggvl!aB`;{ZCrxcFeGFJ$XM4ygSh%XTY+I*qdKuK-Y( zqknygs0)7@O6ge;?rXm`m%wm5*fb*4Eml|2KYQ+FIBcP@<3>5-pEwNY< z5VB9I1ynOQF)Ad%zlRNf@~BK&-_8E^@PB#Pd(>u@GFjY^F$Gq z)#{nFxa#-ErV<1DwsYu9euGMafzl^7bo5oRYR<+|-ay&)QiAaK6`u|8ne{%OCtDF#HoJcpTf1Dyf!gXzo=iT4YhfnW4%qs3I#JL)9SA3G3ggU(jt9_cC?!QA++XsryL|&MI?v&VOUXiW37; zi_b+9mg!^Z4)Tc%lI|4{nW2c((aAs-9sf4X98_7vN>UFnp2fB7X{!fz|mtxo0t#EXyCN0k1;Btthi#~ z#5E{Qzki_4ci)?Xuq~Y@?|&XHC)?D!D7{u(7`0Tn8}uT%Ul09d7W7t0su-a{-j7Le zp)SpOF9S&vUN%m=WE%}=*WG(T4{A7^#{9xRjzT*oY)Rj1Iozv3As9A_i9ZNV)FbRQ$c2x%3fP%3fK33@0u6N4?L}FEA{33CVy2=p6-vby6gHK zuq}75-c%nkxo(&<4ZC6j*ubp7*9l{L+$50y>n;~Mn+)@wpaSXKpmIiTGXeo^aXOO@ zDzA9=*?ioc=B+j>$Sw5F@-tA`6Bx1!pS1;dNGb@jeH>=IYJuUdWLa=F@-F-ra z$iY&UcaM{K&3WYpA%DmZ*G1%3S%tf_$QQ40bs&b_y;C2Fz`g57RrQ)=Zw`A*Ep?^G zua(8hpH>$~S_l@|N581MAvU_KfV>tTzTHM~u+K$iy{)^~7)&v--z-AposbdWEz-Y< zr>IUYSU@)gBC#rWcq}Y3QueBvHj6*#e!l=d4EN}DE76X_uDsdSX2}> z>PV9Ki~E{CddB-A#t!retyoNV&nNXzF6ZrOyIVCLu;Xx-7gib+Y-?UTDJ~Igur3?x z^#KtJ_NGFlU-e5suGhnrDILArm#%v+>X?^GjlZd^4rxsUi<9&=$|&n%rwnYEUMTP~ z0cMgYZNQ;(l7DXI)Qz7))h>4v`BgiUUQn6bOdpq#8_^Iao_2y4U!4=Eme{^&_7+ngJd=L?(%jgpgv(`1Aiqd2;`>ZnA6^>55|jhnnkcC zmaMc{-YWzF#9(K$*%8I0hsd7ZQXyX-I}UXm6+Lb@8GL-VTr4^k)!f*|zAxiPi(=^n zcuDc99F!l*;f7ef4jO@ywpDH;nHhmPhX5N4Fg{1CTTKVi8qTa+9i?^cih|CDPjF;( zArXLW$A6uH88$eKM7hQeN9`SpqOK`KN1@xlLd1E13TJWV*d3}2&J_YWV*c4{L$$&Z zdW$dqv!-_hSnuV^PLk~>t&apC((*@Bx)pG*Et6W8s{%ANvRMZbyihwYZsh0dw4`># zyVb%~svND&=@oY14AE+S;o2FplK$*8&;B2XrGIDo=es*Sg5`TT2j?%I}6 zhJPtd>8IUzeYp3;c!zXMWQ4p^87)d;{d{jBE##W9C?7ccTDOD#zSGU^MT8HeyBTjN zvs>U++b#&4V+hzqF}ats4zoM-UicB7*oIN@dg-w3xmJ#?Cu;u5&(Fnb>aVA#x1XZ~ zH3|ppv!K0_z(onrTGnd5kC6XcKd#r6DbZ2j{AYFOO{Vs*LP9%d=_S#+@Y zk~wcX4xAzJ7J9i`v1eG@Uxj z=liW&?<5p}(YO09NEhSn`Wbfs+Pm(FH7aTzF9#DFk`y3Z@HxK^Q2HE9`?+?-YS4Ow zdMVycp|`{n`8E3NmIASTAI9CuOn+)Wh9F9o=u&h)NBe6sjcbBC~P^g67GMC?3<+qzDPwKFMkQvPplfvveBT!W=&wpL_f!$ zIkgvemIaEI$9`fDU*hTOa=R7QE;jw(hDOXmeTK9E4s716`$VnN6^KpO7KjC<5qvx! 
zkbj1+{Q?P;R5n%UnZV=Nbq2nvh&TLf6C`AI4;Fl9HG-h$@GdX zO&&2x9mLqYYv!f1S$f1a0fT>F0-`!i9*IC1C=RvbjRv(B;ac9n68_@Z2uPf=;Xt}D zj|i4OFPXtOvryOoLA{KYH6S1Oz~B7*3-;Sy=|}o$qxt!<7^c5p&+UV(3EF3x+j>41 zfVK|^Bzb&Z;_n<1KW#quLr6gG2jBZ?$8X!t$G^w&wTjOmSp0Jxd6h^?Y0I*SpR1^tRF$vUO zYF!)ADT@OwqcDa*7vkblZzxxJv40AhcQ4)MT7@?&hsuoZ!e&Q_`_iL35@?^A&b zeTAdIR1HHjw?7v^kX%b_)nYt zv@zP0IM8$Xz{cXmT+Xg_m zjl@|GaL25^Q8Q@V&sgNb)j_a73kG1G20e*b{ya}Vzn+bB29N{e88B{sp8NOs!Lje0 z{La<%0};vj*}s2dNF78%^Hm64=h6IL0`c~@oi4ot@plT0@fuWlIjuE)u7uk}m?vpa z?I`fFBO;e_h2c<&A0sG+`?JyGnYr$b1FovdpxNLKH!^@?6m%+Oh*Ms8M1HW$7`$it z5;To5o_zcc00YevswG5F4Q#TJ!Hd{m!W$8-;$I((CUwn&%?Y>$5#ezT@MsIWzZtJ_~;T?)!Nj{*0kt zbM4zc(4oWU^L2Q2>fepGJkB#PrF=Uw=X~KrXGJe4mRxXHQ@XmU*9$2w|z3>zirz* z@4kKTU}5ucy$Uw)+du5B@wr2zn-eOZKCT3ce%ta@9RfX$I)q@?$2|`a9fF*ff(la2 z^;0BU6CU0L*0N178*9$Aq*7JkD*C*8Ls1cg8b=ehKZK}37 z`45}*Gj}m=f334W{e$Db$2VJUUA4-i@GO79v2P>aR|}^a4BrLlEv4=?8 z-*XjhgwMb2_Wk_ZUh`P~uBGAJzH|TJvtMKV_Zso}WiJ93^zeWCJ{Ib8@$dXy0(0WK zW?=5x)_&~SD!-X7B%{YtIiL+aud{!Au21>tsR_gO#+O@}^CUs8Qo(LHT6MP%*3IX2 zLKP~|T0_=J9tbE3urCeZ2@xWfNI=oTr*C+#-=hs}%!MA$eh|fm!y2mrt_eFTHfq&e zeQC%+ei!RpcoR!p*uJ^pgfZ%>iZrPqsI4~~TPEly6WFh-?4m};72Bj?OMZV8P1R#% zJ(4`1RpAFU0g9M-95-|PqUk?AOnMcULF4^p$P)JYqG>_cY{BCkl9Xv zoEn7r@MHe61C#mNekw?3{`ix>kKyl6TaEAi*plfxhvx55GOH@W6sd5?IHZ?)yP7^i zSz1=Lf@d!3k~eGkw%4o#j&OhMcp+wKseXgO6c0Q0Ncn)cy6#~En@23c4px-Uo=gwz z5=(-3R9zEvXr%&Xbo}P*=VW~!L5Ia{kZDo{p2u+g`jV=Meu|)hrv#6M>$rn1`K1Ow z8}=Yso=f}2-YICKz#e?yhr%9Mqx*Z!_-UVCwi9dg@3Hr{&-SYg|IvSTf2=)!jnm)j z+4o$i`+><==EMmSVjZ;AP*G!UZC_kVZmNi(x|fC!0J#VP9H<%rcMVD@CfTJ7sm#wh zpWj6=g&y0&v3Ut9>bWlR+mE!~N{~8+oO$ZFo{wC7RPkl*$e*ppS9vYEA+URu3?CVa zhaDg#gs)=zg|VY3s}+ChFt@Pn!(FHdTH-p`q3v9y#k2es{;~xb=PiWOAjizsD`#pF z_yzwu&;RKs&vW?ib^^iSFCY8IR?cnmv(NZ@-T%wyf!)N~_?^>rdi^!7uvWV8199w^ zpZDYUsPJdu7kg&Jm&oE)Y3`2D8YWWAQ{ZDbu-w)?c=&ojq`7}wE>XmZow!bb6&CA} zBGyhU47^x|E%q7|GzWdy=RiQ96R~R0r*bAxWu7>h``O<$%Z5kDJXgvLRPqgmSLvrH z8U$cIlqP}~SR|z1lqC_0fN%Am`$C5SC9vwE|Ro&aa-9Z}zo?loJGyFrSe@p67G&&-wi8-ygsE z_qhDEFPZlvKz;o^MiuP)<~B{xhiLbI`eR?;;=GNqr#lK|A4(|t1XzY>#9Wm8Zsb6j zt$C>+kDY>jm@N`wCs(+kyzLv?A&c|49+Y#{9Pho%Mx2@_?Pdc z!y)Yi{}SVK|4HDDpmjw z|Fnm{#`AwXR)75TDV;yN7+fmxz{(14cW|I_hZL}iJ+O^E@dDv~o+W3>fE0)vQ^ovu z6c)bAOjzw<4>MxYdua?{0*c5?g|AX>)1(MDP+)3rRL)K3E4-n$Bx_6 z7s19H{U*wiwm;ptPLw7}6CM8IwP}6YLf8rW8I6Av1J_{+|D4_xu#iGj%EEZbB|HOL zQneX8nY-(X>kp*?rd)!FooB~#;m4#cdVBf0XKSo$1=!=i>kVLy4kw8MU=4_q>^}hy zr84xx-#*n}YyXiQzy0#<2kYbh-p~Em5f9S17=wS-3%@ZqZ|pxi#KPwG1jm26$wGdBfK&pqQKGItT7dP?28H~Auk{o4 z0`bsR$V+`W3p2vc0)g(=|Nq;M`e`SqTm3P9&*87Q#2;(Y-!}Q%R^Rzi{r3OAZTqK> zOQLocXx~y0f^MmQdJWF=22SZdqn<%fxlkU0Z|HWW5ttd;`ebI7C06B8MAPbZtVVxp zlr2_Tdh|qDM96IKi~QS#4_f9Z9io`vf<0=9EddgKl1{I!{)OO%>`4ekVS0A+4H znwd*L!ir;Fl4q3lutnDG9f8NrI%lVQ0FHbFASGw91D*r^4g?3qoaK#>F(F&dp4A6@ ztNEGWOMl}u|8~EP`Tol`{9y-&U;cl8DPpf_qOE_v`^R|x?nCcY?jBL)$@-ZumMJ=a zCn#gi3vEcm3BbA>y)J@q!E+B86NV6=X>CGBAMPBDNP;zw6%a9*Di3fnu)-J@wmSlX zFhJbR4}Eb|L%~J={65>uqqBRl)Yo1jl)3-dRsL`Hp ze4g`e$BAyLic_wlACZZW?%c3C9pOjqg~1gX4{_4(JPzl1yqe=eKl^{B6Z!*wRQ~m& z#L8UF`xW(X|M4I3L5xol!Z&}dUw`{r`ES4IcYpj|%YVlB-|N%&-+zqxpT58`NP>OW zw4dL70avTYk>eMXemf6Ve*y49VlJI2?v}52Xof-=ku4rl{{DV zoAa59ct1QyzIuc5n8SZV5)l3q`X)_J-qyE*791=hFNMSz{BR;burdS`woJ|{?PNvlp_9;y5QHL*nwze*jJfL z;rqk(3~8ugIm5ng4fOL{tPuVi19obMT&fY52jUD8FFG5_2*%-8Sdy=8}ap2KwHtsR*g zWA-CPv)mJDi0k4(lr79t27-R~izUb30#_OfU4nBF$+e z%!en$E&te8eD_|YD&)WRS%1X_WEK9T=1nTcQfe~hVg{nuQ=pX23% zB&Uo!>@wgQKxjvuFlJK3iy04lMZ}VLt_%Yig=XnT{IFGd#vaOdCpBfurk3QbBpl&i zVJh(F{3x-Cjd`yH!S*ba5N&tU0Di15Pv$tsM%mmkc!`r|mS4>^1D*o~Cs1baU7_@a zb=5OFn6!UcP>5VE;O-7{d=Ki<4+w<~b33SiwZlKw`G2hO-@jje?cZ@?#9ET?e(LYf 
zeqZaCef||+`P&A}enJNkHY>n0I3A&)-EE}F<^bO%cnlj3IUF8*>hhRZb1YnQw{sk& zlf6EWgcYw6s=#>|8aE5^5I1ok$7oE*5sj=Pu_}Ksk01fSytO8f5{cCUF~pNqb%0C< zZO%3e8L)Z{iW@$=U6q-;b4J!c{s7t)M1(O|u>ojI&rux?z+1ScsjP;V6BM9)-A16@ zhYstUU$`!NAUfxLJs3%l-@(2L`+DrQf5nHsG0&ep&+li)4(?}-`W~O{>p#{pjH7wJ z@|b^*lN>;!or^q^NC7Z~_j8K^-K7Nf@Pv zvn7yDy|_pM)z0Paqrv84ya?(@Y{l~FI$&o(2pQVwJ*lHi*O>PU)tqC6cK2y9az;AG zb3k5#NGx(Tb3Esdea?K}kyHMPU0{v<@ri$bVsJlpXpRa0m7kjT-}AWm<_K~8PhV30 zZO`U)8aC(0FZdZVEAc1xevw0>GyV1`x)7fqpjwKkj$Rfz0*Qz%!UmOIq2idZPD$*a z1r{K136h>zC*D}^4KrMH9`Lw`bBe;bzrxS5uSNLAypy1k?qwe0YV5!#W4TrYrm%l% zYHyU3z_XNc&TjLZ^;s`p;kPmWwlV*_qSh<*D>Ba|s9GVY&uhE{l|GaAKb39vK#ZrtBD?*4nPf%W*e z-JaLupZWOH?!Wh%f4Auv!q>*%-)n#LfAE!l{eAtSZ(Q{Ey76oO`Dg4N%mhDc$an3= zIY9emtUjd-V&e$lxdqza_M^t*&WM+ZM639HB>I|o;V8_hG*`<8dsiqlSLFw5Ce{$F zuS2hbBnSSZ2<%%3|8W~hyx4Q2smW6qL24D+kqGf2o`blt&yStP`zdd7VrqYv6#Q1# zm(K*uzRT(72Gyt7IJv-)jpe9%T{b_ME#)5P>JNwz-#9J}kOM(UzKFdSv|3hTpaVV* zNwOop8-8Q^qgx>_dX~BTD0?>us^=2iXU_M+Ms#~HRL*(bV41LHG0%G>-w@A3rsS`A z|Fxfgudf2y|1aPDcTVK5SoMFOF|41y`L(zFeT`qS!8up_+b&?=pJ_4&8m|hAV30ns zXXn^DwDvw_*u1J^9fiNseO$KTg_=}z^>(y|!Lh;;)Dw9ql%aw3wc=TGZ$V8NdvJ~q z#ZTmR7q;$^$0&5pD~Kf91!`Gd1d?7ULsMu~Y0IBZhcNS-_wtKDzfOPUMdF3hTo~Bb zsi?YyO~pPqMLtM{9~uT&W7+Z&LScALjeLc_AyFO9cI7eKm9Xk&yTbj%ihjpA{#rkN z?8$%hGe?LS{@B)k#Q1*J=Gi|*uH>)y%b)v<@BXy@N33u%|B?Uu$rFImRi za64dsCZG(!7lQ4L@dy%ma+CrLmP+e_>~G(QnW6^7RUl}XIcLCb&ZrLe+=E{B$VXxQ z7V-&s8i-M;p1|#n$(YVQ07#&a|Hh0!{6!2BNL9mMK#r+Ugxr7S+^2nVw}=P2JjO2k zquX%6_YjZ8KAQzhTTLCIM4t2)+6ClP@JN4GW_=-_Z4LP4T$0|`GZWv}pOwtD0CDDN1c{PO_@ z5&PyZBnNAcgA#uTGdc;M56ZT;5GeR}{|QU_w965P!|(lK?)s@9nU8H|;s5V{;S7}d zUP8FI?3H~D2;6J!TnU2BP{_7h2&qzZ=+w z{iqoKzaD9RUg8Wp!tJ>j6)Y#l!}I)mhP0V9tTYq$f&IaJyz78H#DD6*W4)MhPbh0) z$64V0Z#J{vuY|XVv>EfqbwfFmAo%}1JlEbVKhQ!iQJJI9!8P7V=6u`yAMD#t<9=~h zKbQSNmvDcvUll;Ghq(Or2!8xmKj6%`^VpguEr zv^Hi8C^<@11fZ6sJ)eQh|LT_Zf(uk}m8Ei8)@_vbtlEP6;QL#M?w!-BuT(^4WBj?0 zQtoHwIngTJ8nr?GG$12)F%AgNfOCm2`%U+*Ns2eMF78OqjDNKj(r#Np{QLcBV(!200r{2x@|rysuz5Uuuk%+MT`+z_6n=XMN=qf~ z*Z$6b^kY8|Fg%tt-v{<7Af4_i9Cr+$`Pe2NGFcMw-o^9b_tB_`d;0Jqa6W%tZ_gmt z1;WhIzvB?s6miXY{D`J12IVb{&YqTx;k#!z( zi~;R4k6#X0y!m(&h3HRcgsG*6ew*i%F!yWB@jw)MhkY^Nd0pl``c)i6s3G%WzQ#`LtF^4fduf1qPgMGnXK6?ZpaNYxdv;7*s_6sZI ze>u*0tFqDd^IEQoIdto2w&^VwdfvoC)&ucJ6N*@!n`Z#wtuW?oOE z2S)e2hC-|g>+sKa4x-75Sc3`kk)_=WbESkx&-{DLzkUas&n7>E4{P=0>1b$~fh7BPP$nmVCaxeUwN^;xsKkMdgUYqCdD=I0Ty$k$+2IKPb@5UI~!|#9l zF(TH*26lf`a!IL~+m}O|%HQ|gcTIzK+rMdh5p#O>Ia2%<1Q#&}0I8JcIWXVL&wT)K zfM=Qg#edfavpvCmU@T;_ZHG_6O0&J5{fd8g-?P65TR*pNnelkG3IERR;`L|Dfq&f3 zY>OOzuEXCC68t0-ljC~fMdj&V{Rx~8#*G6X_^*F+;M2{v8we4!|M7com|qxEu$vgG z-~~8iO%XKs%Ubx;SGY&mN(y5Yz9ydY&tteA%9iIf>-V|P^JBx-pU;V98ruMsm1o4) zudx;X=kwe^AH(+Ounk{3>rDT=Zx?p>SKpUtOJr5i#{Z@b{~m)J_7$-44bH108R|;{ zfIffVuV_;!MrOtX(ubZ4`v<13e>qP*gwns9H}4_-)9cOagg39pA?}$#daruA@E1!* zYF>SSwSKm*qS^OC_xP7{x##R#{)cm2oZCNYNM$)c^JQ+E|L*h5V-4%tFxz|Wd6c51s{Wntj(7j91KA5&kLW3w$UY_Y4eM_V@F_ zPj}gkK5a%Av2boLfA-Dd*18k`G1`^`(?uL_`_}tv;XGsufd(atKgYjcB?ZS#w0mFsD7Sb-|zII zI-mcU{o%jQ$Md=3U*{J;b9zMm&hPWd@AduuZ`+}K$Mz22L5!Xpj^A-j;CyjDfA$4% zKCWNS=i-@N@$Ylz`w#v)cl&?6m%q-%bGz)~&pC6N{dj@G`Li#{Ex*^UasJ#-D*AFh zp8+ix&Mjx3KKOku&wsD|*SY9ZSN!YT>U+=IKiZL(zSo|9wF@xixc2gQyHwwHhcEhV z6D%3?wf{O7iY2J6jH$XKh;-Z3wy*}36Dvp0z9&=yK4phpF?Zk_w-z=$BO=_!Q8^G&Pna`W(SQhX!zu&u9VP=Jf-|x$R9v6RHZDNrM{p)>+(o>X3 z0&e$aflxoX+{DTan+;rt-kF1!9V?q~G;k+~AEPA)R%qsW%V};k+!|~->;%iTtYWfz zm%Fkuh3}!x2;akd@bOZp^TWSb-talPY<>?0Azb=@;CC^O-9LYqnt#`y&zC{?QvSqK z@jDe(B(y(-VHJN6cg^q733#8f66(Od$A_Q$L_eg+jv*Zy)du-zCRGqpuJT#ALi@g zdh_!O-#pU1R?qiE%Rx@a9{sJ8NvfB9ekpZ{}a zB>3M>`~3KS|9W+)+v&gQAjeAnFMi+FC4>U1I{)9lNBig09{<~QzD)b`$#AU8>iOR& 
zg81J-Uj5(xRYQsVQ$z{3{!2s&^)P>mC~^1eKT>G_uBE^`;2|QwXLvG11pA@xl2U&} zx$>O)oP$!pMy1Wbk)>Js`C(VL62$UHR#eJFiTNeuDz7H$%Nje|O={g<&TutkgWJSq zvEM-Y6U8hKDjvs{=`C}yK3q&|bxOJknyafFG$jj$&?Hci&@DIk3zShQ^-?nXW50ct z!%!{S)vcQIaE$vo?LY?>6|cug_~d^}4cGRAyNeunl8-lo*s;};e;5d)C+m=@_{059 z?pl&)q{~{Qjc7u2T(9|aC>p!f|cj)~n4%Ku3^^ADoVON6%#IHE2};YRM8UyzJV4F zy{g~c+DCr2o7(jWB~GpGN+LIQFw{VNL+Kj2_N}Y8*N4x&7H{&BI*U(|AJ+Hxm`-&{ z_6Key9dv~m)%d0OAi*Mu$(I41NzQrUY@x&A)w?bdul(r}+KIZ%M}D&wPK9;c+q8f&28@R4fYmxzmW+OH znAbHJj0WFdu9+*i6P4oL9(9`K$tCR^&)oSAC{JaW+uLd{o;2;m@DzVJy3)BQ(hX=< zL3EryENgKqEk3#LvSYDkrcfNFo4wg+p}ZZI!hU(ED*av1pWOhTP&&S(yDp)4@p^ET zZuDMCr-&)@hR64W>^vvfCgj^ekpA9ZOm+>hLNtu){$W+P{^t&|&4MPl*Xpv~FSx_G!A+hw@#h#3%#8ZD zXyx7(d+`nC{Ds)uYeniZN84YyRt$Uj$}YUY)?GS0QivLK5$b<5H>Jy~m)&Fza1la8 zR!mgSvvzU$5OqiEuYEA7+BkwqjB3yJJ`$6?3Qh{2SIc4)>;4v^PMWh&E#C2o|FWO_ zjsvZ<)ubTMvCCtSi)Q#H?)ONOjN_Zyu!-;~*2=;6WvCH_cNf~KPqTfcNOC+sr#4oK zMC`q?wt3=MQ~rOIh84>;EH8-Ty434$Uxch^HswMs6 zcW#szS3S0Yy_StWOQP)-tv zYC36|k<}@7;9E>X)feunuj_F=*>?yzu88#RfX581BO-tDR$O-9DxGXRJKsY=?WqR! z^SXIZ;$wsu<&9lY;^CUvzI+79n0{d^C@6*ZAgcrlxS!Z+SN!IdE6VwRgS5eXou8X2H*X5*?#KIN8hf5JqWWl+3cmvh9y*Fc>`1FHEc zh`N__2w+vSOfI3L? zMq9!MQ_~A=Z2!(&nfP{#2ku50bfG7dv|AA8W$TgqYb@i|= z-1D^S>y1<_SM_s|ze~G1j15tNRAlX=?(Vr9pZEH}>HD@Jp;;nct-*qq^jh%!#=^?o z8yPsO_5032q7$rAVR;F1K;}nXNkUXt*uLtizIHWb!!t*THqVDh^l~H8sco}qiUxnY z*}8f!Ys&QK&n8jtq+Q*(jWWD$sAcmW;rf^@D*jTxinumt@p64bwV@2cp!kTV4Y=ux zvk}X=ZQ4;OUugi<$a_*1N_7mht}z3`upq%ay`tsmWf_Wfxh(fw>M7H@Spxulen$39 ziEZMgkEk0ypsfO>-w(IHy zU@_t5-m;|h_6KVm>gjUTE*+=|lUu6n*{4O4oC z11$F4^F0zE6$M8*_ktP}78!oGQByt)>%C{)Sm|0w{2Gj1TX2$Rlco>4=RV1)O3jr& zgqb%!tf4!A74*HHB(@ssmfU|ik%GY4c~u-ByZo|D{PFnH@h9WY#veyNLG`u4AB{i8 zJYh_n@cuNvCzhvfIOR~L3yTY?lJE3xyr01iOE33Ie7y+I;>8(@^$Rph!9|Bu@#Cl| z!uDB%dwro=ohE{_l-HqaaA`QPcwDM=e=7|?Cm2Q|sHb&kB;f#&oa2A}vO?;8_Y@T8 zT{$3H)Atv_y)@^ANEb^gc!A|~Awe}7w4&DT{HZ*5YwsE`v5Od+K35Rc#S8O$WiswQ zoMp|kCb}Xbwmw2cduMOf#Za0pm8S&1koP-R-MnQ8XzMM=+eZ0DZcjeff&<-3^Q7i4 z6*wl5;&saCFc-hV)ii&CGexrFjbb0ohi1JqL;=*WeLRTKX9YWHwL4nI^p{h<&mGOD z>)^E#KNgkSKu&=>Xj|Pa`Chr}eAm{^r>-QeCn*-v>fWU&r!DnM@2h`pGC(m!ll0A#tERr$ z3iUEuu?0~}A6`CMez@|H)VO=#ern;Ya4vE}1ds*Ux6wXm zjZbK&Y!0N?VmjJny9aNRu)HliS*x1?q8c((j!=&gD*S(vvbwyqkH@vf((da_cKiC# z=@TPpR+>UH-#YQDtXo3~>wdK)at$e_>O=}me&`;kZBO_2=bC=QLBDPv2S_>l)82^p zEln}*35t+lt(O}rvw<+e?kkzLE9DAC=5f`vrTaW|V5ljXJ6-VM6~Ot+jZwBJ99*Al z|FhW>bbWtRcaIfqN2@yLFXn1N6USt8N^fkU_s!{by>BlKIqr|d!?_Kg!$>u=?7MHY z%z4$mH#KwS6G?ms?OC6owWmWZ^gMOYg4&da>`CXh<36;+TVA$jdB3MmAX~mxw_d%m z*C$jNGJ9RVp$|%~%MV2@5D?Fs_EdnhgL+;CB%Ocnhi&-cUnv-hQLub2CCPp*o{M$; z+TCyWg!WG-(`@J~HWMemz7!|$1gi}7CJ#sH4!Wf0$ZR2l)raP4{oI@~)7TL2daaF< z-je4)L;Wa0oP(#fbRcdqmCY^#U!fUN$z>m|LMCgSB3Rt_0EW3hu=7u-9p!{OwdRy3)EWw?UuJaH+NTbi0S znVt^IQ!?I!`f(H&{#IW*uG=oAG~B#=A~`^_(vjku4hepL+w;dIR8jU>tX-91cLjS6 zDvrERyLO*OZiySSU#3zqu9GXB3f1%Ij$D5T#b@VuRWv%%c574DPACGp(fiF~vmZXA z{Ge7jBuVll#uh`b-fwCf7`eDF31!u8s#sgEpXP0~e(!cwd~R%GETpyyHyaVsU8lpt znu678++c@iOcwKsE;kD_)5mo8nsoVoR_){!a0R(!BT5mV_9lI9oD=1rlxJsxcKx)81;E^3`vjjH?)uxf_bQ!ke!lq z_%<(ROOLd&*dC%5{3)B$4YhL&#XY**xCpc(JkO}E?HD-(awWLJM*pDJOEU8R(u9AjEC!WGH%Bm0(4=Tp@LxbwwvM& z>{-`Rl-bM1$}5z_(TZfViY4mp#+NlTn2U7E!(hd)%8*H3akHwT%R(ueMznvf!K>R| zpJji_BUwB%ENG}tW#PSwY|Edg^4aN&Wd;Le(Pek3s0R;9;EdGi*w*5!Oropp6eg2@ zB$n=+q4`sslUrS57nVU*Zym<#he9v6_9`U!-qV5wJKh(;Ms15MR9YM9G~vZfO1zJi z!{UYgV00U~c4sVy0;?EDrOSUC5IGvRc=+|=Fsntwl1+tf27c?li2y!pcG&G^9H;J?}&CD3$BKQ zN#i{s3w5*^^R^j5uhQN?T?v`ftlkFn;&d(eOn)xvp!__DGJ#F~`nc1|_t30JXAD#^fodHO`BZ}{t}PhkDWy>F zVb;9`y0G`_7Z$H&3#$J}*&p`0$fQJzMzQ1DTPW&j<&MjofH>4`G}S|SdqZgkL?*ry 
z?0EI4RjXG4v5?@JEiQi*L~H5o;U3XmJesIGEI_x^9zzzefP0hehIay$`bA|D4oz;; zMZpMCNCuZ_x^5_ihS>i~i>^<(uh%Q$dhB*}bOsJgtbj6q?-vUmvQ^T$g$P%Dzl+Ms zuo5?P$o_RB>tJ}X06DFu_s6jy6*>(#F0e90#n;6>^% z(aD;an4KtKYaOTUJwXg5MC~?{lG}_>m=naf&e=X~y1){3fnW7;zBoGTHrF1jk$t{k z21kesS%;$BEm}^X3UX^_H^vU-$D|OIc}JmCoJ%)(ba|t4n9ni8+6c`6>ayOfjP3pN zEOZHxiA2tP^Ra)718Ps0?~{03?j*fR7D`vNYn5K`dZXNdKS{wCb2m`416Hhcx;cQ) zO+4lSl@_b*=^a0rj^All{tVJ1`{FdG;DEZ(DLM5wlrz~Yy8vG^By*w)!n?L&t7m?pNnX$ymdwu+gG@q3g`CbjOBml>aGv3I!C1poO_irivt9q zAEv6@+5r^Y!FI22i|y&?^x?B!?b?JC!Q9mH^>e-42Mx7dY=rHKf`T=T9ETM|TZ6tU zi+-&9>8-ab3P;mb)#$+jP&`Lm-1%tC&75KBOn8>dkl(5KjXJ)Txj4or-XzRqu~|o- zt^6=uF<5`Sy5Vp-sI~BlKgHQ6Ii-`JxV1C$j45u?{!vBpOkV9$r0xVqNdqWV|J-mPQtT1}&mD5)b(#zaF6`)sWKot)n znoH)QZdb+${L2Nr^2dZyp7XXh;%70YAh3D|-szLb#lGF1KheTw78~@y;$Q=&+X?B8Jcj)4I7fw|iw;t+sy|4DPrsH?Ml793!I~o~Soe-wgYHt`A6E za1&x!WL%xsuZsiPsr9qv!qZ#XO|5vF0AqX)%aAVn>72ZG{B(xI=CrlT&;CpV*>tVN zheO3%+nNX^x?086-YUVnp+p5r$FZ+Ee)x#axA#z<2%p}oei|6dMC>>_*w*@JDKCG% z-jYLl-^IsiOZEKj{a8LbTJUo3wiA+QFW!p8Qr=8;@OqrQB)bf&0wK2_Z?#fRFh#Gu zbb_vKXa>|-)VE}JlXj-CM)&U%Wlz2dWC8MaTr*mk-k^SQ=QfaY+P2oN-~$J8>PxL# zPnkl!jwm~~9~;P@%a3)I?wH;|Rf>OVh|BA3YLusih%>PZHUlJo7L{}n4#w`Y9qN9w zVi>iluT+C*q4DmCIahVxd&wE>@wVR=`jsr|x4b~!Eb;Eyt|B*-_RcntHXFcbae4Zq zcs&}Y-Y&)}Zyxoa@0Rk}UtCOVNAM=sHUbbB?|YrmbIBz<1VLb!kaOtShM zPtQk1UHwX@)?%y~kX`A)zz{u+u!(mwbcNJ!rYZZ0<;btog0a}CTW`+cy*sZrA40q( z5K=Ekrn;SXnRE;7Iu$k|v$*H2=sfal9Uigrt;qE8)`OFD8ebdhzNXUsIj^3&z1ztU zPw`S^;rdL%zNd=V4wsw;8@t&6Sl$vAWQNw3D^o-xv5 zTpd>=!2aiaTl1%h8Eo57PA%&x^{NBmTFr~sSFs)JmWN_>m=T^%IT!=jxV zRruKz^Odx!3|F}0@`x27iAQgKggLDz^ku@%hQ%! zI}Nm%Y+447k#yL_OARXMy1U>CCVSA*-3(97a%V8`q0KPmVRrKTm8}Hv#u80!(Yxtx zM?lhncC&XD#gK0Fbsm3FCXyGv!ypQoNKhyB;{JTNuheQkVo&X?-p`%hyPzIML&m`_ zx?=Wt>dLEA%3h&#^FiN~Gn9MW@qDc@XI4GU=WnHRcGGrwQg%=riz0ViwKqh)GYP~< zV+_5(%WGP&*Sgo2K!#~_zP?6}EMEx61WwIrT~52RqSGY#ITn9?5+B};2>COr%J<#c z1MSHE@TuP|clAtWvA-+(WCL2g(Set}fG<}Nenv%4Tc+Dsetd4%0rHE(wkj$pWLzr6 zeV=lt>YwmYa=F>-X*UF}xqqvSbdKqDviY1y<74;EKCmceD$X62dYM_|+KV^y@aVRY zV6aQffF7DHi{*dT^lo5-GYT&_hKA7ZMR9vSIJD@e#XgB%n%84=$v&`zS0LgVY30VP z9C{flJ`_n*p3X`qho!u`s^jp|tERNwyOYp4(QRI`H6mhFGEXBO!MAV*iwYzT&a$6J zPj&-mcRDh#LOb@8+U`oHEqMN3V#Ze!8hMLL7*A)yzchcZJ5s84xhD;6Jw{c-YCCh; z2kTbrd75fZ)Hw_$%{XR_;0bVdq{&{%m&s~NO6Om3D_*a#Kh#J+5X{S8do`1{TelMe zh*BDUvmQWH5(&AQM?^uAjZn?=AxBt4DD>LE5WAf)1tWOaN(5Hdqah?RWt*=b<}8n&H0bC5lGFZ@2mz>b(I`9l~AI z4Nb&da_=mvqgs%3q>^+=gW(@anp#3pkMa}|`RaeOdQ$t(dCwyaN@wqD&=ZehIi0Hh z0s-0M(vUCvTxB-Ci++rg%`$lGCkhGy-j%qC^weF)Ga>7TYj8IDPh~69DVWmuxeAhp zt6-_1)HJ3jRK&;KBamEc0ame%Q&Zq4Uj*(dG%nNYvz{1JwMyHsPYX4uw+QgG!hXE2 zUebSs*0Cj4I*u-cea_p&rm4EbNV9&~<#nVBov0g`n4XGyJO^R=aX*#13H18>vbu`r zh$h>EbaVYu?FnK6C~tOev`X=&AV2E-7>@!w{LGN_*=)Y&*bU~K^9%O8%Oq}N_2b- z_{EmW+pPG6V?*cl_I`Zp%BE}A@!LO?)!jg@YvVv|8|B4Bl@ss8(?8n#yo1UTDv%2B z2%$KCOJK8(1M0JSZV6W<7cs(jSu^>f?OpNbmr!% zcS&a+B^3%`XB4Xe-aJd&KH}(Bx3A{`8?}k9&g?Uro*+gUrqmkAYn>WiBw8KK9gv?L z>%$x~ZK6G7L;~})hbTUTy)I5v--ukXXj*%Jws+w;C?u^X=V$y5wp(Z^J&_i}CMmfz z6t>z&46x#nU+MdcDGm_aQ8^c9Qt;Q7-u@%m?vF=Kuf=_5F>KJQaBu1WZt&Z!}Y{} zo#w}d2jD_OydYZdwEp$FP?I}UMH((IF&Ez{i5vB7PiP9Z4rVwJEZ^$bZWna0NIl0 zP?~@{I&|K_xo}GCfKLXsEu{_}Gc!fl`B4;ySMgY$-w|tD`OS4wij(O_s>_LgwuaPp zdV`!~GG2Bdn|qz#po&cS{=GUL6?b!a?ThsxY1-%z0HZded;WkPeIY^rqKGno`X^2Z zsMlXOp*1)*e&K`wb^Nzf0)HcfoN(ICk|3Zx`J&wC&q^;?-J1AELIogv;Aa7h{xUNF zuM4W_be(~l4k!xXXlAvMp9=neTf!9Wh3~D9_%F`pFfCaT)&DqwoR6PGGGSjue;}<{O~v2+Vw#wdWO4XBHJ2ZM$rT`6XDNkW z*ZWGavN@6Oiu#faQT`Y(_f|*Kx6W=!wh|6MGbcmVr z;O_Y=A%7;OzIAzy*@Pc9X2xK6!CY zh4d-5JG16Lo}?hB+IiHd?d})Umi6R1e=hAc<1G%owIz>zBC5l6_vBM-{F0^DZIZ;HbILbEMz$~{Cf({Sa*M%*p=09NO4ks}L z-CAz;(%a>HIbopoosLr)==TD0Esc= 
zVeGoJzA=2I+ZNf=uXG8v@$8Tvjj;2K(`zCK$Rs@gAEOCr1q?s`iKqTunQBfmpl5x( z^nLMC02SMAPH*$26Jl||JlG1_w%C^e*-*uM-P90&no%y4B1aO1_z@RoQ`stU50Psj zIVi`?1$(@zHadlvj-Lm!RIGmgs!s|EE?Ih5C!^=sr1w5RBP%$sndVMw3R2r~zg6qv zb2kB@IXt(pc?_tmuvRaEUG_PSaeh4>kI_A6i{*t!&5Y5~;I_Er@4SCIr$^0Yhyikz zX9jJ5zeH&_1lw{wwC>ea*ld9ZzrGp5TYo~*A&Ac^Jx6LU7Rm7dwO;j^ogqFM#|mAq z>_F|ZU_q_qMp@y)ub|=WQLnAcLxxmX?>63a-rem0n29NQ)E9j-Vt;Vf4_MZr+zi39 z6GysYLfs{x*-OJ7BwX^XS({jwCSPz!DFF$HenM`&TDW7^{3U`}gYY1v^ z#ciy{;$3?ld9d_Ol3M#JF?Ax~ZB<>4hh1ez;gvp>59VH5ZPC`(Qry{e6lFemO|qt- zcB?{$%Iq891ZEL_jtvzC=iP_itVXGiB&`BFhF44F3XnYMB|}+g^(+q02eCtc+0Y6x z)c*J!ZT`+ZK%J#pVB9u`(P!T8PHJ`O4pH>{K16q7dwI3dcB!j}3ll#PY_rb3+VF?? zsjgt33!~e`w__Hj+4^|tbDFjHuQ$na&j?R@YhRHMsUOj*H=t==6=~9~i`*|BheaaZ zFV{(=LFT~=YVI|)ceP1CnWj{Klj}xwHbHQlh&`Z6#=VM*W*g%+WD^O~g;GJ1($^!T zHadxlnx)O0%qY$lOK43E5yUv}pfpxrOYO5>o5x~<@tlsMUkXw|-JcsN7GruL_NdQa zZ!RNwtM19^zC@a!G$prYyDe+o&n_vfypO+A9w6AEUr!Yg>zH+jl2)yM^G&(#j~|kQ z5@q5#1Rmk8!eZyQXGCbvliclUt7{vPI^d-4RO@$x2DN zEMmU=q%_+@QE(3(U-uo{NDgz2xw*Yn(u*uyQEMor`dJJrJ9av1AQK(00M*P@RUh>> zUDW7^^i}13^1>iRJw%>=S#S2^}LAhIh?+MnXi{&_pzkCR% zWbZG>yT&-T+YSDNnDMviv(LqsGAYk?y*r=Flj!)Jx44-rhl${CoJnzsn1dmGy22uy z;J!h{IrJ-u`?Bxio55F0Qt0R$6mPxo%GM2{6={%eh;;{c%&APT4fj&nr>{Z_$JAV; zHc^wr-&cBc!@w(lLYV=%?W2amkFR%&7orC1EXdY6$Q782pLayBT$k@ayIa(aku(1O zRtm-PT1SJNf0rQ+qX;9iYngBY2OlM*CB0j{*A9cEaAsBe_4Y8_?*zZi`UzzSAAjNH zhXBoT!GpwL6tY^k1%ZktC=o|vl`7$>H1(sNIxCruL@w^MEP|)x7 z5<+C4031OAafX%2#g){P`Bg%kub#@1H%cO--t2pSO%sdp0+Q)?xOKOvwS#x29}j-5 zzM;bb{%X@sIYgQbbEDL;!0w=0Fxoz|{`kl1Zfu?LEcOG>R1aWu-WMb%h$+)Yt*>Pr zaQu5Bjw0!wKUe28RY2qlSe=!AueM%6o)2URVPx|L%l+t&!pnq=y6ZmoP=Vh~*83z` zx+yn*_vI~krH_rtILtnFX+!{tWoLV!Lz12>2?bBZ>*{2qKI|iZ@RI%DZ`07q)*xEG znzX$KY3ct<+nY73%5Lp{-z8@$D+MYl{&N!g9_bNIQi4h+f}+x-^4)XcTx&hgv-f`A zB$djlS~aJWM!4H?k9&;kcVM2^iL2lDJ>?I7o90m$Z3%`tw&Y`NbLq^NOev@8Sw+d( zo%|F!?|y@HpOIA`f&Uz?&x=is+tUr)<%zUTM$Mm?X0yfu#BuLPLmrRBHEf6Jw%<>C zQcdYW`T+99JRpx8a~-jkKio~Hk?zJ6nN(QP__{xLyD4Fda`R|YqPfJ5Os-x;_8eY+ zVial&9QKUZV8aTNw>sEcncVf$-aJg=bJ<_tAHJ``!-b&Y1hSC#)7nZh(}}Kk4<05* zzRD9(ItsNk`xP0Ji{1X2ocE8*vARPrTYaVJJzT%L*TRr?(_$aJFVd?`7!UiCgDCHZ zH9=b23`!QhvOW}x-KeD(16d-&VX^Cfo@~EoV)eNlAb$ZIka?{j4SP6roCSxo`!VTY zFBs#>EJ~J!r(#vF9BgNwj8(bs<7mrLPh$y9!S^T~>~42CHd}a00T~{Ay7h&Rgz_co zgSgr3*ExmV)NHs~+zQHK=Syg(Aei`$Cf40nrK_Iqv$~8vA<-=}VSRMk`|Y%Up3sfCGu`)e#sJp6#gd9)`>7ry$42`Y37VsJou{ zv$8*i?B%mQ@AJT1-`MLdU)DqHw<~l1eAde=)Y7cudRiXH-_bQ8j>K+KZee|G&D>-y=uX*k3d78Vx*ua6^~U% zem*%NQ`5+e*uRGu-Ob#c%3A`w-AA?C(=Oh@x3yioXtmn3i%57N&SiUgxEt=c%va(b z8=CT-zA3^^Y3(9J5HT=DoO@D2>&(#7`(|8qT0Q3O#?|v@;k-y5{w2t$ckY3yQc{IYI)HYIXF)lP^^c`QF|p&aGSbAw`Ja z+)GqpnW`0&#)%^I%2Rk?UD-hO(SDaV?38L}s>(ypC0!<4J+lH&F_@KO7q-^jnn zR}d+^T+PIMsF<8IEnUBVDYwUh|AGMM^T}0!SB5so(n!Mu@MKm5?v=RITK{?=AX8CN za}CjK{&*7CRC87lO$;fKrv*=%4Xc$*lh55hI23j2v@;idQdLg+LDG!9WtF!z52)kC z+%PLuTmh5JO^3}%+VA3ARVT!hy)(o!)aN4tm}TEJz|rQrqXnjaeKwki7(A!8$XRn$ z^>?1%ds3XHKD8{k>@jTX+IcS*!Ki`2*{( zwag|uD&CLS9QM6`zFBS6jWHfH?UgPY)vJbbq)lp#MX|U7P(NKY19;H`uPF3#s2nd&&v2hXbR5diMB^=lgxY?n$1+T| z(|e2QE!?G7W1om|o1f@-9XtuDOrq*};U6&X7t7`&nWwdXSFazA2|^&(R^4Mu5Qk{v zP@A#|cSFsE>9R%K(cy&Tie^y9HP3me{l0|T!M~Yga_el*14_F2JQ^y<`?q@|>!q=O zYnOpu-mcLz39p?Vo|J(<=MLF z{(@>-$VG~O(kd%^>$#>*R#O-EE#J5k%dD3>t20l%(;QBTO(aW377!uKCoAm8T)+oA z!LNo8vE{U8`)BA=r#yPx@l>>nY$|%o3WXhPFRmJ_14>+-j?>UzQd&IT6OvpJ7hQO$ z+{f$Gj2P_O+pw-Vggtj1bTlJPJszjyw8f;KElO*DuR*WYV+gMfcksd8S1~r9wx)Qr zyl2W^Mo0xaQ>1T~kdE#g%?w`W!`r6X!jiWTivk)-AC#obwrFbEsm1oMbcSwp>AVM! 
zaZh>mprCJ`BSv}6mF=K0Z=vFq>)6TDo3_h5z73}5I7dV_+3#JlUJ^2y0d_NZ>uKdV zeIaXqmdGWaGGEhUrxQ=-biV2F=DEaT@uM81eXGO0uzhUb@3o;-O)Q}3G6;Yw~+)RjXHloD#g=Dl&QoT_0D3^*EmOU%L)Do`4QY1vku@{=Ewhc=zUFjaX^d(s*^!mWL)ghRIW)}qaNXb?DCBO1Fu=Bd;9LW=Z z|2XNSUzEP-&P{mM^*0!8uZDj*s?^pv`P965*|t%rBJv@}US7}sfdC|7^yYVFR%o?o-s zsgY#$P7uGa@%36FkT02x$FI;>a4Z29u?)ttZZ?KSzmwH8t%lcLFo~*Cqa65uz)1Fql=vf=R5qW-pVBHD( zYB25LrMt)jyZOWJV%-(8<+I!EoC#{YUu+0%W5wr#n2jW*x2%BOaLrtr13Ksad`%>$ zFbi(O?^bZ#W_RsV?Bzh^?V_l6jtNQI&a0cw@Ugj=aIBE*K-C_Yd}Llo_vf)CoWAKj zfCpR?mb#}O+}sppWN4_YsCfi`yG!jcHmOx@X7MZ>FQrYinsIGPucb0_WZWCv1M?Qd zaRULa{~&2iB2|%HWa@=^rF>vp4^~PAvJZ0hY)EH2S+h%8UfOxWS&^D%cZI7`?T*Dc zxAQTJV1U)XXqTUj27P5w8}t8G(wh402UZsNuawH)C9MJD=cDG%G?3(fGtR{NWp?9v z>sqlgt@LNjy!PfLeUKn@tk z(ura_w4?DMb?^SkTW5ttDU*ziGeUv$tyzxgzN6Wryx^CWy+rR^oezN+R2%;tiFyu% z%?=NV=R^5cfcJ((Kq!KL5svhAQC|Cf#5F?k00oLcxE}i^8SHNQACMq)x?YuX;%Nh3kc=}qQ3 z^rIUG4T#G&9z!PJ0{m(V{Cpq8VPF(v0Aj0V$Pj=7C7v8|5u)OM0TfcyGD{Hq>{c9< z{goI9+9I|T`+()mYzaf}jwRf!$?@`f8GOEayn{<1YA2>nOJx0AG;&Ys!_HoSZ$a54ZJV1=TcF-7Vr*U?|4e^LK_6v>XcZiycxell2yAeB?SbpZf9R72Z4;vGGI$dp)+A#u`=RTkc24T$Wp-axxwB)5FqXo$~fC}X3#%k~iGN32HD zCn6{wJE>Fb>8y%ZC%Ng?iJsbzzhL@}5*})l$>rs#o2w2`NF$%OLuhvK(|Nw-6L&lF zuUkX*s{&$RR^M;U$k@v+cmmiKTzb2E4>#4(PQ6vb6DwMO=awS9Bk7Z}zcrUzIbHjE zzh7B(;?bLW$C&RV79V9Y4CU=S**2?gDl+im!{gl_()fyq>9f{akfTp+n|dEY8M+oL z{;EIN)BchJXTpL^mK>x$dB_nRzz-W<^!+9->X!Ls2=)1u5nD)Q4%;M$bL@fV8_%wvqd;5V*jd_w0)5g_CqzB{zDy zZZ0m_O}D&SvMlfEwYv(nvt&M3{YXx$>_$uQ15xJV?M={sIMe}EbJso-UuV*OpVkJE zoct#f1*Agv=c@oiz($A_4P8PzNOx|@YM@S1tO+20(G;oYLBXotzXSf=Kn-Ci8${`e z%S+248rwjivvz5eYc_ZqcW%!e;B34g4C!IijIel@r8|iucgz=1kY$D7Eb{IHl*%iX z)#CbLbnVmSB*R+6jnqPfwm&|cA3kp^mIAkj zFS%!bl9L7HAV}oFE86vY+PE<61#g0_W^KUY9}(a8`t0xKO^d|^wuheET<{L<-rCe@ z3w3e5+C_|SY0LAnDs@8I+HbSYAg1>A?;cC!=Zg^?kjGeZe*4sCgvMJ$0$IQ+ZslJ6 zwR?PTL&Q)RtW@Yq^+9UzEgEuZ^;kWG=B`|Sj@`nmnxlnOFTL479PA;|*2k==6S(Wg z{9RC!&q;{@p~Y+dAx@jlDl{BCA<^))v5QoOQ&mP9WUSG}@OdC@kh~#|;o_ffu@kL9 zAC>9X4m)D^OF-Y4ad@f64;_tF)fPTUTA92aBFqW5a+j9Wj-Hx3&mLswPE^AUxRh~! 
z(&<2L^iA!v$V@%%}b>$tKw@1dh zEKnCK-T*jzJ<^B_5f`3YZpgT<)gaH<6XHzJw3wVTRbBBK@i+^v48UwFd(2csjFQ zoSoFygP}D6!gg3cxKSB!l;`8)o^*8&5VW!a%M!?;Za_>pRafg105OqYZ>^AjU@Db{ zq`_yg=@q}fVvmW#VhHX4U>YBVDf^L>b`$|8TpxvO=Y{` zuIE*pE6n|7jsE)4Az-woT+hK;E?3^5rBcP{Xzm~>`=;B+>hy4mZ9WOd`*v4uFs>LP zU+k99l)_+p*&dDT*3|U|q5{T${m6}d(~NPmrT?573%ASd*1j&dG(QhdI({Ik86ug4B<-J915tHydjd0< zZGsid5PuH|jm6tvCS<=y2J-XBv!CLb=N)#}w#f-y3%Uj2ducM#bkl@?VZ(cic!IsY z?;#O~iFspl{s)Px)|sBpfpIGutXakoY5EKQY#QM~ndnpSD=>fp$k|{A#`K z@4L*>np0-mk8=T7SEh0;P!@&;$#e1;wS!bb0tuN=z371JA(4+$97I(sI#;nZ#%6#Y zMXevkG@TEz_|A=DJ5Kgassw;Z4xNOM2Ik=<=c2TcnUOME{+wrj*OPdDohp|clBcm+ zJqO+4)l~t78Ap!ykGnCf8{@RGyf>GZN9;ru=Di>g#I)L$LB2XWG7OTX)l6}<6w2%U za0f=>W$RA4k=aj^-uC>YEtdyb9C&`U&Dh=bWEP43URl+`Ta#b~Zixq+Asv==2PVFJ zxFQJ-<3~J8_vfB}o%V|yQ3u<27-&0NEE?u|^-r?g-mqd|fUkD+Z?bjOG8xr_}HCsoL zm$SNZWIRcKn9?F+4-UH$dhjy`SpiE>N}Zld+Zq7AB2c6;bP)Pv9I}Lusx;VQ+cb)O zKDuUor8ij8`!(Cs{Bsxba3gI_7%S0#;ey)-&S2f$eNL#KN7!AtX;Gh@P?j84LJtp@6S{ z*PlqhUm#4A=j z8h9@F*To2(PD164&}UrV&tZiJUiuZu_p`RemcaZkzbEnyj_SspgXm^uHE89%PZD4) zlDV>1^YxFfa+_IyBlo_Mdh_>Vjf?_+beiPoP+pvJ8(b=Ven?b_v@fIJrXM$WA+)vc zmr6g8dUIsn9DO(k*)beQ&mrhBrwf9-@chHPY{sUtw4cuWUF#-X$`Rj4ypudf=CR*M zycusa*B_(d*BSnVe& zSxgaytGxU{wDJ@oY=48n{{mOIZ-Yo#34aD zqr}UThOJl-uQInnm*B2FAe9yo{H!q9?$tZ4+%(#Y(MjP(#Rs(S8*0j6 z&ArXEh=CgL8B(&{)LmzY9`GW60}sxC!ynk~KfvODV8b})FG%4MaQDA|!Q;M=yB_Iz>I%E!*8f_nc_L!1&~Fk_t+EK ziUaWMK^&t4kAxd&@k88~iVU`!!R)To)2`gd1>09VF17hyQIRsMNFin!{4S208C;00 zm@aB1!IL3>hB_7wPAc<%May`l(Xfv4g8s3nWbj=N0_vN#n4COGMT7Q0wQzhKJSQuU zvA?#K72vx7*7r&n)*k!LQVYvYd^gGRtm3mCYOq%fw$1>WZUH(L4_q#29M%K?Q%Edw zzYh+byRu>;bmRaZT*bpE)JhtWZZs+7Xmd07%OdVf?sL73kr^<5Kjc%;I{@J_Iau~tdgq-UBU}p4 zsG>^@B?1upfscEoFICIM%%pEI4(Bi4cYP`^TdtOvmBY*cPXh_6%md-^@l3`C9J13K z;UEr1psw>S4qm-~=hz4){(1%(=0kCh843q6R}M;A2evV0*gK$)c>Wp2>CZsU=|6!T zzwh{MlQi%r1_{V(X>b5({sA6FB4C`Q@*8kC8NZ>afIj*Nu7kjIe(FyHc@b+7Dm;d& zfQkFa_mO7<>CW0Q*XLKj<^XIK8P_IfAfT?>>P1i;lDFx9(#xpZ193~f-*MT+;$~bJo>$29lZBcBL*jrf7;)O@fZ&0<=Cu;NO^ zM~P1NYAHs!-=!J!1O>~ zxL*RO{2Q!{u?5HH-@o&M@dq&d_rCQT(*6q;igKoa9WVg-&mf2I+diPIQqdvdPd;*7 z!&{gF8M|>#5=6*ZD@@1*DPz)9 zFdphq1{Ju`)-Zk*fnIO9u>n45Mlw!BVXH^TdP4+yo*yche&}z5{kQl0TbuZqJHL?H zpE&`r+!?U^3%>fRJ^hR~e+Q<|Fx|g_(EmDr=94pwc`feF#2groYIoXkb2HK$~vR)4kG-(~$XSN^@f zqkevs`Pa6b;q)l?zwD#G0dArGh&}>Xt^?f8JQw}lhU&48foDcJid;_J5ZbEyp5d$8 zWCm=T2-ox6!2onWD}b}Rr~nmz@NOD16L=t5CUOH=3B4U5$oayQrlrQIhD~tUnrI8Q z?OC?zi&X~)3MQOF4!t4K&=(ln*H{(|7q~Yt z2CFW@Ffo?knMv4S3hYXMe?A&bP>|{uDoiX?hJ+(x7L~@l17+ziRCoq|tp9DU`J3(d zXAGUki{EASZNDg|e~d4H0i%8WbsetxIhLuiC~cMiSStu9^Z3#qWfz<_)_!a2Y$pu< z(i;p*VyP>D^ql7bie1tv+Dr?2Fief+h*SuajUUK4>BHIXYY(4$*ow+BkfY9N1epin zwXh>=@<|7_qh3Un!Ye+1=6MeNU*p(_1~v8(G7$;6O#cJ$mj263Pm{IX8Ywk zFTeWPPQ!*fJ=-s_%YelM`-SoT1?K-#Ry9@xFM!We7k}Q7hZh*&skB|8Ov!3~x zgZ?=#{|z_}0QT>>Z?*%!Z4!M;L;sNwT=p~o3+MRxSzv3F#uJr)YNGQb0;^ov8vDJr z1kiW`<$a8Sj8rAU6)8&o9qungkTIY&kwjT@<8TthV8

jt3TJE}Dk1<* zj@Z+v)ZAno);Oep)rRS_c`PAwhjN&lPNf`6W{9#AP~Rwfn5WPehaEsbJ$|+WFz)@H zU;eZ$S^R;1|L)uW0^s{LQh82} z9AY2r48AKr#yZ13Au>lCD7cBMm2T4-aDpyB&=tnix6TQF5VJ}-7(M%Wj^rHyKw&_o zdq3rb$kgh+ocaL8^&J#I0W!=;%%@PG%oM{ABNJnyh_b@v6%sirBuqO;|Fn;67ikB! zfNUhekOok+=VU|Z&SPCIq_z&Y#9Urx0Gnbiuhp-4`nR1kzcak-KWpco`5!;g27mf4 z=Jmg9$lqmuH-p!g^LY8IOn=($FX%dsjTZBZ%h13&m8 zQ&HZ9z)m-ix)|e3b|ldS8wq{%#A8=PEc`R(@e|*Zu+arY!I;bDk=l|}9%d=l-5A(V zPk?!vylZbvH@m?$Qlv7Kpg0U$$rq6TYyfyG+D-+3fVaYZdZaq{-Sxdw2DQ9_YHP5v zYqKnyx;)9eh)*p#gMvPUgiuG(xxqkxqfL#ap!ME~cO4-US#;t$6`43Gx>agAh;wN8 zl&aNGx26EqBcWaf$BRhTv9GF2eZ&a?qdCK)AEcb4tW*{_GI+J;x<#9l)gjU_ z3BL6rd#i{9XI)`y6m)D9N+cB6i0mVLw;=L=wSHg2am45FyA0v`6+1D9Jf8gILI4Cy z4%lwmKw*7?b+lcrGZR|{^W2B(W->Zub5)DNfx6^K#2procEcd<4E7rTgnz_PXa5M+ z6jIm2Q~kBqlwWq)A79Zgd-k_I@VB=AdrkFg{WRMa7=!<^_x>8!zt=Ojb2LZybr7_F zaJ=eeVXu*gQdBW?0oH^LY^1YagVGR0qf=(9!wUEIZ-(e(jyBLLH_({uk+-#EVI1k~ zrFL|;UM)4J3gsh2ewbV1ANM|B_bj0KlHdq^TW46y4y@`l=d0ghkh;BiR)E-%t;=)O zp?r{6;C`cF=5-aFa_f0rg&-ZYx6OBdT{X<>!4&On?%%&`9_*|BtgZewx4}2`YaIC9 z=1?9f=CNPvs-JP;Tb@F0ESS=-|JYynuYziew!w){tYV-H-Gec}{M$NRhQWot#YCJFq8-_P*li!H zf$i~n4jGs{#RE|e8xrMU$HD=M`UWRZNbOyxKnF{+jR_xu0_86BZ&}TewrC4~t=R#a zV-Oy9wGD4>Pr&C%9jRcp{M!G&=J9{DmD%?EZ43OiJO9Oh_G`}iYdrp2+xw><^Y@x} zw)6k^f-IPo7nbklOglW z&e89C*pUU+AOqVSa^{G-_{7)@Q8E&9mz{D&a5Q=QZY;A@LkUI4m+1uLSt04yyjH*( zUPWlgUEr3x^O04eo)Q&OaulZRx~T`dF5)F_o--*`$-pu8M3c*~{wVx^40QqJV#-G} zKgzk+MfMh%H~6i;>;8s!Z0#{aR?t2_P}=^t_Bq?7KWp~Cwas61+mGMz*Le8*_}_iP zvoEwnC{LM=-#-7dp8EPHF+TsB4Tp9-`;7ms4GYJcYhd>*Kq!k2yn}`8UDQ3t3_=3q zG{Rc*iU}dZPTvLPmViTlC#X5?C=MM9j%_ngOo4U;!X(6)XB1?bVrL;!AUYq3e{8sQ zc4B={vC$S`=e?M72ZytOxlUP!!#r1% zKf9XbfIUcqz=MAq3$s7WM$T}Sd3qjUZxZ`@*K7^H=xopN=no)&V)+XCS*c~pY+r)w z12FY$2R8~(=zs=coe*&8cb)Ki&iUJT_#GF1`6GY%>i=0^|Mb6n|84?l6 zL7L+~WeaOR-zlk+;j5XcG=%1tRD0Z1+= z0=|cx$ySaVK%Ac^JFQ1{g$>RtehO@gL=e@L9NGoKhg5O}?IVI+oi)n1!8r4U+7%$Z zVJWR+K?u~(6XVJ+JME7>_OlKR@XWu<>sJ}TH#L{T_$}LidCd7UUi{53!);3D1V5gA z82V$pQsHayJrk*p;FMxbuWTX1&T$CvO3;2p#*lT#HaUh)Jaw;`-0DH4|5&X zSo*>amb#i_>!L>na+N9Dx1zvSg|E|ZAkhQ6mWJOO!6n52+jibA0^o!oD&{H7>&kF$ zFDAq?W`E{?uQu|Jb<biNu02OeaBJjo@a}XT-%>{ct;?c1os8oHJJ|0e_ zzrkLOgMz!m79{ixb9PUMB1>|Z+-KN1sgP0QE6rAaaUrnFMm-X$pgN_9fSte`vV9q$ zr+d*95!?;8GX;Addp>L|KhVe74K&2j@0qM2^g?*pNP*D@%16UAXK53(o0x#SF`|Ki z8UBuskkkB*j~E-jzPO*ZgMEU(*(>vS_hU=`tQr5Z3;${}Km8RsTS{K_($qae8_!`2 ziLiQq&x`Dr!YUyTKc!wqg4l{iERqO61z{|!tV2$>f=%1Zy|R@e_lRnL=jMJCEGC=j zfG+AdiSE{q_mnP}AG`Pa+`nw%U*rGWMt&cgeP-ByTlmMk`}Onu9#>}H1nTAQcAQV@nKGBd zAcw+J%4a(>LOsVXXhh6w4-tI@alt;&x-@J~3N}QCP1z24&FBM&OEtl}>?JDVG2UPb z*uXEuv+A=Hv3|ok#T0Dvawswj<>e}}X{WVA5MYkxx0{o;k65Y z@1$ONhdHYrGC|=F#$v)1AWurPhK0FQbPvhV#r`4jx-aOVW3gs*rC`4~mQcOKZqqz| zf0baybncHJ-khcb2J(6Q{@b44PdR^zx_SqufF=T zUYz~JE`>GyAKU0>ZSva}|2MnY1l8kzXH8-N5UXH35sav8E&|pN@F~oDfUF)X29u+$ z4P-10hOPJoRHrkv8S)O_&lQF~=Bo}sh$g_=8*BKl-;ib3n>WWODe8e|`>q!;XKGGU zSrPd{N7H2Ndoh_>tMh&yW3)!jI2t=FxY>TohqKh%CTtvU_y@(ZLY!p~`&Digu(Y#e};6DZzeuW=9EPl?AEKKowh{(L3Y zaNp1U8JlG2&rkUEzV^;feSG`01HVD=+YkR^Z_jhVAOHJb^Tj{Lz-T@Od-T_Jf6b@x zN5!6y+mL^;fk;<44X6}u60nDVN_jkd6bf5)96LHE5*rJ%b2C?v65T~izegL5p1#9K zEUP9GS813#IX5G9kq^Z4b=UBIrt0wuoK;vtaBYe%0?Ko{baE<74liUT%=SoR*lu>z zh8VUC_B?!GuerFgN5p^S^Kesr?G1GfW%$s4+AAP+b9|I)MKa zxcd}B@q4)I1=uIZet+kz)>5nXIQY zVU9&;ux0;^Yx#;@0XqO?sBcIJ{(XN&jPgwcfez^Ryzjr?86g6H62Y-fVvAnhGB=7_ z_V7zB8X@jIh=BM4LGq=kV`OfX{I^-BjpB4f7$4~dTPApeVAX-GoOek zt0iqgdo5V;8~Cm~LV{wZt1vzzn1-dfUk%mBF69POj*;j8G<_NyKM)jTm8KSyeP{{oXVBk+$R2%cDf{l9VxIHnXF%c~1LO7< zCQ8WI^K}ZI1>kbVcy?^ShnxW!52+U`lmBzB(`7MwI9Ap@#?DiRKuaEO(ufRuwhAJA zC$9!Kz`inED4qlKY?}WHMbWx5ht`~mY&(EMD 
z{=E)=&~97@@2r@8%ekl#8O5}AD1q%Y?^w_GW7-UKr{28H_kiLY`^siuwzOk7H36q} zg#b5EMQ%Kcy2G`&FE1F0yJIfj>%mu#k_-_|2mIYE-)K6~?H0#K8SI!@NU}m%<9Vb2 z&y9(SjZv0rjCvFiNFd@l1Qp76_$k{<1f?*4X$d}xqL4}gsaf1Z%y6!x|0+{vL^0C;ZNWKe#8?ie-_@H^u_6qiFVmys2C|M__UjYoP7zVjv`kb_C0 zK#fDrjFq6u;1TsgonfJandA5PPu-z?vE$ft0Q&>J&oP1t3vY!5C;sM|u$e$eMg&5C zgmx3vryfez_>X!X8?=k6!zS7Mo(GgAHlf&J?iVwB9kW;*qyHnD3;~MY_tXGqR{^(z z^1Jdt3C!UBU+t?y`}()OfQ2lRIe16L?dEbu8@F!}zON|ET!Fy_!{A$Ih>1jdK>0!m zc7C_eo$D1jszHr1^~=ZyVEyMA%>Vv>o^B5?JnHG^K7aKEv|ks~o8aQz89Xob?f>XE z6K`X{fHqCY35ha7HM05(=240v-?J>cYzhxL$yNGbdJc z#*AI0+dopGwK4Tx{M z+0R)rG60fS2*2%N#QV;me6*Fh{OsnTDN0kKjo5g9lq;;Xzdq0H7j=s3zV}dWKYyRw z|DJbt^ZTFYap>RM4!(;=itrnMgYO)fGS?~UB@4s39pbw&CJUoE*DdOiS#+~63FR}l zU-()W9Os!`p3MIxq$v-P5;K{^E`jd{Z{9Cv<+CWzxt(EvpN^7HW;$HkxHbNNQ!;~L5r7R!7bIq}$Q`yQXm7oG~nF-ns8}9EI_6)f*VsbV%}l>ht);3-kCTfBWTs z)6SHYK%qV`muDGr7tmHWe6*YUtp( zTC@*_3DHmIYlQEd=>J^PVZ6rQbbEZ=iUQP5yi-xm-#5>HLH}}%%e--ojQYoWM)*5? zOt7QCF?eE?INKQWyy}FyAt;QWNjpOT1nUI(x$nHmRrB18>&di?TvGs6K&ih%*+}!a z`XAfaJU63_{n#-MA4#ANGXWpLvxB}UpiGnRXK|j>uA@%NS=dcyN zkDq_ewv#j(=mUR$o_rpQXxO)m_+*ar*fRG!){T%-{i{Bwer`AWzn{}F4aRQlMhH=f zxdP9I{)IMW73eVRsTVPa!Zt;s1_ZXgbp?AsbRxY#TZOGTfB*fjV{_U1_-y|wa#$F&<}@(qL@(jaNgnO@-^&o+ z^}7wixjK*W&bOW8-AFU?*%$I$v&<5!H|#qWGc?Y_xb?5+wf3_2zC3Lp~8afo2R=o||@KALBdM?s8X4~?g=Q2suqRpALNSN_;G22;wIR`me;vFV?2)hi_#gYv{4OZddY)g< zUMKWftS2Fa{cfpvyJA#DYca1T>f9Q_WyoR^G=V5;6 zXZ!VgUh+K;^Ui$Uk;8d-_n-6dZawTFtc`Hqe+B2kNA>HxaYPwGqy)c5P{YIZKfj}( z@SQ)u7xVAG{{C~mGXEa`od5It&-p*U|D6Bx`#3)b-hJ-JQZ~OAp7&d(u!Ha%n4jnK zPR0CQD2wlTn8W7x_rK?LKkqMo&r5#ZA3p8*JnHBDf6n{NZOB6Y{-1F}M%~z;OYc}_ zf4(+_jRwco6XVW&{`k{J=WE`*J=c?rda{K-=bvYr*!y$-^4Iz7U+1IFY-X>Ge~lgI zZ`&qu&37E2zRv`E#Gc!YJlg^|f5&q3xtH0d^!}U+DjM$n=iGjNmOtlSzt2wnz0WMZ z_bzb$_g&|;8neLpm|KudF+V%b$JmK;e>wPLaPQyeDrlRw@YlKXGkbr|MeqmiJN~U4 z6|_^EA%2%jK0h}P8}C~#ocweCuQI`ufqVZsx0s*X`*SYV&UlVL=c1l5`h3@&==0J} zLB5=cWFn+FVlE1O1${rqJcq17x%kU=bV+9RRgJUH13uUi^6t#I5K?|(kSOGq&&sNbJ?bZLYx|M`rhD#Wz^bsS;g3XA-Q-=C#_ zIc_p{1r{VNN!CpW{)^U*{$2t!LW#N#H;3f9ER?HuJ)L z=Tbvk`}#FuzxYf#?^{SIlYqxF$Eio(&rqz%*mC}fE}G5xP~XoSg`QGn--rv4A zo4*$=6X)6N`^TR;e-F6jNGN3_s0Z2eae)Ek1aZueZ|djogU|~SMCa#BGwfa>KlJqd zY+z=duY>ry&35y7I4;f4e~0HKA@I#4@JP+aL3lt3gZMu`7ryr}zb`xlcn*JlPlO*` zG49Wwai0qL6Z3VrABYe0XS~;IuE%(OUm4}Y%+CRDdXJxwbHd-FIquq?Kf~1gmJ5#K ze%K+-=FccU@;^Oq_^pSGo#yYs^M1<(ip~(8z#QJ=Ib|E=iza~ce{mf-zu(XMQ-8|+ zoS}SCKL=*M&VYgc|E+!e)cg0nzR!j7_&%54&-I1RbN!+GQLf$mcZ8&VKcjrV_qTuo zK{h{tCwLyzE6M@SjdJ+bUxxG8&-`4-Q&}(^^6mbAZ&&~6xw^R=QD4-2-M^lX<9MD7 z&%skD&)L1-;5w8$f9eCtaOf}J_JH#F*57LWiA_P2+qd1l=kxGfhj06}ao@R|{$I71 zxxX>meEzq-Wn;eIU+w#+K0XZSPiPL=g&!u9xx!p126t*`HM zjNi}Sc8-KCY&FgO-TS`Bw?0rF)W3Z%rm}yz?@v2`VA5@Ve~+JWWj=rIcgWxP)jm?`&M$wp&;EP=xgFuT@fq(= z{TXlYeb(F#e^8Ep_VWmY)&APg`!A-V?^YOWILt3SCLPXa)Qq`*8-dL{7_4Z~FJJTb z=>1PW!2Kr72aIVmU^1t3|KDIOFpnG993-)ic3viR)g#*kGb$$ID*-oOC zpgR6P{m=jB|28WQ{4ZCC!v7z?eyfYy_&+p=UmgE zFbc@5DZ+>q4q_GL8mT~;QZbZDbJV=EXlHDcFIY;+ctrpi`^Gx1`AG2x&z8?`((#jf zY!OfTf9A9Gc>b^?&=m<&6`ERPjdwp7?fZ!UWcM*{))ae@uDMejGCJ_4&F)h^EjI}1 ztF*dq3ayQIr_Q&?4>>F$h2w!ZeYlvFP|TadD(N^>7m_?%S&w;u0+XuW`MK&Q=}ynX{=}@N9I6m|L0%JV ze~NsiJbCR$Mn6!WrcO4C1$H3mQCO?z&fg*8Bk8PU-8SyC)tYqWheUeJA<5=ME%(vs zL2ev-6&2OuL0p2UjW^^FE3bAbcju5?XSS*>-M@y-Cf%J#@4OoU^E$l%0a^v;f7fmwa{fIRmpe9bA9S}{B6kPvi1XQv;v&-Z zC1LS>wy{JB*0n)-+xpq_+4$NX!^))2^*-~Fb);C}2~LSGY<910nFaLIrv+dK+k6}i znk>uqabb@uncA1{>$K>n)B12=E~~44hR)%wA{J#Gr1T4#<~$L2H|6XuQw%FYe?dC~ zdpr^3q(xUDPJ+`j0HS0)Y_}x+gix%ng^z>*r8bS1X`P0|_VQ`54IRh(-SE0D=*RK% z;o@Nj=r8o}eU*O>R;qLpzqP;`UElldrL$H+I=F9Qg0g2UoJT**v3xiJ5P6puxA&(9 z(tZ)&FGMNJcUC@wM(+sac6Azef4lyE%lWcpUyM9%#0U_&Efjm%d%RHK4d 
z?VgLJp-j&>I;>r1y7c1Kseym*9@CJr=VLT!=EvXP2P4g1wO$i2_()g2st;_sR>NVK zqDr&hkoMsf2=he}X;hV)zZOqB8yD&g$X=4K&9v+jAGCXKA*~kPi{|tPe;xAo9kK;v zeWUUBAv%0Yh)bWs%iXP==6F+*y_iV1&565P)noMKoW9hqbr1m%l=_bC%)^TShsdKS z#5+=X>n=74q<>wWPc3p;RR$!g&W;p1mde@)nWvnEq4QLzHq z?N1qO#Ck!BLO)c(q*eC=;mC*;`>+c_n8}8&QgX-hE8P+P!)bFP!(!AOgYBm6PE9|i z2uB?<&}XSGqqTHfAzB_FA5+?x;p63QZIy-y={joY=V}@V=NWo7(C?M=skbkZ-dOHu z=d#FCfuwRBuwknme~kXM9#;BAkeWVkJ?Q;BkUE+dw==y28p{XrcuAmGE>MW$7lWLN z5uLZ>jahFq27S z9*;YhYuuDJoeGM-BVpIu>}mp=uv*IdwUoAc$rwjK(-9A8B3sE?A#5a3-r8MJ!qA8f% zz72!t`t5hBe=~152gyeGIInI?fidIi*w28$iuVUg zT`}FOEq9OG(E*=Ky$8E9knokXs>8UI{QH^DpbI|A`8|pul~5oztYCp9x*VSm@oqQW zcog0Nf2V@*zfAXLn0nb+Hcs}<55gK++#lEM4y9g0D7_ZDnys$4A@4`PL{H02lb*_B zzFsaHCajv!Enc6*;x`N0S7O1ERHSt12U2$Obt(8|=djalDF?eFu-qgFUXJv%KuwL7 zuT{-@PI%jI+W75KAS~{ZCu9&K`UPmTTdtcFe>yd!UXmA?1y0*3t#fHudcMt~Zhf&Q zo<(m!9$QRAC-uH9H#+&MIriyyHIZHJR9Wox%yl6~4{d!TzHf);dtyts-%T43K$mPz z76rn%MLMm_B!|9L?WfDbf%Bn1`5t=-oB;qa?Pa${jpTUQMtGdEeE~UfaL=hpws#63 ze=V}Qv)eo^PVS*2<4cImXsdhN5m}i;k$drYV zHj5`+U&F3+ufat;nc_a6JiDLpN0m&ayAEpJy>_cb*4(!#%@);u?n+V+(qlQf4P@^ zaR%QpqluOVRb!T#>6&E{j@sQcwPd+;&y}!hAMOA${!2?3oxk=%b(#oYK2V!P=TG~r z^iJFg6aliholtP>=~_v2>bJ6YDj`|R3J1CLsBn`UpdF>1A}TzA{&C?);iT{6X76tb zxKxm)4zH+!*Q*YLg{VsAeaJU9f3*5^Cckh7(!2wvf`W^@AV!zt)!Gphd%REp7w=-C z`UHeSEO*cNLL2;YBh`jxe_Ta65(Dje2`-(U*tc|Rn##3mu!{}@_@j&M`l*c2lkYr; zPhGC}p0}{5fe2R*k;3IERc|3!1l9{=E>eH%4aAz9#$61~Cr1q6W$pNT5?I4#;cp)h z7Nal3*z((8aXVmcky-KahIZsxJVbPOt#^lFukh~aen+3=AO2(Uf0C8EvhMe0_gL-A zO&mJb+G2aRBCtZRD)V!QN%9gj7HFbAO__JY#QC-^L{)C!2K zaJ6iAI zU;`Uz;uoqMXF z1I`_z!ici9t6oET`%&J>)0t=x61}AlsL0+GQ!(*`Qa`)RV@U^3HPEqFH`o~gD)?kT zlEmMhytUh@{POu3xe`e|`2%aV$n7ZZ8q2JzLBVr{P-W{*B9bH!7T9`S5^4*Al$aUI zHCsQz(5RnQe;tVf(eAxPe&egn=DO|^Zd9f9`=!_#(jz2lX9|z6E)iem^$d z3Oy=!Kd&W2#9|L<74dP%ILXL$f5ZfoIM@waC~JziQu`p{)6G_-yt}0e#}DrcckP;- z8Rg!-jO~LDQ$USCR~?Pm9m*B-#1Rk|M#no#NosSIdZKKsj#&)HtDG7_=>S_oeq?V| zyOkjufBp*cA!U34zVj79Fjp~;(U~B3>T6ry<87lp?68hM;#=>JpvjvIOlL4EvXAL5 zbIAanQws}fsN!r_%p2?@6K!*2(_TOndF8EDFd02LqBXc^1nXJfCSx2zr?p^PfnHa2BE za-urdN0tyj#!gSq&tb^uWu1G&coBd|O1k8=_^jE>X`2RpPw`aC8->&TXkX4zYdct3plW;Jm7r_ps}eN$od=cQ=Q} z)jR-2eyhT`yUI66W*Z>34cw=m7WdeBVZhipV~*?#d%cOh=2psHXygg;*g(f)f4#q5 zN^=E?<9%PhK`71!pvEIfqgSlA?KU}QC^YfTA&hVk*Qe|1bneKUk8~gan>-gDY#Uv4 zh=stQ+ayuB=7^1KLWP@NFAa6&?%+WZ`X``}#JJZMY_ApaGM&r&SPaALbBFlqcH$h^g-luog4PDBdA@A1u?Lb_+%igY@ zkK^#sU#~D*e;(Qk4Azf25=!{qh-+1E-R^Tpm%IV3Gh__J_ucJMeobz^KU2=yO$xpOcUTw`rVDH7aNa^VUPmq`wn^(p{+ zwZ8Lr85-#O0J#ABN7yY_`GF2D#`tlf;l;=6d$ba_uKz0ZwKHMd+K;?(td*_sek9SN zHUrrkfBfhj^7`-@bwNr;oc{Vz0-54jEhjcjSo23WLA5AS!d3=TxsGsO=#rbO0@LyZVXPpR| z?4!f6@eak?yKj>FUR-)ww;|RXxg`qj{Kn?iaIH_Ja2suN^lAfid!TvFOdu3VwPo&gjU0`u%WAzWwnvkjXg7Uu#a?sY zPX&0JH-~Kav$_)R7>oBAXjmd^b?r@&Yx2ym9t3;y$wiAiJmUc@mfp0I{&F^&#BWdX z{@mHn!ru4RYufUcjWe7|H~{m?IBz{Ye=3(BEKrY^R$C)1W|w(vBd1napM~T57GI@X zw|KklSX@E**Y8WN*$kkb=-b!fq93>Y3xP1^soPuUP!l@uvM#F>jedKoM%Y*(k`tTz zo(HG@t+3~5<>(+KdCZcxKOf`@7HR+S%QfWUEencIc0q5h%X(LWsQeLAW*_d&e>YrI zwv{|Kmrb#{xXH{}v8pmeYN*kaNlI5YX&Dy1oM&zF)vf)jc2AM4j z*GfMe35Jp{XXay>wnaVHwSH+M(^2#F!9spEQIH3+1&^Z-;9v6Ux>;eRo3p3M&8}Bu z3Tmfd7v>*J>Ko!nMT>ilXZ2^D#e@nlsySHCm zE1{%o_5FNYiR}#t!bv)Ow$APno;9gk_+)oY0`K7y;c5yqn>cS1A44$!XAaUi;bmVF zsLL|D(_ptuUE9J-5t`(=jDZ_otND>_wB37QkJm5w$+&o+Eb~wA2V0{|S_s`LD*@O+&TymR+e}x{__UkS+;24B= zY5?PK-+x}W&C`6cN|TpY0KfC`yy5e1v%;jIYR~n#@d@^R?$RiGANk}ORL9K9r!>|O zs4h+G_R(yy2e2HDyU9$gfm-a|#oN$10Vme5cm$h93%eiKcg|4sNn(7Ul=ob2`!traG#%cH2?L8q1%T zcXx%`?F5d`gFI|LMAS|XkLqi)a>Vf}a*H*ibDs^D?QRc`aZhV(5l@4+a5v{hmx}12 zyy}oy?GM_eGFF8a7U#QE>!y7o2$`!6ms@PTcdwm)voO8B_6OsnOft}!@5>7Cj#yre 
zyCa#to-6s3e}SCvL2!JtJnE0j)1jKoo5WNdg5~D~{CSg7ki!k$QjTa_-*2T`)dATHqh0L{&!s%Xz_Ea#R6$*Thg|7cu!#%qvxw{Al=T3@5I#?^4w<^7> z3_KDm=+^rVQ41)HF@vxlQVKj|Ny6RqA$1Bpf3YGQTD>VYx zH^XhKe;!Wqid|olR1E~;PW79`=@PGncTyJA**`zcwl&{F|C)R__Og03RX8A<*R_2w z-mK(@J8>~psre?NYDge*cLxT2XE*?gWoEIlNy0 z0QcpQt>M;qG>2jbKzY#nWlsBx6Zi|3xRlm$w5hR`%*t6fc-Z!f!mjb8(v-@<@1%kYOe!qw##<+V88XMW015uFCO8Za<=s@f8XVEYu9Vm*}1F5 z-mtSIM?0~E$53Q9d5GUTYMhdIBZ7t{)D_U8J-vUv5Bq>c2Scza3Db?WEyt?{w5 z{k-XO9n?y?T_!7EyyoloAW=wEZf`Fkf2qr-bG|-_l{7wb0brO^jvyPkrQPMq=-r{v zSuN6DYYEBFe!9{sfJ~Ih?5-5XNb$3c7`3ff{g^0Cv5Vtfi4YwN!0Ox4&TEZ>a1y0; z0n8Wpb3i0`lP)HPt|H9m3z4|l_IgRT*JOFkFIyw3Q*ktQuVca6-r2gsJC+`^e=8!g#cLJ>(YW^Suf33N6c+Jn&>jtxh0p|V7PA{)>GE9Tf)Zn-;SW*4( z1z3{f6?72hvy2w&WxoiP6HQjd$C1vb{&4UgnR%hE?ZfF20C7hm_s8;n zX~3RcK{D{x0)9X=<|lnd6W~_u5sF3UfnwZCxH>B$up7h@7#0l=78rNa;Xw!FM-CRNbX%U*p}J2F@+EN0Cr#5sy+}l? z2gAwaoor{u=|eE4)lxjiR6bbKHB+xPrM?GMl>$&WATci!>#B%v@ma3+Ho+{nVqLa= zVl)pG@-W?P+-VB&SgzvdLEoO+NkTAEOJq)b-5*zFu^2-A4Ltt2f8XqcsyLcEE`3iR z-gv3IxaHJw7(1}~)2&)N;qg^1Aw)in7)tbT)4O~SUBlZ+xZXQcppDJ)NUYa4-{uV6 zTrF5`4A;k}zq<)HVtPsfxee1s)c2tQ`I?E=Nez<+&tkv>UUH}@} zS<0&d9bS$W?N)l7nQoeZ{M6qA5`W75#VYAZ6BF6>T!EWbigGcEw1L)ip}bdXz6FtM z*S&6VWM9qJi$1G+xK)-4BIE2uv$BZuPCzWxp52z@QQ1D6f0bNr8tkD8J92`9jmWpg zYL}%Oc?{!f{~QjZ{4jZDl-Vt-6DRv{y?`<`of#L8&t7)Lahs~NLi_x>eWeg8YeA7* zts~W-tZr*=jU+V*;IE0^v-^c~yIx*2w&#lG^uadnRZ!35r^yNWT}zH73?d;uqKBYO zatg*OaR@Yie>*O2m+^F8vzu`np2ka?wB_1Y#y{8VDHz-g6LnL(;>OSJ~_1@%`>4s8J@~W%E$uc$p2`mAhV)!Cg~^ zE1$CQR%!MK0Sg(e&@WfQqM@Q3kZ?DY(i8zeby%(ee;iE9c=)8(!*sdT&{?Czw$b26d#i0g(5hFUe`#4%nIP{Vz6DwsSC#LU_nPO^8k34_ zR^--D*`2xQ6Qwk3X*tlI5*PI3r#>NHix4m~0cMv>=+-9CWPb|X+on7WV*O4 zRpD@-h~5QylJeF;Rb3KhIK=Nw17dP4g>dcfJc<*Brg`k-6)miu^@mt{{2#LMxG*<bJ+Z$M(>;xlFA?q!(A(<`T)%iQ2pt zcOnbP;4{RZh0fL6GEEzIjF?4AJZyxSP3nP!_8N4eNFQ_THdOFJI z!-`v#FBjWIPOm&~&C*;x;`?4&&@Ui;o-Jh37n*l?Zs|vtVn;_jf80#Oqw$Nfu@50H zjpeSV)<+GSAW88)hk{d_Amfq=S4G#tMgE~**Uy_3yp3{cytlUlV4T=Ga=SfFZ$)n3 zSWQ3A)a^w3;mGWACZ7U5InejOgRY;qYlTm=HBX?{enKdC2w5_T(ZV zLqrC((J-w+^xZlLe^>SBox=6Hd^9(6C0)fqmM|KPEEy^qvhn!E0-=Rqi&YiN}<2Zn4f(PMKtTI)sB zXq#ZLtD;`o=M3bsR!UOC}_jzC5t^HxDiId9$?d_38@Lui^c8pkOm%R{+g?!r5WZhQNHl48Q zM@&T;q8oPL2gk$Zez{8^>@^FnF$hBZMD{~zruFR=!Z+_>Hxmvp_X=?}8L^YQi8vo16H|x#F4ML< zP&Cu!01KOv>)Q*AS-BeD*q(OwqKrs}ZPH`318FevGd-|fAOMni&uv`i`--R91&O?JjI&7 zzJzjbF?4J0FF;_l0`=)1{C%Vj5Me`P&B1=C{f&h1V{I-f>P>B%6{Xx-?tS0Fc@+@q zqHE;q-Z~|*{ULTA)%AxqJg)U=b9*47(?MJ(2myuBj&gUTN4Wd5<3b+fC)+&ka3^8O zy2kc-f2oh_-IjTo0oe|BKavat>yOhzSf>}`xV;(MLoctb6IcgeTPbW_AJ~Tww;yfc zet-=-*(Y-yZs>BnFLF9UmkdsgedqPwFd(;EA3KYBzo#o#hM>|ApZ8IuUxxQu(3kLn zs1-}Ilq1%;ExTCk7j3`uQiNK?fAsu1J9h!Lnl9&L4?wfy?%Dl>k?RZq zF=Chr?Q(y-M>sPtFo*l+@mA}eb+y3CF5m=1+@tm`+abO0;>t&~e7i1g$5-N4(lQK! 
z20^LJ@{PvnnP=>6LF+Kpp3(j8?ZSgMd^#aQSW&d3sJHMwFL#F{C2=0MBjDAnKjGfh zf8&K>Y61WBwcC^5rek<^wC+Z&_o4zzHy?LOhP|Vy-beAcEcO=A&D=$Oh|*i#S{KDK zN9=bQS5C(Mw<@_089PMr3t9xn4=l4u+?=Wd^>qBS1NqrW`zkaiesic% zve|;+Vt&HTx$@y_Q#~#4)ZZ`e1{f1?P`a!gOO;sX_2ao~)^>I=P8yN%@s9Kee^uF@ zQlQVLyG&f$``(m3e$qVyXLAWL5k0ZWc)T>RVUne!PG^dSl;^Nt27GL&>v8`I5L`ZR zYia}e%znA5ccC`!qsJ|Mitv}J56S;n>pI;W&!ET10}Xv;hqC1Kn7%gMVroOh0-Zb& zmI$xa6u|i1m#6`-U_zblQe5J0e-zF7bFf%yC*F}z2*NMJjqOpjCh~HE-&Wn8$!#E6 zMNm%h=T@m|>dyNoRw9(Xv1t}ve!4cCmy}_N;24BnH!;hEIea1vG>8I)pw(tcgmD(xVP;2e@n5&qC~s1 zu72KO4+RzwVBv>iq4vhd+^)9T7I3b7D;ml(Jaw6lZYtc|s{!xNd(EX~n}qGA9wNSW zk^d7Urg*N$*m{9~zCBB&$e-`Z&Fz*a#i}n_C<*dQmdfLcWeM;Qo-Y-S`021anXRz+ zh!7Xqd!=VZoBLYo+&7}4e{SA#@96&*XK%KqDz>G8K9CnQqT(feCm?-ur>KB{sHikg ze}0UJ+*xN=or_cT zeeqf4pP`)G?L}C!pkaW4sBsz0Ce^eX7h!^>D(NrqY ztn@nphwXK?loG$(?1ne{xOJoAb$i%ScP?b~>F@!wWeye=r9eV-D%~|8f@s_8*3D1H zbyQx2mAF2gi{l6{xb?ERe6AF?9&~ptZl;g5i*Eorsn27(APQ#;bW%(&ZPI43c^drX zdnGPeow0`q$v3=Pf0aW|z}5KdF38nkPwKI8!bV;cmnekEW!#%S#_MqCmkwPD49yx9 z!0mLn4)rtCk=Ix0<;hHiU8T&X`E>iW4JZ6pkj6WgAWZkTQ`TagJ^?X5EDjt-I*$t= z9E1h{0G*Gk&($CAdnCjOE3H*Z^pw~Niq+C#4?ylQxUZ^RXFsg!MSwA{LM+ zmPk%0=NT7Vqz9>xN3mk#*!FMtRrJudbswc$J$dn^x7uLDeZ7~+v>#w19j2}lCB|$4 zt5C}5`QFx5e>y~0GxdEHY!iJ`?8N?ogm0E?YtpMY);Y(#Ro$50>QKx4Jy_Jx+e1^9duf#?2 z{R|bqi--7LnII1Z#D1tN)q0ZNUR}M`Zf%{a;#JAde^`0@0wzo_FQpsdbCr$7AwL|* z11L@5{m{hZBvR|E61|gY1KDTrF$_tFh+9&&<{jX<4^G!9JzZzhG0O2){l zK0Do(n^p7AVJDZ)`bwxP9Cu|XdrnxI+B8l&I#kzRpQw_mFI z%Gu~ce@iTRhV0BlTrDH#vf#CabzhCA`^LXT>2fPZ{rae*^#kz;I`Qb(Z(HZxfqtJsKHi`N(Hc z9#K~a*N8Wo)Rz2~0ruLKw#bpSD`bjXbM{GyqobsKdIY7d3c5ts*bAIKc> zVQHK;d3?O8+mF9WP&1S1B?dWVP2O%y&?)R7D(Yawc1g$YMz>qG-fTC|)p2{fj~N4J ze_iA4Q!>qX@`_ohOLk|Ye}25adg))Z+%IMJzuds*ul}1y>;Ca*#Xab~zdYJmo#J1z zwEqi(b`NXMSs?fRwbzEN6TX(u7;v`vu@C2j<}3FGIyTtIemyx!k}Okl{%AA$a+qg5 z4$$dolDkq6%s$PsypP!)oQKE=uEk8sf6eAh|JsR*KU%>*T zYv!i75Yfqj^EGw3VT40SLD|Hf3cBY z^ZlF99)LU3b^g#xqr5I|*dLeH{CuYGM}gZu3nv41<*k>q{rLRxKNo$S|H?SEUiZk( z)+0RUVNO#e{d%v%omZ}TVGBJzR|PJI4(uG2V)awW!Yq~NTGn_IHTKMSz^?S*FCdq<&sFK7gT)AJzMS679m4+t&3kt~&^QoOqfWh@H z$)(=kwU_6#<%m|e<;NNne_EBb6QMipv`muJ`_YUyX!~L43!!Io8rLfUusQ%yKW0iN z@wu?Kv8(PT?_6%0E~=W(Gq215T>H+yry&3n^25vh)9Im4!*NO%>1MkK*BSfPpG^-# z&+tST(KWcipX0Oy!ZX}=n^;=#asPge2Mt783%bLJyw8c#^(Y&if4iY9ybuMWf2g(U zU3QWS;ZSZFf&hzdgT+|r%AId0#}Oq&Ob`{_yL@6U1rq50;etx0Gi6vX{7)(|CqyV(Z9 z9WF1wr=zO9mf zKJN3yU&d-l>IXvKU1072B=o*{#+Uk4tl^IoWUL+0pXx25ajpgSRlj(od+yg8dA)b5 zjT7y?Msx-rO^poxIgOwTEG;JX;iU!3C~-{;dj#*LH_a<{KN{%d{ZZ@)l8GK?iE!!< zCj!Z^#CB51e`J#^Z?}5yvO9vP_5BO87HWd^XEFidw@X3plxo?=q_f$Dv_uXIZ%f7- zKNN#~&krTPflR@Z>-Kmv8YxllG;qQN1PVWNxZbwUsH_*eWGJL9!ERDhCNyjLGAykF z_b)pHf{SXF__X`cuAG)%L}EQ{7ni*%aqBwBUfbLHY)dy}?qKi(7qK zTs8n_`cgzp=W24%kb%MJIpNs4%wbj2_l`lwR$z@Pwd4Er86F_-Qr3{N{d* zsO0%UQECmZllCdIdV^wg->o14k}i$wW`ZE|Lpnb7m$2rM+&BFB<*^OT?y(46TDr@n znmzKze_MdDYP;&x*8_O6v7uMGa59?%q(>E`H|jH>&-qkPtNS@1&VIUpQQ-P!FiRuM zHmk?#N=JqFp#0G-x^2L17or)tpBE#Xs^%3T~1ANq*FyH76mf6IxX%GKs<6V5|q8*>+Rm9|ZQ*fj3( ztrBVb$+e$cog{xKb%In9DC~IQUA~es#D(8|;G*`^wY~+1T}=H;92G3cGAZ?jn(*hi zS#GXy8eDAI0~s$((O-Scxry3s!|gdx9jCo4hMPBF=#=+9Bc6|^YkA^8!s*7bW8Cxc ze-IoV*Rqcudl?hL_tVIS?D0CQbu<#)E1%%0)@6zggO`tR#O@Oh*VVBRUuiSuYwdk* z)^Jr<&n3*n+&kfZgys6;?kkXyr>TR~;cB;Wnj*#ErXfXS?n3<;EryZR*O7d^tPVXk zoef#f00Q;hT$kgru};EpTTJTt?O2b3e?GQ;WegVqj2DBn>N9s7t8}w^`vt&H+w61g zh)?RU+m7C3@WO=104y~i^bzRWCo^!VNu3xuOZ4DOKg9qRzUsjM!Ju3jmm*o5L(Mv7 zXgjX~*W-z4xnE*6tOw(MitiRtg8b2~P=u`c+cT#o?!}anyGll9pjRGbO;Hkef2{{3 z^SfxD*#lj+yJqN;OHLc>nSo|^z0iDp!dRzVUCtk{PpI{V&=r_~D+f;AyZIs%uLo-y z`o{!V$j+c`fa_28Z5SeI@jl8?(&G$&z$wtRn};e+l9n7x4Oe`k1|>eezy8tN=bh!N1;<%JznaC%ZuVK-n=ar3K{2mci 
zEKI(Xt`S%TBr&-#+;A1gyY;ZRH;WY1PQ!bAf;8G4K=`fgF1iPye@Pei5n@igZMgH_mj6j?tAY% z#Gp?{Bm}3A{PQG0>?o{MI$6DT2hZ#8@(ZZdgR#56YRYBR^T8?G68t11)LgEy-8KIr zDWMR+r)Ps|%>yMfSi%V?^nYzPev~(R9`hVXav;8Uc5$|^CtnLjnR|$5%%{V}s4t(i zf74Yc*en-MntdNn@q4)`o?tc)^N2T0!7~L_3&XCxMKL#uWQQK~jDo)o({xdsZA?AP zED)mz0AOwK^_R73jE_zU$LAy5Ca>~wTOfJyk3sRd@tbFDHQc7VU4Iu4Q;G!sbWlLN z#IJb0S7uXC4+4w_{*pv-H9F%vwP-Iul&)cWFD7Fjj zBA<9NzrWq^o;q?34F1EQ21l(Fou|7BMx^4R3SlFmCaXXgxz|#pl6Q5g4~v@ENyeEL zx6xveDU^7$hzqeeWpRkE-||i_Fx|Oy3m?Gl8%+OwIqN-T8-G{OZRr?5psDHx4BX|0 zytgUF92AB}oH_%@)zbr6uF@jmquL*u753f^^unV=!0Ka@WymlNL6e!Ed*=gLd3uQGDqP;hK3Y6Z zPii8zw|2F=F@LN3=cF7>^@A7Uai3Uq;hnH~vV2!5o5ZEz76|0hioKXV;1P6u9#@rd!J^+63aR{u`L=Dx=RdNOIYWez9Pq{dxH1}cLeX_T-1)v_ro>Z3oinKkD#O`O} zp{34NfqUaUFKcfl0qB1TZ+%s77y~7>8V{XjIdrf*_kW&4s1<#qul?E%NS3KW^Q^)O z4gezqK5zOa+g)rR4&abzTQBmp**hD05;vcd=uVT8T&z{UTP4S3PUO$qo(d5JDtV({ zr_ba5c}Qz}fqscUT-c>@rl|=EH_kG#Nt+J=4%%K|^j@5jm$W&zuNCVLxAd-2EM3Jb zajHdo7k`sT@mN)5E+xpup0(Iy_qBh7>Ji!2WAOGg0Qpom9FM2xC*)kObPt$bZZayq zBfDE^Fz}&$b-yTfr@^X&=_Vd-36W@*+nri&=v}`iUTn75X1DC!EnkP_z)nN+0S`Aj z!H*Gd0S0E8FR#T!AIdT_Sx@92&z-cbOhA<-cz@UicV&6i$TymI3pw5RCw{#lZ_$Y1 z!@#+ISU;zohil_Xi4Yak&L3B=1ec3~iWvwH*L2tK#| zl%LyY!@dqW2e{sr2;brx%zcE0&2_chT{TVFv9)S!RA+tskex0;Q0#V4h^J3*!G#}c z@;Z&!%O#0WtW|kUcTFCl-CxA5-kKt6;(vDYBJHOiy+;QoxZ9U^t{5us&ehGoqwVV5*d0=VY98)b$D7ZdCRd15hh^+- z+Q~bv{r9#9DT4}6;Z(2Er8pg|+k={eK5DGDsC4aj* zoU`D(w}9c_c&{a~RG92yfDykk5hSzW{4{sXfqgQp{HmFa05SgNa9=ZbFxNh3*qQ)f z0EH6U*Z57WFo3JBr~RP^4GM6hrNJEN6tlthEC<)yWr)aZ|5i_eExrT9O?r#e;VvgS zT`q%!{^r5*zVFUG=AN?7V|&q1{Iqkm42W4w`@!*OdhMH_R&_dRTR zEX6#4ps0;bU$X$v-s(@huz}mK9+>D|Gyhy?3=7`$O?F{2(F|N_>)6K@#6A z-P~XUn~!@hP*mtWJYV>KEn_^%#;erErS)>I8@FW-FPP}T?s#ca2S%KVKJue`Lmd@@ z9ZR^krZ%%FhaWt=KuCya~la z?;h52;FuSEq}SU}87oE=v&#o82m89(ChNzR-BZU`a=(C}q28aZL75$7&01yQ?)Uf6 zk%_bUh?`-uGs9~9UVOL9`q?XohB6$ZKnoiN02}HCp?|q38L0YF`h~V0ItuyDk;QQG zj9I4nISuC{ef5@SibO`_zNljl&=qY*A1MBzn^C*swNX^bG*2`ie zxWX(iFzbj|EzY|q6YTcl6TLI%cnjOhL0CdK;=$?{p@^1b^J+{0WJf~WuE7}`EASC? 
z0U}BL8NkAiL^VP69}U@2<+v+L+uz1= zv0FU+jkFOzPas-1uVt(QV}96O8Glj8o-tP5{`y&emM0dNb+<4tS>>s5k>sI(ru4!c|b8q?2GTsF#-hQr5uKlWGfkCwGxQ>&yHZ1bFT z$}-8f$JjF0fRrZmL0X35do{Kzh%T zq1elw0EI66?EW}C(zoly$A1&@^t+ka;QQ*t{pqitHikJC1orNJ#FkLJvQzd=#_SbL zU%lBSodRAk#Nt>qvl#8Ozg^h@ZOGGhjU0wT5RUVLHqjE-uad}YsYMrNJt%jFU3Ud@ zzduqjpp(r|?5N~L9-0#*$>F zW8-*bD?g%%$7|i4+wQ&*tfz2vkg_+bSoWza>T_P=_8xCTQ-_XG+NLs zP5ENye(Wm?SAgE<$$zs6GE`$v#~iNK39({Rq>m9B6)w##C$?925yFZjygKj?Fb6_^ zWxs%krwng9s0#Nhe^otJ&BM;C4Ez#U$v;-Mh5di<9zIgad(o+Hwx`vb0W$A)h_C8_ zsDab$BIa$b&gZ6jsE}{EM$AQ_WMzMV6(e;BlhXrU=SyZye18CEOg8QkBH!&w4#b#W ze}ee&cyD?3Y&{8Q_bC|m{)*`^Smg8+H)V}m~GF`d$OX* z+e2q>Hx9^7X>)0guXo&Fi}~uXU@3k-70%j@(NlQ20)L4azqL1=W^ z-rv{YD~ZoAMN?^EG)vXVZq>Mj5$xdv)Srij?Or?*V|Hr%;kfLH_`p8Mo6@O)C!#sI zn#$wRt`(e!^gdwBle>Sir=SwEM%}PITveNZzZB`Eu#UWc2en39RPWWP0~7XO6ZETA zE0F8}9e<&KncJD&D?C>2!DAsY29BoUBiSdRa{^@)0&wXA8J=_)K1^rLUc>k-f0PHS zysVRceKI#ya10?kkb^n@-hc*CjRFWrVH1AiVaplOCV)|m5EI(sMJz@93%z8SGhqtC`Bv%Easr*1 z9e{^$Ad2h>* z7`?X!;m9{D@7&j7WmERM!#J?osFfo|69%lZH6SYwNGO!jwVB#v^Bgy7UN_qgT$Ekb zX58m-O)Tw5G0%4LF#?oRJc4`a#JuIPtwsxHQ9USnMx4^;%6zQz$(OtrPJin|;P1)) zCUll!*n)ce0BU^*paC4L&?H$uw|T7jlL7-VR&AmN1njpXf@y6(9w<)(7N--mb4D(pt|vK)Z#78y zOc$jZU5ACl=Hnw*c82nhPvvLliN~D^1c0W>BOxrhZIPf$e!W;%nVxol);r_s7ytxz zIUL(EMfPm34ucHYz9xO)+P630XC_QPZjbN*cEpaP;`Qsf-a6MyEq^>8E3^>KpZ5Y~ z?sEd?W7j!1pnL3EJV^Iy*z}q>Ft>ZU8#EhK#&GL$2e)kZ$x&(|Pt zt|w&~l8;!~2bZ^8$2MV-ZH z0j|L^gEB1o^SEtlDSvV0>Y%K*=hO)saNA5ZW5}1-s|CL>K7M>m?_51V^PtA>Ce+vE zeS03X4-uvB<`iRI3}|-RxP@XT6|a7d1gU@c&r4=uBKvH%@oGJIweF0?#p$s)Ah&o6 zuGe}zh~^>Qt*$oLzJix=+Beeqeuw&8eLDdqi568DUyhI`1b^VcW}0Sse91H`u>9k) zbI%stt+wu`T$&k~c+%0@-0QJkU+%BvttohOi%b^>*?E83Z?8{^((LAGV0Qjo!^aDi z(8c=PSoP`LHd*w%)rZG+IuQAL6K~JpCW+>FKU|PY7gWP(JDt5iVfK=3)qRKzDOjWU z;8*o*53oBvK7SrUr}7zMR<&QiAwKo#-k8@Jf|&NggQq1(0Pj}xXG24?q&{Q;g=uTP zw#C|dpwU`T+jpD`?!@9}M69}AOHN+O;`02@?>8M)`+po%o3(wkludnQ)Z@MDH;3sp7%;1Ug*{Il)h&#vA9m$Y5S>~XS7>J167r~M zvC?f6h0uXiU|DUyp&*^i5&3b3HM+KA-9QhR5_SM^1%=rYTda(OhY5712ruPzi{<0N zgCr*8N?w$pvg1(gAOMTGDVY0GA1lofLN~7uwSO#?djY9IWxevwwX$4Ejk}i{ck7G~ z=L~PK%9PZ@uF|eAG+~ro0YOBl(n=T6Wt4?gVc6aq?w-262bi9$v1BIWrh-M?oNVZw ztT6@k^S5~F*Zc|!a@>AXh!V9W08S!6e>-o*-1bxd#3+pRbbz)2O!_s&_Ehb51m2$b-@$+bnuCTfsXQ z^k=&PRdf}Tkc@x7wkz&=@?a_0-rVR-xqtP^D2_YoM!gAc$I2h}Fnam}rr*nScQfAI zExKKlX-}{*4Fs%ks5rotuH134wKgY!u0H2t-8PH;Ex6B+fPJ2LxKy>o?2myVr9+2*osfpZg#`oJdr-seaAhx zJ(L;wK+Jo0745O*`EdY^0{C2ASbtyL0W3;$BF=&f2BA4D1F;h2vyBdI$)w7azBIOG zG~2r%PnT)1;NwcTLh;N$q0Yj6$V{(VWt^t<@$$UBmp-en^0%x%V$#l?Q$@Ynh3^Jv zXYFs${yJ#4gVvpn1-HCh$$eX=SjE<9qcE=J55Us1+O4}4P*szYp*-FTJKdm2yKA z+rub-lG+yf6mq2hrPcO zSBus<_?LbDw4b*QG70q2kADuO7=eXRb&&sq{z7~O?TH999dYW_UX{A=EcXDPSNl^) z^c9d+k=$OMUbxX9)t=rN>aE~I!op(tI)ti^8@@Nt<*M9n#qh#vqM{&@j<-7YK9=Sf z3rSu16A6r56z`;EzY8|QN|S1ThpW%T$NF^bLJEvC)n(vxwlD|L1b<~GRO=gcIKDuF7*ux!g8KN=E zb8cP$eZ57_69V|O5^GN1lcayxzRoDWo&{&Xv-trllLYZL>Bi0pF7HG8fSt1PMqm`1Jcc1u>VN z0{OfXo>SLu?->o3Q|)s{!QL4ee8#<0m|&Dl+(S*n}eg_SlkBuHFM80%4xz)AvkA;FG4n!5g5S} zUoZHLrh$eAf`7u|TCOQ^2)nG=(W|QS1Rb8Y@N({rSMT&04?q; zRwu%KtI(s{s3{;M>!OLec6SO{i$WRA{{G;f`8LbY9LJ&?fhrEs)na=`V_s{@YknE z2}XJ)L}uAfiT>X6gm)Jhj9QE>DPcmXzvE}L`tTR|`p{09d)nBU->u#daH(~%yd@G= z@cOicvZh6k7stC--OZG?@ooxrF0s3O|0$z&vAV6sCW|X59|>SJ%sPLZ^nd36 zQNY>w@?yZh`48Pk13QKD=0EM(RuW!;8%OdvzvVMdvACne_qiHzXGTx~Ddujy@PcVK z2QFvqSX*wxh%i<;@e(7Yz1Q(hD_2h3&c?a7K8txKS2ukDFZW6p`}!@+kOrqSgjHsW z{g|T~fV>X95-aF%Iom>9NR9_da(_w9ovSOFlGD4^Ag`pO>27#v-nNnllKd!5I36H# zAoE(_&JyHi7K%l9gtK=z!pUF0;i!O0jZ6h`=$>eK1*CJ4A0S_!)ciG6!YqBx_SnDA zE2y`3GXy|%AHcQxhmZ7M|FOc04XX{=r%rKw*&ab-_$A1)O=Zb+6 zk_+e@+)lY9WyC}RM|Lq^bFtXamo}hi@c19xoxIZ1MO$wtzc2yF9%1XDn;S-;==SvF 
zDod!%w`zQnJfOWCsxb}#bLtaD5fHuPLL$E^5T1pQ!=lg0M0x=}Valf+YjXln#HJKf z>S*(Da4?C~k``Arcj*vr_J5+29i-i*b8C}!uXIsZ2|K(uxQ~sgHt}NIErV=oI{kbg zwmWqMYl=rDok_O-DJga5$q=F83^SV+jNHp&3&!+P zx`mD#-D=4CTlt~V_SCtJjVc29!u>2B@t*Q>!Qfcp!Q;(powhj1%YQD~hxKzKg1#0P zkV3CDcvU&clJd4wdFRSgoU*(ClaFNaA;71Dvm*c~u+>-i{QP-buA^eCAb}_2!FN{A zl@ByDIgwYK9Ax(EA))#5VtORZBtqyx+yye9%spBhW}>1+ZTfH~1!o6qgfvB)l@)6Z z7e+zIML5xLiORRJdVe!}gFtj`@8#LN4IX4A+AD1;>(53xadhiy3V4mY-4(Ex;&6S% zsKc{FaCkw!41@jNy_Mc*c#`3!14t^0+&80Q6KY%;yaq<3Y9=f{fjHu7Z_l-hLGshb zlw`ma8udq#fD}mH!V#uT_OnC^Y#W}>FSgs0?u%QBTm;h-Q-8Kyk?*Vl7Wk*FbAHdX zHQfJGKE~(Q%M=Ux@ea?SY#iX6d3{>R89|5K4I77EUgFJY(3O5ZL2c-{?^QAlYyfO& zsLt#9W|6)^Q!~dNHaMhYV5kDotsXYW%e98&UWt|Q2P97xGh{ygh<}*ExPON z1X?5IZ*KkS8O2WK)2&F*>`tI$c0;lU!1&v9eVs1-t$*Lr`{wAgt7gMl?_(-IuE$}x zLP`Rb!uqNrLqD}2@7~VRO9$O7FVR>)Qg_0z^ChJFA>5mUJ zVMB#YqVsP1PL3b*{5;c(QU$zla2cX}YdO?U++0geJgkUNZ|K*hBl1jVJT&N3db){2 zZZ}MuJAY_e3UDRcLf(sj2?NK>7|2mfDs7`hTqvEy9xe@6wizPuJP!|I4RbX z?niQ>0j9@J;2FS#{0ZK!b`vrU2(-EUIzjpDurvvNMRM}OVgbVftb)u^4j!@PPAV9e* zbnvh9d7?O-&BV5s%vC^Eu*+z3ddtv=4S5a550_vG=>9rN&!ovKh>j&hsj|& zh_97XX6mSaRExa>ds7Y{Bq1DGyLxpFPIc-6AQS2;``Q@q`gRUk_#O=}J^rxCSXIz~ zBguWPE{8;=ocg-SuFQs=+MCGRs(T}P?)P?Tbdg9fnC+Dd?Z;5}AYm*gG3PyKpJi}~p(n@73*Wo0$Oq7- z5NlZlM7RdxX0_ax37}lz2sUE?(|;UTz=EyFc4KRT-8^s^xm#D@_-q`Cid=S=5zJ-)){J0&v zrq5IvbjQ=+^jbH>euJ6g5^RB65;W_5L~xbP-r>@+fVy1JUxuuK9|WXBVt+1QJxV1w zXc(mK>}H~-Kx#|y=_ubxLRhK+q%H% z;X!9`bPaD%v~AmMQ+;~4m}pK?d|Lzii@0LSV!5H<6OUkf#*{m++XjO9;|>Ti%D1l7 z{j^l-l>G1@+srw|u@j)1Gk-Z--KEwP*LGBNTSD05Q?Ex|CaKMf!l|{~P_7WIsr4Gd z076!rh9{doJW}BzivVC5%()xvx9$hQo%lASS(OU5$thdB1L{cj+efQCW0C)q&!Xsb zP#lj3Y%Ps4(73ZW;7AN}P{;Adh&-&} zo?^i=(-6SNxJ#F?1UfEn)+axWG#;a*vEH^Xx;S>-dw0WxviII)tKUxxW|J$_>dkQP zHQby(pdOcpMQHvi=lBd8Xw$`{VX;3AWZll!1t zd7c|ns0+A(C@APGr(sgvWf!7T=Ga2}Rnwb?37b1RmEVO~uE)vQD3!DmO}kSJr{f;} z5pz-JP#qxpAfAf+a*$8216+TG8k1WaeqTY4{GhJz5f=9YrL~y zqI{9MJLi{c` zsNM(QMm>z=%YPCy6VNuQiBWLzPU`cdnh!M+ocz}7?-We5v z4B4nVH>6f%%B$g_&dm1Gv(ml%39r16kzH-8ah7!29)C`2V-KZrxS@77nVNxcc%GRT ze_Znz&wO^o0#vwn%62<{HLWXgMZx;c-)VbIekR;wf!q!C#Ei0e<2G5E4*X z&4t+$d^MJy3gQ$j<59`M>h-5SL0+1tLV-WiJhdgMD>B|pd}|aq6j*BFbA2YNU49hA zdw)ZyZGX}N(A_HE>8XnK0f0?Pro<)qhYb978H0KXcsRHbH5xH?e- zc+=3gMBDfWI@_5STKd#_bR=B&eA=C_m=EzM#eaK>+#p}}_J%3;Lwd?!DlQuj^fQls zSD^WI!s&T?)o+X~j_bMiqfqSA5|@uXfC`|$9k%RwyRJ4ayvDWleI>O3mMzB!8qv&U z%PQSHC*AjT-JafCJl&%TD_HDm7$P5dS5nUTo3PQyPm+ySnsCe5@vjl zNq@iYAtr;f!Y-T`)$kc~>ILi|uD2~zgjPkNjtxb*7hhk1D6BO}3^<5rjcHB$;c+m8;IZx?Tgrgr*=w9r8*`T{cU_c(Rl&vAa(Os z!coD1cTXPx20pX08?uXa2jbMiS(EC0Nq@nmYI+~(!Dfkq>2akxBz<7@9CGfpVJbS?dFnwQb}vO!#T4TJxX*J?g|OqV>0I2J%?`Rf>#cD2Hre@ed6EhO|G493M7Y^ZJgECl+QD2R+VB;f-YLNUK!1Kn zuheR_<$;;A<0F(mYb1!P(;J#A)P1>$tWljPaHLvj8NBO6zaK0#IhV6sFq8p`<@Tf& z%n=|V)n#Q2!NfkieHf*S`il7W15^Tk?*FIO%700%z$flcX$1xU+yJn@@*q8*`;_?s zeUb_H_=1>y|1wc#H_u2=_jhWE`8`_}ejp4D}L9 zDdJ`BMA)M8tCaHXRq*DlWWsT<*jMXDvODJey#oBaXq^vASfrSOIxlQ{F_`66xS8|- zWt6WH%B+_1)q)Y+79JH0gD7~!w_4Fp+vuolub>KQ4hfMJ(f$5B*dDVI%70h+1pi6; z-=F89i%-?bfdWSt*FXP0&IR3XJDp_sGQIJm63ox%9+NsNpYUI$6a1&VoqyiIzvkSm zG=u-q!dZHS$ETZvG)Z2Df6TFNoYBlRz@IuutK(7Td{1CV1VRsfF5{%0(_Rj1@VN%z z|BklAWRwd>(F#-K(Z{rW2Y(s27Rl<|L~$xh3LetPN$NmG_&!n>@DHbLiIx<&m~CVs zIVzBW#dR5wa_3e(taiATlYf5C4$EWyJ-$9B-zhx&VM8869{n1S-{0c~V3B97+-G;O zsL~Upm3Mg`ac;tQdJgfu!~o?D3c@X~aILOztk%zut>+%NFVW+S6@S(YhiVemxR0d^ zdlZm>LT=KYoS#Mb9ng?TuK2wHKO~cRWdv);6m8UQd})^Rby(MIzVD@V{a#-n=0J`V zNo?Wopx6j{TXN&`B$Rpld~VYC-UdIf$gaabdue6I<%1*Lh_7FQ$tPDO2M^7UjDSqX zn6|-Qh3|s|JI5-&pnpNv2UN=KB#tq7ML2rZ>0BqTIC2`L%o=1WXrIig7occ`uyGKo zqv9L|z!HZr;B&L$#t6_UG+;_=k}`Q&Jgo6$?)+&%FLCJmWaq0LUr#zz;y6;PzUn<( 
zz$qjL%o03{430ZYcrX0#d%^wVzFkMQO~l>;bZAA~xt`{((AmEJs&9Z$ZwK^F)w?GJ6yIh7bL>6$0g%B1y1^2r{yND%w_0NWz_t8@NOVjm%X{> z20)q{#Ng;jLVpkoL$&6*F3^cpQUM%vC~%e7M9C}5%Siit_b7gk&Z#`&rtrzR)BJlx zM6&t!Xj8Jp@3_rtLI?Ioo*Xo*5*7a)GZ*5oQ40)&a}wRfuk=%TNBA$Ldx zoMd&(2#pVxT(0u=ReNe)o~SN)&|J1~g#3Z^dC*-lRDWUdB$vtD0OjmG>@~?i2y@sZ z&-V@WSX>XUIawh&nXCEu)6aOClR=4JU(L_jlN|YE=28wN3Qy`~9{=8ovx1c(5_&Ig zIStuN&newt>X5N|$`kgWkbU>w_niYHjd!K+{fTzDK@kKfk@T#qU%3^>hR2|x1b=Xz!fv?I5hw>u_<6nN+V^?Ly>!!i zX~`8(9lvrno&yp+2PcFWHiD~((3t(|P8k=1;(v;OM<%!9&{`LEfh*2S3hx84<$K@Q z!{2x(_sP7M=UB1e<~^TFKu0$05s7_OGVfi)D~4}I{4L?PMZFFpdwmY5M_aIvNwmas zk-ZIm3z?hmX)`NtK^Pa!E54oY*W_~C?|_(x+7rJAF({d=At)>#?*Q)sYxx^fbXpQH zSATGJFr?)HY>)~qPWT+5g3R<@71jPMo_0=z6%Fj1X$hUY+lhfZKQAmA+?&JWv_UR{ z^^A3Z=T&H4Ht&^h9OIExa=$T-$2}wN;=bpYn8i367jg~~en~b)2-P4zjJ0XdlR=lT zP6V_IA=J&J$D1C)we5~JZl?N>=a5O5#D62+<_-kv5o;W-A|_T4dbQ<*V7~tgW)q_^ z2Jj)r{c{PpquKKJzDdNNtL1C-zpnA;{K5OdLb~{@0(nP@G2kIrXf@iT%r9%XD4!?D`x|u z{;H!&Vo%EiWasvE13xE5^nh@}^Y92#68Kz2WyH?UWB*b*nhe%Ddz_eoU929>9c&oBa-Dwf<_~=h0FcHP%U>~ zXLpziAp^kuLtP$0q2h2^PLxySYQt4~??uIZTY%Ege=+Hi$=5cX@GbFvUItbf`=)yADF zA#=3Y5Vv6{08rMrN|bb9&*ZNIyM|S*$rDX*-b)F*zFTr%BP3v75fe09dnia_w1MY{ zB|q=0?233WU(Z#`gPS!)%z5Ab#Riya;e7k&xy<>@&$;*Wwd{8d&wC0K+tArS zShUdUm;b!!DReqvafmjEXEwf{T3CJeWU6bYxd(^I^RX8uy5X9fAI|aJ)2|ue1A=fhP7|uOw+%y z|KE9xqJ(t~$c=WWZ=jRjVE?VHgg9?)rFxlr;I7@P)$Zd#O`td7P6haDukPTox*gzK zp?mBz+(D^ew%%Yt<=9+UWh18|)kdD_%=urh{pNolza;@*;`;b`9sR~$F)j8&``DvP zqn6!MEy$>XA$I978M++I!=~1$H~rjZ!~)UjbZcpvbk@r{*7(( zGqn-pu}8i+4bJhOI`3z{BsAw&Y1!tHowE@neuEuGN=BI%a*%bq!D;s8Yy@3-X>^c` z!G9uQ_4d)9#U&fzG0o|)YtEvBlca`_oz%SgNH;CpyS=9s++NJ#4x=v#yCfE2Zj7^eh3o=6X_Y&llaXK8}(a9N%41o|LpbX=N|sn5%K`kAo}!M zqyENre!jQ)?@(UMYwKIvV!hxW{^mm8)qg(}HrnxMEv;n;4^L}7aGfAMC~C{7>Cp#? z0~TX%$L(m<&DXi`Y1c=(y+*7>WEt-zh`H=bD~$f&GhS$yIE zQj$e`RCo)up<}88w0sNHaHuaK8cQ(!?!-M<4tlyuH!|g1Cm{chWe0T>g!aE{`+uKz z_;a5Ab4Edt=r>|QRRj#gC4UfOX5 z9iE%1XWB*{4bWW-8go9Mb5!KK$gflhlLlQ#_{q-;QOpt!KPLnoDdy~`issXE$w0~n zC^z3EyLjC&>Mm}IwYSCI+n2)|f`51|8d9b_RF~YH*E8o5G$|}|qvPZ`54J!S{k~`9 z3z_=6kI(aZ#r^)~6W?__e;v=`H&)JTVvYkp=Qo}iqki)-0nKRW4uZD&id6C>6mg8*~4qK=o^6`mJP&|-Z*fBxbIyu{Ovn=NgViAZwX8<>z zbNG_>^7BFM+4;N&suQ``Pk-GO<1B=;2KIaL&olUWb~y9rHG*gTjkgkNi{JgRM2}}) zV?TM_Z;buv{bal)m_*PP~gNC-W_ zf0buae!Jpr@x&2s74Jsy5|@K1I{4tnl%fnz33Qyic&Cmdp+(0#a~_L>X}{pPk0d;2 zSQ^gf-SF)n;C%jBC;MERbD{ZO-*b&?{b#-W^up$KHpdX;_}^!TbA0|fo^3R*zkTjd z-y9oX$JvDSC71VW+kfaDGWoIKVei(fUOO1?&;Zv4Ht^08Ye$N_P>O2^K@3hg5Ql_^eDnGeNrCIO%yi|*q4Pj_ZGPb-q=oapQ(L5Z}QZ}i) zbZVK`lz~i!;b0-5!?rIWQNPuEy&?yk9CN-1`Ls+p_68nF!hhjwSU+4V_SMh&nd9mA z*(53q=oiqQ{;kCmevYNs3wS;_tAEapd9D4PIp26Bq&d`Knw6kUp@bgIUhx9PDf4^B zxBhOpZ5b}xq-Y9iLJ}v1B>6N_vSG)arY+Ci;fFLbaIEer@v5;7R3D{5-KM<8`bBj# zTpP~$(pei|rGLS-jP237Mwge}d|w{|Gc-*|djtIihqH6smj^SP9zx9XeZiT2?#F%K z(?9Qh&i7}LE@$2^^SSbOUea^k^^KoD{Scfd=&6-+9+LMDV)ozw5R&`RCfH}BNlb6K?f5(9n5QiysXaep(^30#U^*AeI92aW;_QELm{ zGtFzIeRdfOtM>4MbjNVP_%_2)QHm9ZGcA=|ASaP8%XSD^ z;T&=1-2OK=_&q0n-sv|#n$MLI`PlTANB#CF|KjC0r~Xfl_RYu6^E!Zh7;-o~2T&Za z)&-^uR)2=mf>guQJiX@E9<*t5%{7N`7miMD$h@bz!BP~tol*pz=`x_p;5@~kWj8iwfJeXa5Dw@U z-r|HO?4Rd&c=)GY$-evfXPtlF@9#R8bF6v4U;_R7#pf*bTu0Y$c-*x|N&?Y!zK_P}!vy@CD(a;*Mg z08Bu$zw)Nz=Izl4oTn}(Tl-pJD5L8~K6^p?SU42*4T%k%EBy&MY^L)Syky{1g$ob{ z=?s6%+5=pmKO5ggZ^W?#seOWI&U}`S8qXT(PX@iMM3=ZkN8bjnZi+hjp5Ty!aCH32fX=`k5y={?2;!n zNCQQfEIH2>PM!OLh?T;#pi%-LFwVZ>P6Vl+v1@p|kK|&Iu8!V2>VU40iatS{1Q@bZvjt?`BoX zEvQl?>N`o9M`uVqiBS@}xn7gH*}+45Mfx6bbUtt8Ic8ekUoQR|Kj$3lpV&CZ4o3KX z-(T+a&F%l{j=&JwEV ztYrL*MmjHC7icFvs|lL3cbR{y`bZy6lpU&D(n{#>3D6K1A-AA;4os+e-)NYPD>tyD zLwXyVtu|uXSG2&7CZ%y*SV6vWUPGElRG-cC2Az@}vusZi7^iZPcRPMw#mOD#5_(G6 
z_DUveY)Z{6*@nk?C6!Vx2_dVF?QtKnH=onc1m9*Z;}aesbBl9{9<3f8N1-uK#ED zi?jaQ<2?hMf97D+g*7frQt;MAJ#%SSc0_Vb2`vk`)>QGNn=E!LdVnkOnx7?%vv(4# z_bLeQUNOE9f*MSPmI?Xwkng70<6?ony(>)r7Fc z{6M-%(di~}o7Of)M&bdjBs*~$u$ zjmSK>HtYk>cgC>roV+7|?8SJ9L^}=rk12nD`ybwKe*RCd>Zfn>7bCxWeeQ?i@8{?J zo5TO~^Z(V)vWD-w`qshoEGB48R`m-H-H}^e4v~N5JMVvLFw&%pallz}nI=xq?X9v8 zKoS6*nLZwxJHVB&+`JVRo#YC|J)0hw7F=MSJzDGUHO`;XBn+P<^byuMWN(Xk{*?X7 z;__a_Zpp&fIz|WEKa}K%wF$Z5z}|lihtN0v5MVwrLf3PEupFkn3k2PKKL4D(f6w6C z^ZLfRpPGNP{2zU-pSpEEXMXw~-=9G~_7_X%bEY@XcweJQUi1#EIi$Xo!~LpqLd#soK$yIZt~+)3x#)(J@9?4X?IYMvtL zs>ho+*Yya7qlr44IN#%OE(a|7JR95SHVOT)7#03GN?5Rs+VDWN@Y>Q%VtCFmyL%#>yYMxJ^&!K@~XFTTm7y>1jM?IDo1KfXKlp&}e){dD|IBOtdIR>H*ZjqL%&Y$DB%l9@MflqHbCLhx z{m%QE$DcS)e)8*YO}YG8+rPQ?{5^kG*P|o;Kb|!XL;^GYr=swlQo<$}y$8)pS**K+ zgD-+PX2+M8m1@m59;CG3<21B_Nx`jpI*E5zM~6};VPieO<^@$EMjs{S!;)08fLc}( z>vaV(y2czyU|+NdNKI(|w9a1CY_6#q;FbROvtkTbKXBI0$l9g|HH&d2h|~SC$nG7M z(gj86OXaqAh>7`{T)1!x2(X#nxOv`)YRE@Bcd2sk@Ws)Rn6aRwGUuB>6LPU5z;>5& z*4zm3V^KOQ{EHI`lCgxaH2I1O{J_v!J>y`YMG0_bl2T8Aw)aV>PO5*WeGTzAK_t+f z{>FYx!3%`DdMe*ZWT$|Cig) z-uE!f_5x~fkwm|4{X8ZRMryb|7z)qN+&1-D2%oxdFY4ABdXM~|WhTEBcU-dZs>CYB z;OaZ6A9&3Z1b9oL0X%;goSi0gdlT`4YnZpn|L<$jX-f<9&81=7Ll_Of&NeK##@WIl z>*bVCto5Z{fq%2kU>aPuaHnj;ROl4+jr>$-C9cRU$oNn=!+z!!%S1RkJDZc{xfCSk zzW54r?g`KR9~Jqa{M#q|$y1=w@c+i_{DY&KbMe0!#m_u+?vsDaF@El?{d^xm52iGH zf`P?wtox|^9-yJ>CX~pN94Udh&sAtnk%qHE8Mw|F+XI|rUXf4>66BVXs;$bt=Fto4 zq}(~moROfXgwC^cM{jT^7bG~qH;sc=9Z;2t%x!mC(OD+f?_j<`dQx`tilmkt$`!+W zi%gk@Ob`#SlY@UF4C~A%pbSxR?*OKVGtLe5{=Cj01PEI{#4af^7lwJ2?&R(c3t|oe zfG%(;boMj<;x=ebh|v2oe}6uZe%5J5VQsm1c9gYRJLR>|&nYK-I5bs1V=5qI{QNw} z4>N=O1>Dg0=TVT(`J)32Kp)&tOY@qrq=`>*-Hg=&*b^pbE* zRfc<-^L2kBVVvW%N7TmEP66U5)RH_W-voFDqxwcKXhV1xal zy+wx%lo*{2k$gOS?FAehA6$c)OZbC42Ke-%jJym6C1O>9f#O$LB6=?{9lE;$lAPK} zv5VH~MVO5&B~ZrEhd$&y?Lg`hc`R_QKY8qE{?31+zt3{+>&-pr?>Q&uO6%t>y+{9U zFVqhtADojrR!_G4GlJ0b0052z8CIbUgVDj!%!M4EX#a zX1T85R;CJkcw4u68si}WB z*9j)4owyV5sURnC0!E$B?4=JRFn^r*?N|Q?2O!VJ^#8SQL7n=orBu#GJv3w0fA=mI zZxoodc<^jpWRLp97byvhT^r_Ph4V8%!29y3aSeJIvKLk^kNpua;SORFv7r=G9AN%N z=`wOsMekkh<{L{nDuD206L7!;ukL>hfI}0IqGJf_XD$qb2;1n#>+Xs z_|Llh!@TG_S zqZPVHc(*s+GhP_@NUUGbUV!hK-^2HMetLMoAOG&PznS0r{B&P|!3jqt|@T%P-!{^!N}j zVv@K`KF=}#b6o=DpTD^H|LuRd{(C+%ePcSV%@Dv7mzBt(BrtDx=oz8M&gxk7Zm07-9^-<&oJEN~gvrsqiBRhCVzvsD}b|G%ydkBvnaa%x$+66QI zd-sE5%dXf%sBiI%{NH#z|M^o(O!J=mo;N?W?p*W#)o{pT!xtYy=AM7+LS-5b;uMtr z)UDJSPxg?gQGP=I>!#$m|8vPDW?9`8b&nIq-3e01V+P`=5F@qu6$U8zITYZMLOKvc z#O~&@(}u)6vLqoDJ(G_lE`>q~=XaPd&NnvW?*HaQxSnr~{>7a9#oTY5E+dEL{CDhd zohA*g9&nwFx?2D}Xs>_O=^F$>v|81S2X0F5D?{-XJ`O~$kdxClufV=XUkI`i{fzuW ze;|8(&vo!se%91aUW(`VlYjp8(tdgc@!Ws2{C~do{eE2U&#VHygYR?4=lrfwo>*B1 z`dBliM}yi-35WlSR#rAQk#7s|Q9I5zLatpr41}yD+}WL6(glA8xvBHuF}91LQvofJ zi{`p^bS)Gev<0b1S^(lW4#>N|cS$z9f@U|E zR#Hl_R!DE#>rG&7=e$##=adQDB4?b(KY021-v94=`rh;Jd-?bCfj%tNn=WbaL;jRn zZK*&N(--vu@AZF!KD1^v;4tUCq*d)A4=hmKvPdx;4I{@r%F3fb-p+z%P} z@FH+o!jZE_2$3_mAAi^XfgsN4_Cl-&rrmruuYdSx;~>QPmvPfI`!Pry#s|#!_0y_jF88j z;}x-)&kvO=8BTM!=bS(3CI0Q3KYgFsytjU8mqD7J({Eoq3lY!$lUofA>i1x-|Gv+W zy-6^LWB3=xpd z0*j&S(QtpN9$uVwdY~18RPpa}Uy5E%c~WS9-N6B=C&nHrw~Cdc{l+)U#xXJYXTE~- z^cVB`d+tdA_jk?}-|JploccyqZhsU_R9IcG$pnHtFK=J;yfI1_d)?gyh^rlytk9YR z=ANx#^y!UwVz`h>issvzP*ywJZB6a)#c!GOc>sU&K4j+Pye_j}{MO6|_<4tzFMe~r zf7U6k3H>@?bWult=eplHFY*V(CC0!|YA!8ok%#~BgM0M7(3{Y)lz`># zQt*cn2PNLeT0u|&7nf9UZ0&?1+o?nN*okm+nL2O|I#aB>g-Uf2@D<3LD@#QkJ2NuZ zd*XkD$rz8$0r*A01M%OuhJ5uuSf79A*MHd6f9C(++Kmx$Z-_Mk?&(_(^GwUW6Ryf^ znkx}c{B!WiAP>$Gc>C zZSC`{lk_QSd$Iz~1T(=7dzyKO%vcy<^Ua|nN6 zL^b6`g1B~ta2k*Eur%!=KdHO`)KO*f524pHfR|iHgS$cj@=JHurF&<}P6UlRgzOBK 
zBUZQT!GEy6{7c}J9e&Yk;OAvCj{&$KE6hko+I*+ZTG1Of4fFR*1rxgin7%dykU> zw@;1+XR0PmE^$sNy3@^ad>H*X$76+RlgmlT0T1fNC=kO@HMcgnSg+J!9YG!MWzIc` z^5Gyd*$^8(8s&?N{jCG{nDH>K&N}cueDChGUc;m)TkFo&&|T($9^YbLlCgd@MTEqU zIAcfWxja-~xJ=@yp8I^~@=kyChhO>YoBz%~=4bFdBY)%k9NY2iuy0XQeK97O3j=40 ze5+S(9o1#Vy%OT2-a!M)-sLP*ar!b=xrb4)p^erRYBpX(w^Wi&-jS>%bLdzF{!$u5 zIie8;Ib&T4^Q>J$7E`)MXB#c~Twrs@Jd0r9 z|FvHkKl|+BNBM)<{m0q-*Z%$WL885W{Oe1ZwU)Fh0LRUEgeZUECX^4%!X2xsKESx} z7$^^5W69T{zV0+V|Cn``q!sprn;IjI(;zmAfu^#?Zas(_IK=z1LtY4n(pVSC?^wzfG zN_b?IKXH0)9X)>#hA^oGXZd%28}c2Gm%rjNe{#wfT*ys4;-tG(F2D;9TLgaWCvLyQ%@AAqgWvfZ z7yVDZ-hbF*INFL&to7QuR)-yI$o0n+(zO^Vz)Xd8yy|~G5VVB=Ji-j71x;1?bV!Oh zYjs79naxF>7IT2l^l!}A!d?(#69Tc`?)<2(8ysk0J7li&*f{q}p$uJt zb6lUGNPK@%<(hVS{ph|VsLejT&n=E&Z!4+L_U zfBYY!PUg(VHVWs*_W>P*%CSzKqiB)43zzdY4ncpg`_#{3_XF&Ku>>r`AIveZlfPKp zrBDA8f91~{TISgwUl{xyB_BFhcl1;PI!SFZ5h!V+z+rh0l{{5j<;a9 zrpxmX!vz4z&iyAU^r;e%43xX_j`AlX?4T(bFdyW4``HBSS%*Lb&T-6Q zm%x9ogwemOc7Oay%YO6gFZd^$#%`SHvBiHQKi^lvfazZiz! zIN-m-LjEgUNk*?v$R+%OmFRRYG;P28fcHRmtf%5Yj`|L43v%u-znY`t2(rv+3X6EU z^3h!%Y4x%_?@!KmTY2w_oW6JP$wmoH#!uro*fx&Ww z*$~8=NEeH%i`K_w9Ki1TgRx%LqCa!|Cx7-Qj-s9aIp6mO8@H7C#d!SWz5JE;^T#eH zz^AEw<#P=NuIX`dJ(jm-4QNSfUnzgwCfwrXZ!cf2ZiRL=^yJKvSdV}XHh`gFt)2dP znV;7tSsUl$d>gIlcGYH)mcItKurJH-Oj^{+Jm1B^QN%&rIL5wWbprWeJPLp zXfXXS>!Y}~E}&xBL|C}6zmX!uf)UeEE;9MqAT$hb&INXWdIPi@radPjRAnt$Ju)b` zNUZ$5;42s(#m2&DxEz0%xPgM*bH9e^t9}-SpO=E)pVuG(^Dc(~M6imtxXf^W z5c-~Be<`TnWet)CW4#C44$`~fordoMRoU6g^MVf|XE%23Ul7{RGWm?S$`Tg?9}0|Y z4-zf-MUx?@qao&IY|ckwSQtSJM|1HIrGYnPId`r&Tj$GHv;9IUgz;E)y zoAg0Q)m#Euzy1K{$Mb*kPXlcEq`c8;FX5l=>*=u;FT+??YS}qli1~teJI3eHKn3Ju zcv+_UmR|o*7A?-NAL^#x+b&zoQi(tF;D@AQiQa?S>E|yvpRQ%B3kxnx5!dLa0$PG= zWYos&C#>bApME@Th)-Zt96w<0)5tYYcVGgk>veGeh?IaaP5^%wK@iNF$O&mC==X|Z z71-`y3j7W~_4zOUTN1EwU|4VbK=oro-t55(WqaQE+1#OpvtN_Cuzm!=rZJ2sWO=aE z1Uk1Qd}H~l4cbV74EwCL=7YYgak^XVR3%c40ZA9H7g!(SD-8 zT|5m`#*VN)11o=jiI8myu`6)?tM@gD7P(no0R|04{`ss#ApC)|o_&F{3EwYJs;gq; z@4nW^=oYZYBYO_wfhdby+TJ*?UHnEhYP^>d=zjdFBG4Y-cc{x|VLF(`kLFpCF{!nY zt^*J?|6Nv}Ha7#w6o^2pyL$)RCCH;mzXqZb`|t(NgcN_Tze|BrMQ-?L26!%TkC6c~ z+rWKYK=9)P$bz-Cdu;4gz?Cg6BEhedHK8kk(+gDAhP~*4<)|n^d=7cTl!iX_t*NXK(QX{>=pem{`?mAJ%F2V z{{o}s^80`7ZKy_!JCr*(v<*Mi<{J3OV)D^$1sj{*n(=3rd8el`>6SJoHgE`wubxks=@+35ABO%)UE_;PZ{4&hzorEIJPn-O!5 zQrrL;aQGLAp%;AqY)+2~?`AlYY~uYJ zKg@rH&kJ))!}H;bMLVl_XD}TZYG4|)3Wh})i6K9Iw#IzJc)B7wHR9dS7fZit)jdF) z?oL+a{#^(1zrN>weOvCc{<+UT+GrWS2CbXK5BdGppX<)+1)dp)@}n29s#sSxkq1vT#x>p>f#ecNxFv(`6l8?s4^h|BmsmkQP|S zHj0b;tegKhcJM6wXWn7UTgKtiH}D3;KjRRV=yC=%Nc63TXDU$oL;l8}@$2JpxW3bT z{6@W6$RZBVZ_At_(XSfD-R3fUFNl9h!`46=i*rNj!cV&bX|miuRXrJu8`lY#*PlL# zmiF=?90!pc^snpncuut6t1|Kra2;Sx=kRk~zfRxH=I45f>xerq*FU%({aoLkaQ*vp zy})(j_qvGdx1a0ggthKViG?X4OA)66^FbmtX4U5(G{Gznx)@9U)%!1N2igne!H-d8BI?I3eYCXmf4NVfJ`Rn( z@~qm%nrSa%2&Uuiw1$84sDRD2%w6MtiaOJstnIR12Rdx2pS>8@YPgW$w_f_yzGA)I zB$%hmZ=7XMy#M}2hf^0i7VAMVLC3$pg7s3FAZv=>EPK$u`;8%UTZ7E%#kSZiYX=fC z(8habf{g)3%3{Yj8cuji{^V($;@@WDK4S;+e`sIxXK%*u@$7%fAY;69jCB>!*8e!h za1CN(f9#kI_Qe=>+{Ux2cxm6Yt3)pBOV7Ie>}kMI{EN1)FsB!rqrtN(d-K8^cq(p> zIgp|*f9e5IjnC$W_3~e=x6!hmEcFm@lvvi}6P|g(di`Tp{EK=Eu=n7*|FLs)XbEDU zj6gB~J1;db2mXIeeKf2^NI1v78N4=jGhFuyRSR+z^atLxApv8PPIvakI173=}5l_SRLVr$~>|8#9>+Y?R=^5123w2ij& zFov)%jSd3j>rEYj4fS_y-T(7>KCxJj*W$It+OVv%3f6z`f3Eip>+tPQ-vg)*uNT_* zU$o)Ey!vMhIx^}kbNVvQ(Qlzcf%J;n#Q~lZoOsxGz>s16b58%E+)1Zk+y47<%NgQ7 zz28zlybE&mU+~KZnvr@M>_r6{X+U4W(!lz*oK(_5Jc})B{qk+4B8>^3Z^w zKj+i3ZpPYTPZ(;l*wKHUztoRKe?D-(jkS!&^EBa%0bWde_F(kmiNBvu_>9QI&*$s% z`Okmff6H%|&;D=u-_O70e?R}0|NT5o^N;&%!m|-1MH~fZ4EX6&sE_XtO9TsNEcz2% zQEstcf6Bd9B)7 
z3mxjunCO1q%lT98_x(4&<*?Q+?~fdQlpB8EU;8PS{=Wa8a*N%$2EW@s){Shj&y-J( zjKt-)-=*)57*Eb(r*4hkIlBBt*vLQS7kl{pcX@KL`TkM<&zyGDzsu*a-{7C=|7m~i zV1Ax~{l;19wEY>^63XM8Mt_RM7C`xzYAj_zK!yANT^9Z4sDGC&m%5+-E-U@q_n+s? z7X9d~S11p5Ym{H=uCOStqP)EREsuL&QPzMz#!>$+ds*sz{<|#JM%3f)vOni3;-A~G z`l&Z;>YsLzm-T6VgdgRnU5xCv{GWd|x&GAq@3NS$xcA>>advjpf0V_%!0PjJ?!=r= zUkjfg=`X&SWzUKo)JfK{?@dTh#`)tf+i?%RYbcHw@Oj`&rrWS3Hh=AT8R`b?J#sPO z=S7YTKF`Rh-N@GtHfb)uzA*WnM~6BaPXAco<`_?}<@*lbe+bi;?<25z%ea4ifjQv1 ztgEVQ1p9^acXM<1ND_%E7s-$SgI3XBWdocMcOTvG{3~S7mI(}k3LUA5KflAWgEKw+ zF7XRIX={h=Mqrf?u~F+3$UF5kZvybXf>i3iblYtKjrcWf1E zulpnLI>5{3Edd_k58~{9et3UdXFqWZa9b?p7DoOBhyD8+`{Pi6$ z5oZBY{q-IGV;q|P@g0#Q;Jp6v9U+l_bKP1uj}3U^|N3qtHAJBoig+C^;`N4GZ-FZW zbsL=Fiwhc^`IAS9%o3m-E@(ac5nxs+a!b7~OMSe> z|2r-9#dFbeOJ2c$$9;c+*MkLs`+b)B{NF7PO>wm6WizjJwAbhN{SL_DS=s>>#O3FA z>rJ!V7r!T$`X=k8y_fp`wjV+aI5)%35AQvY{4m=8pY`)^{~?I${MIY}Q!nqcv^%~p z{h%KfehPq5*Kh{?e%~+S0rv+;m052AVf&yy=pVd4>U~*KT7ZAX07>@p{-{^{0lz2k z760##kAGD!#I2V26xtEL`}gH>{m-~iAgjE*Cv*n#Wt^iwlBHe=^APjzr+z>6!u|0b(afLc{ERES|4+Tp zuONw7f+U;c5;ytj-+%vHT>tm?{TU}(w3G)G?_s(AQy%l^XFUFlW4sUM!{fiIAD)Z% zh?ai9xQ1>Hwt%1cvAh@h3*~>t&riAEdLcfe{`uVxFPVS&J6@J{#CTqoQsh!$ovQzg zCot$b%k$AMKlQ}8WtVnD`)7!!;l0ss*y%jk^-q5|0BYnh4|>g>WFjvZr( zlP*=n;=h07g)LESbk$GOe*RSLXa0frca1n7Q0_npU@+Ylya-P79~@p#U)1w=-eCTG z)+97Y5MM={74rz|0FSU7=E)K#TTz+3sl*8ZGt@Yp6sefF*{Cyqo@h_BOhJGxD=~?^_|KtDh zzb+~<|8tpS#sB{2Q**sf|ILJ;V)(yleR3Pd{(t$o@*1Xl@!#6z7Wd!3d8-M{;w?$Omf7pFCzH& z7G$2ueU%fGY5|4@^ALLT;$Z?HvGEEf0GuZems6G!>c)4OV}yM1L)Av+w-iflxSot; z+lu3+i@*1Df-X2?hu+YtBi%GdqDm{3%v-L>K!irWxUoC}Q?-5nr@PIOq88db&eu>bd?xHRQQlyGO0F8&aeBfoI&jY}{AK za03A&P9dmmTSDGQwQg)B^G=c*=e9Qjx`A9$ap!5KS_OtxhQxaZXN)#%jQh1#_O*X; zj-<=;u}l1o3kj+BKrnP%QVACd;geC>s}-Hp8qB+DN|jJHlQ>RLT1Zh|1eXvU$aRbG z8@Xg$e$7sR-dI>l?sE@DFVXT#!I;n;as;6 z`6@hXwc;RvKfl)fOPo|ey%*%nsPTWXyqrIhb;`5Nyeh{Pp?{e;UK=lw6si6*Gsid4 zZ*2OQIIn3x@F|(ibmDkECc2*J%=XQBl=_e# ziSMg8vs}Qj|Jr7P@An0tDQQ>jS0Ax`H&c9C@8`&Uybc1r9k-soS_R`Y8jLiRQ%#7M zy5Up0$k!ruyywY!UafIZ)=u^vyk7f+^F(7aBrgyJJNfGtf(h;nbo75Tw^!Ozb}LdA zHmkj6AJ+5%JcG(;e!?EnoLJgCpKsSG8})77zen#B9M7(1ux71&wr?R|pAo$WChCY& zmiq2?MhEpysH(oq#(9SmH5p+?ejyL*NFAMJ_tk?r5x0)uIf$M$m-|$G=-roC$>&=~ zof5+#-2D(}GL;w)mfn97cV#tD3AhosHg(jdVJ@X`DspgutUeH!NJOxn3v&VyzQi2m z=FK!Grf;6RHZkle%&*X0yr_Fsxt@;(ZBF3!HBDJH&a*9lADtFo5?)UN3Mw!LRouEq zC`*7)l2ug=<`weLt2pStAVE(fG57Zc(b6YTg&g*7d+g2eG01;i;5zS0d_^a3<*HWx z^ks-KXJFtJ=k)Y;z4?r~LU`gutV^>9# zr(KYT-wQjR3T0;@8l~mzt70_$B=Jv(uVs7cI9<&<1I1xCRfn$>w+;09)aIHT2${dK z&N=!{jP-;x?mK@NpZ(W=200-3Z65~3_S8B(Fy#-8EEL(2#xYUTDi?RU;COanwqZz_ zo^W%aH23hgF3}9~5a^@HA3rP+T=~^BAE4Z8^0l!Y8f6wUmh3`rtLW;6Tk-L+5~rPG zl^a>v+A)S|2A?IUG4#z!3<2Yw*oO#1}a zsb@wK_WjLyyZ3u=QST56rlKi5+_b#O#`Go!hx1;y$##F!$zX17iN`YbUJCaj=;?#2 zz{LukH-t4(M{S1&NiE6+6b(}q&-{JPwQivWML9u}cip|0}!zP%$c_G(J5*3E^; zhS~jv7NZ?(qxzt!*Lwa=HEeeCYah;)jI@n}v^AGlzrGHnpq!Sx-@fCKkc7xsrMH?2 zpJ`UckEikoD%Q%JFHXffwJKNStqdhP;j5QUF+`hP?Y$`4qIquO)AQ&wogUwIwobn` zZV`VD_xwh^&)F*Y+;^r<^K^|hYoEm;+uldxY0d-0!QA|%9qq@;CpX?knfv$* z-UuqbHr%N-A%uMqPvjc1W5b%eCRM7uLQ;Q)M3MjY_kP+I-8E?{%IiVP2DD@;;PB?n&v#zpaEjEt{R@#zb_YAKmk6#;Ku!Bxb$K@sQoeEpUbRNx9DDBii`g$4xsBCno?(g5PI&Z1$<619vXx zm*8A%cb!Xcz5KFY(YZCOR+h?#dEaZV?<5<3rK?nhoZ=?3AIY=Q`Q#&mtbl*{7+8CZ z2&%g7Lp!LvGHnmYBaxA?SygTJxy&Q!JS5|`kv2yNp}XHAa)kH6$d9Nx6Lea+u`i_) zL<2!=-h{=j%C7FKp#Ba)FbqZ&UnG7lQ0GoqP3wbRtr>Tsvg$>q#eCffm(W?}hAaVE z`r64&_^s-Da+?EWkU@O@PTqe%9i=>xE)}}mTBA8XAh{x>vQr4@5u@z6Z4{6ShWBSa zDP(*fd#Vl3R#Sx4y-Picp>G0|LtVp+tj2H(-(qbk<_U}A0*JoW8ENlUGtpnAhM&Zi zS?m3QlfqbBDem{_@lU8UTTJWT`#s4p!S=?ks5p^WC@vH4p|rNzNlRJzIT~&XD1s^*wl~e) 
zft!P5aPKhF3ov^royC8I)vTM2GZuS_x9oK|6K7%nrD|fjpTzHtt8UO)R`$B9XV${1 z8_p>2duo>f9cgeOp0oCw;$d-=hfWJPyp8!QL8QSQ+}w)7{C{ zln}%5&CI@%^Ty`$BtC6L?7|bNyakf{Wa@`GU&<|VpL~sGjYofJG?&kYZJTb#N)@lY zxJSHEz7s&VRSen9&k0ihGB{XHrqZNDXt0kQ2Z=NG^6~Cq5x&M{KLmGrSUu!TUZFnx zFwW=G899pGu9@r4 zRnFi@1-qED(FlJW7n?o*5i5FJeIE3_euL?67_x!Nt+P0fR7M=x2K2mgS;u_b*jjiz zZst}hxU?chgT(X>-)!+ZM|YUoQB(6JH(G@BzE|R-t4BJi8hgjDUg>Iu;ns`CM|`34 z`pn#Wvv_MaCVSke@0#Sc$1#g2CgsUDPkyL1XTxwkO5J~Tt8>OSsJ#7fZ^I{K6g<6Z z4$@KM>^osGpWf^gqp51Q+=8UpAKJB-2A&+x`}cGj!{bLxiSd2iJ+dZ$cV51s+HYt~ zO~tLY;yNc8pU=%bOhl08LQUWB1)Y-i_m>EcNYD~NST8#WXTB?BkKdot&|vZ{z z`A4~ZO0CwF(5L?DCot0AkaIOW?$~mga{;T*e!J}X?X6^H)}GCSQj)88``q{M>+y9_ zhDm>N0n1+7UzO=fTf3dhU3i2wb z$H$4MGm?3ZDVKZ$z8C4stL3f?CGze0ee!=~OOZ}gW8`E5;poR%%(<=oq-j#AgX@OQ z&+jsXhUq#p_#;(JqL9!j_rVh>KX->XeJWD66??;hi#aY^t>TvX98`AhVC3hX2K4CN+vs93EP-tA?~vs(rrf`Kb~%j)|WU2ltIr)o!=<>bUk>K&X>7gJnuJ& ztii1>^CEz0J_DYwSt&~=^DmK*b)(*iQ= zPS{y&b|*p`!aRTUQBa1^Ern4`+AhwQjJdsKfza+K>Ko#k9s^+#)7?I^A1!`1jr~I{JsExZiL=3lTR5q*Q6uDHpQi`PW?z1vAx_62# zr0A^_915kf9q!aUep3ysvveRRDgZo?GRilFDI-z0kwVEZm{c}yEA*W7lK zSi5rith`%}eC8GLm2a)8ji;>@gasaNvev<3rIeC7ff)EC+`WGfhe4&s?)*MH5{f4| zdEC|$->n}IXA)watg_*{l{wBo9yzW95-HENP6$ra?I%dBoyjmsJgI?m+;7R~>_cFY z>9ltBo~N-x&e*;dPMvXWPqY0BUzv|ILeghncx?t-=CKp+00J;4Xe_>L`os<$IqI~d z9UK2l1aJvVFvNd4?RwRj7`o{VDb$;U-j&xgWBM!;8;HS10~F?N>f@6snmd{2uW)cG zX*)A+Uz5|(8^twg zi8#9WfG})`ax~u1+3lZ33{e%13dkM5N&3RQUSQi|?Gt~qqmxaL=6q#Eygx>6Naek_ zV@So7A4;*g_ag<3%e`XrO}+-bs;s=sFyWP+`(_!Xx~>`OhS zpi!7Pr2WysR)3Gr{S;-FVRAvDQqUmDI#k#2@Vb1Y;E22>PwiSb8~PJiJT`K>cTd|L z?)Vir9n{VErgXzq9k)LpLA3I0NUxw-?VfNP9Mt}i z{ovP4BoQls&h0yAALELXffWsOhG4E+co>hsjC@VIWLIX&18ex2`=?i4;$Vu5O>RCj zih<#0N^=>1p5LEI`Wn`paO^}eV+aP@%2rM)Co0Vobe6#-<;*uRDRz8sZL=o66sfLT zvTk$`tM;iU)*;;f$C}CN%9pN(@J3#@Z{))3d)n=$uy}L&e30(`&E)KfmR=|3=)CTs zULQvO*nf6O6Wvp5#EIJ2!lyduChsz)aDN=Mn!5#mEBh@a1y|J!2$J|bpNx;c8)&3g za*sC!I6i&$yQlT$FZm1X5fTJn>*)5Ka;>k&-9Uu1k6Bt-)^3Ax-SY zLuj24mpFhcc$0+4^H8VyK+R zK&TIY!?t$xA!mWxo_(0mc5Jdue6Hxj_5pg5HdYyWJB#1aF*r~Og3ir6uX6~jsWT3G z-|yb}JVTHqB%`~@&XQT|c>Xgr-Zk8ewX*F;XC5}E(KNH{K|EmvlTlaoV%kOv=pSIqp)x7xzE(E6&gPjO} z{}hB$J-wR?lEaXbz{r~dC-A+z6HALss@BczFU;Mm9M?8MKLo%q0xul6OfIN-N}ZGN`>3b36nZIsXpw3?^j~k-S95r ztNTSn>R`Pas3XhF21jY6_*b0S;)qoW)FYcq?R}urJK=F#r`z7|D#YEUIk5MB8r5bcU$IDk9$7;7 zMfba+I@@9RhS64C@+t3S&}iFe#$Kndyebd&(^eQ&Uaoynb=u>7qKJTS&eAB~lj zF*JYToZfP0DP9K*vkoQD^Q=5t*v|4}&D{*^u@BAL*Y_=dDk%o)s&fwsfvnO{R*98f`6raU?pyxgr2@D6yx5Yk z=-g(6D!&pcHy*b*BY&pVvtK>WuAsicTIVaAdT0Gy4ptWq5025M&{yN*p#KuAM+pf~!e zF?B%7PA72?7@6CFkFN+XD!a!;Y=KKtc;Q?goc657s+^NC{isxM^~QFhjusoc)~7V_A8Q<#o+F7esAndlh%a>#P@ zCT9>QI~|E$3gC=kJeWvr#u&F!Jg#1=;85w+-~>%|tm}S%dmq6^612Bc;DinLlG~|6 z9KyGo*T4jEZ)P5G5n>z>*z1eAlH8LagS-#Hg|R6o9XauFWU1$`6K9Z#Dg7N7jwT`3 zjOX@Kh0ps@_d(=P4pRH<&tEe(80Sbh#6$#GsG$)?C1dNi$H%n2z8UJvq!6YZ6|fgw zj6u^=i6kh0!`uv4nR^_15U%UBV47QUJ6$D;dU~^Kazn~?dyYSbM~d^WImq*_tK)bg zpG3!PJN-Nd>fTM74=E3|KvB1o#9Fsc0YyLsE_L%Ey!jEX25tV1z2Fz$*q?L# z(PZaU=zU)=5o~Y8T^Mj8<9m+@nIvD`>DO~g45-6@Xmo^NX*m=pa|{mIeQ%6J1<+$LTQNh)ehsAO-VKkCF#08JSRse_cZ14U!(?DlxuY^ zR8eVv55BsdDAHm$y~A+0ia}{~y+sRZckHD!PP6URhD-e+>hFn-g~EtAigU#+QP|V# zV64PzWY;!S+@@F7&1*0DG*6e|?}z7&@PSE}vbs!f#RcJzuikKK-6o!O=Q(LO9UlIJ zqr$Sp`KRX!TPWPdT0N0&O;BJ5yhC4*x)s-dq3Zf_C&bP7n&Dc#WBH z`dalMw>2XLuq)U-+iJO)vP@1~yERE1#{^QNuPiAxTKaIpxqs|dV&aM8s}--$%?Z)~ zW%z^%xsrQ3=+#z;=t>-8HX(_ z?iIJDpZFr}NmbOQy1u-{Y9@n>NM)X1)k*$lhgBYzR;SS8jI2v$)%A}Hs3M_qar3F2 zKJKe_I&O;RsGpvlVtTJS@mGDc>r^IAH|INYWQVp~8*IRIgqLjQJfrDtnEC3qr8mvl zg+P1R5_g{hWM#Nb*A3+`W#ue= zqPe$zkxPD=S8E=ZKqhGh`nAueNwr{Zo8sPo&Ls<81*~*aBx1oLn#ZRaw#8i~z 
zS@7B)-g3-eIltZmp_BULd;5k0mOJpbBeZQ%npq3LMHRuoGf?9o-DtAN8aeQ z-=DQgIKT$u#V}>=yWv42W=x4T9I6MiPj5%Yf(gzTP8xn*qHSjYxyQRhmpy1PRqH)r zbF}05;}Z-!@G^joXXFD=xtP7nrFLi-NW1#kw%Zi0+%99!<6W3g&4eY*-B-yNwteb-I}Z+w%6 zVxNx}MW}CWO^AH0Jg%KPYf>C>;q%2)F9I5@0Tia#u+uqH2U`&TO2El~czHBqy~{az z7c(mkT%mSH5lmjBq~#1RVZ1(e$Fihcvrem|^-NyE=0=Jq2iw6Wk>M#iMyW_TtxT@i z&Ly2qbL-aK+4ZPwB9IqEeQ`1rhR54Fo5Y5$bfrENqtLl>m~ao#b+^{^Wnalk-*EI; zBoy&FWh+v86h&kjUe3WbiODU^^CT-g4Ub)iUFz7q? zY(I|Dk+&EzFF4&+JC^B*ux#SzbJ*IUB68yO7MP|nwx=#lERZvWqkJ@_!bQ)M;)j*C zy&>tIt$}LeMzs8~%C3C>w3ACGy8Sk8>rn2#XXYZpNwAZ+WbDU({-)0I|f^jS;TlF@?>TD?z*2t1DDAQ|bGJ6PvbUv0GaEjT`~epKMM7vK>sl*kf-@dBmP zG}a4<)zD3b^fQ=4mGs3?A-AqZ`C|6>x>ys_E^yxsdGkwupEDadk$i6bnY6p0t527+ z!i{OLI7URNEsm7*Nar|W&2N!xZ+8DUG^K7P-{+1PN~m*2+oZn?+`H{IBn^VE?^aLT z{ixsSOvn-HrY`~O@20(eeq1`zt(xcgTkw$(3+DmUh&7DKxw|l@HJm)rpqyakx4?OX zRJ%Lh(zr-}j8!*fFm6nB9?bY~vTnmn5ww^|@{0j=a{d|1%H`L7{^4P=6Ou}8s@z*2 zn1;GuuH1lxy{*rTvsG~+^}RplZ$sU19#EueV%WEYRkNr*^5yH;kRb3k8do~m+1@|~ z;Q1BPYjtT$x6E28mb3TzTXwW`+U0XSvu&5{(#w;7R3)A}zlai&{8d9!$yp4FR+MTd zE3+h~{qW`Il?kS`IvCF{?+!N8p`=&(Rlq^Rc!iyyema2}$gd6T&ez+YO>O_+)vRA# zYS0GGxrXEyHh=W~MjBg0V)^dzh()pGHOxLPYA$*Asvn29Sq9IOepPgGsOm}Nn`1$g9!yL*oeG}VdGwiR^PGVStAj; zoiyGm)AGGPZrDt?-7S5$I+?z7xjn<yz zy|y@IS?3sWh@$JSdPb6P`A57b&L1@gA&(A!p-Bp0NxGAn(|vw0?=q%R`}W1yzOWx5 zGON!urmCkn;gl)~QxSs1c6AuN$ap=%Rv$xv9oW6)n5#tqWxma6gY3SnxZYmv(R!Gt zNTpNui&)Y5)NGD$oF;l6129e7#tYF`1v{R9 zYnSTYZ^WYa7BzB#pK{-r>g4w!bfb9OZ-uS|Crvy|VA3_lT<==ZE0a6>&>prc%#u{G zPn*B&_Pti`n-@7)2YoBhd1%giHd~EAC&~Oen#t|qD+-InSKMCT<2xGOU&cVzYVeH@ zw6WuybGi4vd&Xx2)8h$AHXt5V5Wz=(5{9SD*wj7T2Wso>ko=z#JHcZ6{LC|^t?z~g zD$?cdtv~R~y!7p`6#d&C3WM8-SA~t4Ib$^?-MF7?q?`8lS1jy6nP5Jp2Pe~Sew)JO zt5UtIpI1Y+O*AkWrO9rSlyQ>57O;UIx$dtiHjRmEY+$1>%6&FG>fZS!seKfG?kky) z``sO^?i?W>x6&x{07n9|8LXMDvaWjQ5+Bc4J{s<2gG8W=y25-^WOcI`M(M5ib#b3O z5S*bt-n(6wm>&}6om(JtoZU%f8K#Gmiqqlzfg95O_T51cg4Hfx3*MHhjGXohKC;vaAI>G+;Lr{YWc z1}32Ry$@o1Qd|j?aL7yGGQvK7)4I1jGp(dbZ+EcAAvXSca*8vaZ^jr3Q=;V^+iAzQ z8*r?!{$BTw`LL3Je_Q&sG)m9eKE&WM$Cmb=bk;u)_cA*%w0hEsvoW6%-`;gxP^FmOx^;qjy~JS zd4WNH2N`I)zRX3rEeHrle%120$0$@iTTrNOMJbs&OG4j!APHrIw?v<+qk#7oc8Fo~ zMYI;#OtNeSl!`Nd+472iUlZ+@wZ-u2VK;tg0FSxX$5}gp|24Na?Ez>lzPp<=QGDTH zT)m05dYwad1!Uq5fNEe1@4ys)D>(l^`}!WuJIlvp*xE0owb%xeM$(i08h7J;?ptzI z4R_MyI8J>J-`=FJjB7tVyF5OWkiNS`aL$3Dd=~FCp~|M=N7E92?Q}ntF?Sj)xM#bf z(U{Oqjqg)Xw&!7|eX1fTWpE_>z{55vUB}`LemLi&o}UhNj2n3G#cfM)Rd_g><=Kt} z%G96oyCOwoE3OfQ{@zVEz8`1MdNCHL?D<`Eg|FJBCGGT#+2*BZN%H;<*CRphGNYW% z^=CDj@r`F<1TF*irdJ zVo7c;w(bXR$+my7Quf)Bo1EXgFJOO7-;eEoPCdi-a|j9SoNV#92uvT; z#Hts99nffEbV?Oqc1EC8*=gT>9-;{smH9rb-Vi$McH)AJk_zC{o>u2X4E%n%;avQ4 zed3UhVqO8#N5EAc*Zzbsj`2JR!s(s9O1^Gg;c@1C8HaDE&K#LR0~9&iVsLv3f9_!n zIK)~1cG~HG!)=+BRNo;dGjLpA+lk;>t1IGJ51qkYn;Z}FHCTgrH90ophg z=8JLRnJFJTDk$#<<&kgtvs~o;JzG_YQp4@-=d*Qxy^GdZnLSb658EL8hTBIB*u!9Y zp5-xc5*(6{wE_R#ff&puxLl6j1_+vkAvl^FQ@fKpF$`7zGBH|Qv>^F>Vv)OuQe3OZ z-eEX9HQ6ux%KO}GxNXcviZDgev@_qmo84a5aD=UN&rQ$+yl!M*xw=msn^2E{9H(v0 zih})r%HU;{O3TytNu4F2my5h8sqD_Bq9i&yU$|=Vou@8e`)!^X+0=X0+D*I0*iUC} zeI<5>&lquUlVfoCGDp^Z4fd%{)TKN-dR@uJf{O=6Tny8OuFY zs~xW*Hf!CicQX!z7$2ik-A^RQTE&Ea`^GGP)>g-y6^fPT>K68D{0J!04oZ5{1*g@i z9R2g{0ZYbU8AGW*w?^6Nt62cSLlwxOA{brVxG^BxH$^lg=JK69=i#%alb*)0xfDk# zpQIbB$%3pQqyym}Xb2A#-9e4bB1m9+n4#OsCsDk8ZZ({-X3L+SE-^w?hPu6aa@?4I zTjEB{d0Xh)>?70$7S_8R9iwBmV~t_Wj)&kgtc~nuYFoZ80py};nvO+x45Vka%^?HF zwj{}&LZ7H2sb3e5oZ&3whF9K}u`MucY{>J5CAq#htErNW&*_7Z^IhDo5kb5dJI&l& zkW@!%PRpEk8`!?a^XkZ8GCmB;~Fz3?tgaW~Z5W3STrO1SVK01S1DQXgD` zXJ~;%-kRsE^x{U)y1y3eq?fkPRLvv*K2^3@s8XL3<9ABsS+bHor`D>9I2{symp)!Q z<+I=NgS`WKeC0%L#sR%}(d5&^unpsUog9!gs1rg+7sqwLJ$<>X>tyS=+~axO@y91y 
ztBed_M7Yk(oZD}92WCtroWkzZ8hAU+urK-!zbdK_uipgs{9L7%QHNYnp`9!DEcM90 zGm?#Q_QMbF)oJsTl$uBO5~_25TGvm=Z;Q?)KYSO-rMBWjSp-!1L+oOl;@Redv|kW2 zrM$(`$~*`xGucFXK}H}Bie1ttJQpQ&%s1~_!Jp`nAy3EDKU+6>2KIn<N$2RG^SjcldE@V){DZcEkf;;h~zdYfV`3flbbq-h-`)o5hX2060t1%hmIhWOrk@|keUeIHzZ$DfI6JLlHQ*a{#MFhL6s0XIixgNOh``GjzGd_Q0GH@MzH zkr8G?V4i2~Dvj~**`AXYQub@0Su_y0X|(BI=Jh^^sGA-f*qf4EaLS>rja$364ekJd zCHsrtPWiRjSajT9ShHpM*L;1F&&$?kpHlIEn4kC6)*3;bf4Y#~ z=eggnR^Kasefx-<$itP?ck(6snr5=T48sm_<{d5gLT;~lYju0DuH3cZl-u1WFz<1? zKS%60yad(Cx!7A`gig!(q+0tKJr4*N!nO(QsuaR4M(Vf+a6X)0^y}1wy=P0|{S5g| zVJh#M{C*}@HE8vJ=@ZkmJ>Hz4XA(&Q&-VNJF=gsD@WJMAxu#~DAMe)f?p8dMYD6*^ zN!~?VS@E~!t=^m2E>P+)*!9n_xM%w_qfbX(RkoY*Jb_5>d9Vo+8AAXB8k^+I_!Va$ z1tc-l9&y0+Be~@p(`C{}{b|)?UtVVCEQ9#-a@7kbTdOpG)9rD-wkyT%aQqF3;U-#~ zmTqLqR{Ggw5h16cHLV0Gd_baXa>q5eK^!9ws!h|V@-x7e&DZqJv6D4_edfyMqeSVG z385a?2Qm)BCpoEkx5l}$ZYll?iPXYKBlpVmWh1AU)X^s0}hJ;1MZd3;{ zVCS2*i@j`r%pGL9e1*sA7VEqAWk8X3ayO+57I8Nc9d9~= zm$=wS)m+Ll7CNbXg}g;rkY^Ha`gqiVj}?KRRzKl?CN)YeK`fZQX*Me}BVaEj0=?lnAQWs<~--)yy zAD%$mI5S2La{7-{+n+Sp|KWf9pZ;TV>_6wmx_{-yDmFNM|K!FZ{rX?h4gVu67BT?h zvT{Cue>lOBc8tGW+?+c!frWU?l52baNw^M|OziFScxx**Vq$V3;5ZV*1O)%SUiRuVo04t=Cvj zFVNBUA93)bN8c|02O8|<|LF)yt5ZZH`uzIv z!{Mzj_fu~=2t4iQCq<+h-zR!0zt5IS-RnX>c?eU+OZ!1HYPnP|RWb9&>D6V0)ma69 z6nNNs9ppX-`5E+5tz|I7S%-z9-SugjNUu(d)d+{w^A;yxp`sA$x--m|k)Yb2OC^Hj zKHaV~QTFr0EW%%`pSHk_fUUeJRvM(5Af0cBb>O+Pth!*d@+E!oh?kb6Y*n-zRAS<)m!jLuHY`6>OqAnoiizy8W3y zXc;!0LaXi59mbw)3b%u@zs{#Ckf>#~^_YuWxO@Hpfcl%ZEhrFulyN%2D!#l+B;J{< z2uiVAWo}R3q&QAJUjL@;xAk{~totA-Y1Z*?2;>IZM$?~%y9*^DOtG;8SV0)c-K9FKr@pzlda(162sb6y*W2Cr z@{U*D^Z{Y8r+ABG;!;a}yh~)rzrv9ZRgv)9NzXaaHGuwwlOI3hG&g_^xsWsOt-b!t z(R4&;XRayU+kq`hLY<*R0lz$dxp`bt{aseKAR!pNeE8p2@35mEzX`STcdwm&dtlj= z039#lUA?(|uYtka0K+qSM*y4LxrQ zC`vgc4*VaShHDB2P@g()SGW@n(^`4_J>AG$+HQ|)DZVJ>g*f8q6wyO}vY)CQGe^@} zdcQ90c)a?nSe`nNBb_cYA(9dt$A=#=NX8-O~rOE*0p_b3n6eZ`#BdIBF*q6xdg+^r(^19&$* z`Uq@|XY#9UKO^;-wm92Pc289=w>hl{29!Z7<8bD{0S~*Nbyu4=IoVxPyQB8mgZao| z(i5aBmAvxAjo!0Tk2GFMydbAlAFY~}$h&imz~hjM9CNr`j-(2I=A6rbH#NOnXCLD{ zRdc-`x^MkF>~~!LIr3JJ!8uOSep}v=zo;&b=Qn#IkO-@;r`1*pi!H15kEbLMr)h`N zT6idEv&n?gSn?aL#_etF)|Z?MgZr>MkWb%B00@a*a!(!j%SsmbW9CF-#)G&*Tc3J% z({tEo)=Ew(4~uVqiP9)s7*3UJPtY97%8ym9Ls8EVsA~6V9YmhuI8SIyh12&$6Gj>- zM>vnFG8E=loAc#(;?MyRJz7=oULeIts(0l4dw>EG?HVie_l308`VR3pVZT(${d zt57KC-D$rEf%b#d`}^(b$R{N0;RIN&&!?eP5M%Y~YQuJaH}x86GkX9o+Y#fd5YNFb zx{>VqQyu|#6Uypi9xF1+)}mTosIU4Cja;`fS3PvlRCn3i{ao<=edO1zpa{q|P-=!h zYr?eWuE+D_>6nR3bV(cOoVw9FB90pZd=PWLCbst(sT<%lJGc$4C34*j%- zZUm%qLjU4_^?+>$H^tx_WQpBELXTgZSj?5^G|fF<&1yu$jnifbkeT=3bOE>aK@;PZ zV{(UW_}wzHT{fF(S3PuJ8^DY$qbcW1v?d!;a}3;d7c!{e~Oyir7cb!K?D zhUO(RFZL_Yc9(EpUN_CxIduH>^^K<$v4SBB#ex8T9*Fm+k)C)TkMxiL?-FMEO-P)f z3M8G)vvA$T;ZPqoWd)4~LhC2{<{P)eX=Y#2eMancRKNF&B-3pZD5e z@_93`UNV48rEK;#ChF@rANcG9ycT1{WJEUi548IXheG=Fk-XDLSOIwqT}X`wc_C1r zMCx{b*P1?1CcEB5^3Ud6tJaxV59$icdGImgXTJa zk#5|9)a>M#4~3Z`Isy#6|6o2E}=+l8G5gsgpVwuQ!sA1{{Ny>)-fOf7LVG~2TF z+%)^~bd@Fy_b|ym8fPiqhI&rvv+S!K(L^Tz=S`d}VvFFbh>lO@!C{+{9*R_?hS8RP zjw<3|NIFhv#z@#=I^2Lbmev;MREPw*kMzx#0;07&|FHQq0|56ed zq1_?;*2t$|6mkgP2~o0=ylP*;mH^yX2aKN^(^>D-G0pOs<3AL{qzG|WGQxdZTg1aM z#6G+e+;`Y2q+r1kd^ciwym zPxW%knG4ByL#1ZZX+hrY=Lax<;oc=b36l_o0~LJ&$enQp$8!v;c~PSS9kDYq>NI}> zv0crt7<#B<3GLRSTrgEFwrk-~bRm%(xcIrAVlMznze2h?&Sopbo@?ZZsi{E4#5k;l~WvF)N*}+jn@b|XFd6`uK`qY5;xXwZ5or0r^#9S zqg*n(D&60R7owTToe5W(Maf6@NPdt6bFd(t$#+2n9fYt~OiP}Bp8MKQLToh98qJU5 zZV#>me$5Zb+IK(FT7Q261his~p6xqC~@nL_vI14p^Nq(*mb_L)qsXqE7uyo5$b5N*^R}fBo>i5>O zIN(63)uIkj+-}VwZRcC)AFNhGIPd)Zc0EP;Ndvw$vmdG}CbQD?dfkyt4dT4gF99(f?+3QmoTRUhr>$YL;a#7Gt5UTy 
zF!dP12f3PV@u>)(Pj~=0-Tew5b#SaMpUu(N(+T6^^-eW-RP`fB18IOlIk95 z>o$X|4Zh)jGa!z?CEY(reK_5d*^Ae^hL29Zp6%aG+`OD=*7tE2WXdN$YwU;Hf%Nb0 zmCXTbRTuLH4R)qVDtH@AJ~UJYVE%-plC6YhRKllt={X;P9G7@@J6-+Wx{FX9L_Z9K zMEg7r=R-9DKM)H*W?yhLVdjYh59k4_*7f@l5v=on&!$+X7eYHI-WiF}J88-<9KdpA znY^X*;aw>jvc@U#S(Vj2%V2TQU}_C&ncW9$>R~@bS~j3mD@SLtP-Yn(DHJTf>yd5W zW&C^It*Vf9S*TEdH)X>5vD%doYt#Yk7y;5h>%Lds?alF^-u?6E^|+_N*i9J}5#Ns$ z#gSrvcYN|am_^=uM+>~?aDMnlJsU^%ZTaS@)c0CQKrs3pq3Np8= zigcy@gQ$wnNylV=k!R^KO_J4EWk!AuB+y2LaBgpBGk81Jbn&azw5gLWgqM({H`S2M zd#=yUlt1$N7t|M;*UO-;AqK|S7j?>>bqLdcY>j7UPuw6HJKdXv4ZrPYo(7=YZb8%Q z?iI6p+{vz#)7Rr;5t}jo)Vtf0R*8Pi?Pp+kI?BJ-`9;CXEPc~j9JDQT2X2;jxDxAM(s zo}PRgrnj|%5D>CF8JCy?A_;}8jUTxiUp?w2i2l=xR4viO*KMnvl-7mD!tTaPFaFM7 zF9PlBH`axQM!W6Y!M^X>P@n_Wg%|dJ2`pv-QOrWz^=c8FV&KpUf6Y@u;X9IIhfHZd z=qTzOIAv~^S@k}|DPPw>Ykyecs%Z(1lODf`U&7F~<#8tZ@?PyBq3nit0`fgc@ za{yqs>ESgCU0olF7m_h?zD$?XtORzM>dD*AzkIU2N3UER-)nJl zo0@)nlW{}2AXsj8Z7^(MV;zc0FzEz#%#60BPi>&L2GFWiS3VTIRveJ3?BdeqE;~f( zRy4fNSXyvMIc}j7-6jkI7h5|oB-^-`}AMdhs$1JZx9cWPnE*j3N{X-oAL|bdNN8P z_=V>9pb+;VdIZhwY+?N-9O`~7w)@qlh048Bi^qO|uD zJ9<74X;jI5K%3TooQ-ur^455X@^3>VV#e>4er>+a%0quxGz(lbjNmYYhkl)x{EB2g ze4VS?&kanXuzL0o6^~=>TOQq3N$%uDv@RE8K78Zj?y7w2cK4b$;|c3~=-*rFo>0wU zH@2MVw4O2N+w&V*GQ(DWydMJceVhEYJAIu&784Ak5Lu3YUbV9rjFw9ZcE#?S%`b%T zavJChLcQ;p=O(-GJMR>JSJq?euv2ix*WKKBQ8+a=={?kFz~wL7I8t`(bQeyEz3{%t>PlAf;-TbQ3QnE@`g zSW2ZMn>-?ayVJ9cRpmaua-S)$fihM_xJN8ap8eJT6*zC!kPbFXQ+J2h9N2r`0yTvNMX9B#Kyo72Teb-dbzF!izG+s?zNJJ)K#~d*9fPnE&|q2EdbUSKYUH+J;pV75!;*@biZ{ z^>4H6EUO*-SNt+o^rH#4+v{f=V0-(f$5om@b9~K9Z{`zO(Aw3wTv5~~0TKovUzJn* zeZLuhoMiXz3#YiZ58kw>+_}4SqE85EGHLyUPknaFb+IU zG4DDwP2MOUi}TAlX{Y?)tgKg%S)~;qgY0vMRc_^DR*NZyss26FBGj z3!{WnAbrZ0;d=ucX@uJQS&`+(vzMZ6@K7$?1g7U^+Fj1R?HBFl$O2AIfjPv@^emI>4jE1!PdKw99QxDpc838cV|k$%<=pb3li zV##;r4D=jScM;)9KiGgN(6DgU(T-ywwi^)=X#_PDlOG_@V#E7hc7VfU)Zrs6sZFup zY1?;3%t^A>Z(Z;RcrhX#C*lF02-rA(EDhZMU454Fc@%7HRr7V&9SL2s=9D{Zw2tJ! 
zTq(h5EOjh>^PmY&w)YS8SU%ewj?dtdYmfPaRAiZuuIUKEs#A7kwEi$O{b>Y^+2DjL zlJ5Fqeca_W@p@!%9+v`rqcb`_fc+wf`imzan7@ytf+z-}kODD$MXg6e-Q-q(31_1{ zE0coQMt3WU4+P73VN1yXnEKUu#jJ<$$oFozx&?s?AQ$&Nw_nJuO6L_1gv_Z;6YwM; zQ2~@zGk27=ah3@(RSW5II2Aa#-o@h(gj?9)-xc61$lO0=v0Tx60B+l}8cwg8zXypv+YpWYBj3k9Z7#jALn9{RksacVmE#eo*z`kdY>i30Tg zH6Cv!OQ=z%4u-V$ije{Qgq#yg+8hSCk(x)9%ZhG2N_oHX&zEou{heEXLWucQr_UDX zR`=ygkJ{*XKPGhF^F(mkNX_~kpBcalIydjYOdFgJ^5uN1`OjyYJ_kPxFChXeW=v6D zLJ*3`3K=6DjnI5Q3~Df!VH2YAm;g7;B}||0*ARj)F9D163`5U*^ursdK0>~RuTv#|38zu$Val25n-CC% z2|vW1KSBcBAKv>2Tr=-1IqwFrVkIQ$F(xm>p$)C`*DWKW^FD2PV#}Dj!}ESH^oAje zIF?7HIo`n~0fpz)xp3|EDhNNW$hefB-lW&z+zvXphV>PwPzs{U*;GHhq<0%v6v8;< z#bI8a@QJ{jSDlN08p2-t)1hper`kRy968!q$!^EHdMOXo8E!2K$--7D4OqEFgLc}2Q>VM5ETSKR)&b7Sy#tj0NL3)LH%MZTWMjW@=BJ>G7;7kGOU9fwBr`^cX2+o?={@8H#|sWAiY9D%0y>#Epr zHtB#}r3ImRZ_Y9boT6b9;ser}--8I{pupje(lpU#uQzsP|4Td{4%_fs-SBmxx z!gU$$gPqtToMIT@!&tp{P5~yO36Kc7ez2b}yA6`~%YYt$sGJgrX~klsfjfathMc%G zCLT(ziBAmcD+S-beBU9l=0hDnJ80(HnYnrS^{YIzUSN`5FNs-fn_tMT)4crFkC0}P zvgss$*&=rxL%YpnwIpk3y429emu`Ksf2-%DIvLKMV#Ux0l_;XB#8{Tdq&eCwU|NIl zWWc%kBr=;Zm3#MeutSmEUGhM6CO)6}Ate2k>Jvm;Fmmhr5bg9uxnz$k<*`@z4{hW< z@_@VZv?W%L(k2-9=rY8MnQzi+N)8VJFn&va+?JQ;46Lm6)gSWkmY6TjCVd-lo4tr^ z2G`%wbMLNKg1xg2PjJ=UB5QbpGoo<@-rW>2bw4EED|b%bPxHOHI3s{RljrK6oEE1W z>kDkUe@pWh8E;Sb#9nCtXL^uYtltjs#5dW^YTppYwdP%Ef=;a{7qCES5 zq3?=`fF>rpZY7L$3i+lJ$_`7lu!FMR*|K#mW zdzIIgZn6K8ZxM=Yz<{|?q`2>mdt<(TL}0)z7%*U)DE~cUnzf$2PxGF8lSo-Q`#gY+ zv1e1WX4R-MO#j8TDHWYc)%2*1c=N2)MM8~)F8Y`EJ4i46{jfsGjVvBG7WWuD_E}9` z>6l-yqS-lr0xUkeJL^;Qcr4yt!18lcwX~(RsjB9kx*AJ0tt$0Je^tWILyzfyiF-_> zsnR*9_(T!gBZ=pac2O=a)kzgzaC6^Nwv$CIe?KZ63fCYvNavSkEqDV$#vnkxT)~zB zEm&G&m8=fgMmqz{soir%Jpe7geA0^pra$P$Wa? z!z*Czi#$MK;0V(dQ`_Pl#E&|EGMUQHY|KF=SbLs%W`$giwq`FCq2N#A4*_DQ_M3bg0dXoSs;Wv zKQ1ZnvI@`dpn56;3)l2ePH14itJ$$d>8B*lcj9fw%3J1Y8>Z(3*Be`ZH}vKlI+swa zCUrjFZ-z!d#Bv)BP5o+uJMDku>Wv70dKJYa&sdPtq)?L+ChwEI878}4IlD(k)aF_n zo}9}G3D0vj*XKj;+%<%(tB?wN%hx~Z3NlfaSu6D3_9ve+=-iyz-<;k}yb?>}w03T3 zbd=o7X%%+<+s%5K;+R@MNrSENrmge(WX$qCm^Yl|`BQ?*NxZyy4Mec5X;e{i z-K6&V-bl65`qk3Eo?nk^v{_k;rflEkDWxV?Yq~jHowrkI*zA}7<7w`w>4y&FX0oAL zq~VDI$&q4&*oMUMHGx=%!!!BVl^-t zAdo)Xs|Rams7s(kSEti!c}wzgEq6$*fLR!&7TajGd$Y&eJg7bD$QPa$bG53ul>F^+pquC1{m@3~4W8@AF(`w|9T{#T^QG z`gfmb{X04YOLYR~t|PyBpxEX@O5St3kM|HzseJ%{-fKWR zmd`EHVvTrzrKqHicTeY$7E0$I>Q$6NeDo$l-TuBnIGZU^(J=J=<(j_ZGbf=OsOole z`l>&nJF=7@Vs7t}e!5&zMx1>BbPS+Xx0_6?Q+Mf#65QMEW4%G)z%!Twx4XTi3*JVG z?8>Zcu)S^TVmQs{mGEv5&fk1)lU%>$2q_)1Im{@3gCeqA0?+>GBME(UM@&UdXUGFM z3IZ`U#>_D#TO@`DHZvEsnw95Qk;KpdT4AIhaPIrOTl^XD+pa?PTIO(RNC%T` z*3G$p?0ghRA%A*N`QwYvf^xTitePq~#>`~0TCHeLt6%bkVXV#bdh_j-*L@HgcpA)B zVe)$%WYQ|j-(9;mU#V`KZmqs-oNEzTw7-}-(cw_1{8PJx3l}wV-KPNKdT5;P7o|kn z2xXkyw5U|?LcjF-#Y{j!!NNgp-`LDsqcezLj?Y@W2b zC715Go+1=))BV*IQT=v~&c+-`r`F+OFr>Cp=bpJ0m?I_Z!a>@s6y0juph(}I0gn2- zMt;YzEqHT1R9qz|eR_*@7Lnvh-_A?Qd*-b4*i%OIxGy0BaSCj{f9Z`-tJnAScwH}l zjCHW?ryFkfHcr+Cy!A7KyE|@1vAr*-T+xBPn!YA4kV4%0-lH$5u|J?(l1iC5s=9`( z>o(CV4+Y=$%yHmBnZaUS0D)@l)BB0i zjV|_~C@N~wPQ7xc;0O@k2=;4owc&1{6#w=Fkml1LOq#ySbG;~8BU+G22xG?X3J}xYSh<%g*dB# z#(QjL;*k`QUs`ls=FTqVaj|)Us(+?z7veew;5_J4=3E7=yi$M}Xn3)-&HhT;(&~It z7ON863T;zSITdHw)^HclWeL3^zO5%KbM4aak8(Rjn!jE@ ztfpKQ?nd#TS0J1p`=*Az(2)tK^M&EH$2q(ENx6K5{+sWZJ#)3_YJQ=&*rN(D&``8T{%YDrLn1>E{yi@Hj~GWonnQxK-Bl%^m>qgepI8gP6AMu zH}OUlCZjMIR?j@`|QKCI1n#b(opB6H z^Hn8!d6hE!5wa1eS1dP+E=)FHl@7~_d)Yjb4!C)k>9PXS7u$H5Cz{@}y zecmrGsI16}m%7ePc~UulyK>5Gg_~1@vM^p;09K9{(qibMn!+w&2fJPIxJqHc|KS030aMW-&%W1F5Jw%CaYee`ne@}te^6?IK zC+caSrH?o&tYe2*zJCn-*xfE~)*xUU?W?Kf<@qbO_{UO;b*?RCP_GJ0FpaDph_SrN ztX|9o5S8D$@?mU>VgeWG>X>5XfA)fwR9EY!btPxeC_A{9wjSr3b}Bjd$hyzY6E@Hp 
zYp$PWyz7V;u!seF_NmHRe{|Otrze88n{$UR zmFsfzo;-Sa4MZ*EJ#=I^{eJVhvO}>wE?(`swLB1Lpnf&2!UIwi<)EE;%4iuV0+?b%vBf9dw5e{_Bd6yNo=I~GElJG+E3Ls3~MD>-=UuezOSQ%I-wuiX_% zgVk$#@c?Kp=2fh>)4_UnS9Y;kBoF#-GM7yWR-R8`TE3R-sa#)jSyiG|`te#Y)UkR8 z35(hx&?xo&!(&fwcYG(W11lgAJ52733(lD{YPfxW%jxF`e_6X}s9ct}$HFakpKYQS zZ+}?F)$HP2h0EqXkIyE=RVM)M-4>TuXMq*N88l~*_4(U5FQv{}2A68PZw8RPczY$w z+Nj^MSpb;%+c_}|)0Xe&iSC0t$TV9;Yf|=8%uQ|^Ft@{+1+9a*_&^L#2Y}bOA6dTo zoYBb{Z!lo&f2(s|szqH;*GasDrp9^8n@+HmcppGEb1l;8^UUk0RnhS6Y_iC9<@%#7 z)O?6Hmj~)FrP=cE1Oh=~7;L=Rhdq^~Y3NYF>*7=0v^@$BwW{T4;_17o9WJ>oL#V~C zy2h0!5W4&MaitWRU!2YDobAQkeZDgpb2A6>NS$T3e|lczS7ky<$&n!e-NBRHTZi{* zwPR+76YUn!cHt@_4MA5xN zNN;=9ofQpyzf}1g7D3j(td}wsKHE|^nuPBpQ=}fxWq8JrbrL2CvL0@LMpUW4+1Z~)LNv~%Vr@lQoGY>kaBYX2Q z;3B8*vP@3YdIDuPWa4Ml(%EdZnc(W#J`M_|p)diV&}8|;xmwNTXzAZU&-p;=fZ3Pl z)ttKAOjkuRfG>`R_XMnpR~u9nfX-S6=n^iMe@tv%9#7-2?@l*}S^*txK?NZ`gRBL` z)GMmPgpYP(8O>LVj)%tLC`0)DBlREns3avMe7E^Ik^BJ;s}X@OpU4I7IC0B45-&rl z8N&K}*`eqTYzk8js^B?5)11EG9-**stt~fALB`~W&_$HA4no8VBR#ng)Xj{7(0&Ep ze=ilQ#9iQ8E#Zop5NLYk-bA5opkDa)MOm^I*;EwwuD$LSj5s6?M2US#VcmikAX(+Q ztS2obl#twd^(^J7zvqocdTD`@+l|Dq0qdcjPiexJ7kp3%4u-fG_JjeM-u@)N*zG1i zi85ntZ{FENd28Y$`=OX&Lh;q0MA`28f2Y01`lU4r&bHf*W!ElBiYEVF>yerS6N)&l zP{#O{>WtMoIGy_e}U2Y z3pU!Nk)KLnXJ-}=-?tZ!^=GQe)ffy@3-@M(i?V5_(_L=|@da~Eb zyq?x5KQ#>`I*j_E3XBSra=9@cNWZna!~=Z=zyCDNXWFh!RQYsEFv$3Dt#-$1weqH` z!!C>31<x==CG)K9-fCqsfDpd+37JHVqwKCw;d*zTD6e(j z3vrDmm~51Nx!N3UyWusNuYj)PSr@1S1}r*ShI(`t>J+qm+HzU0bN-3;bn^n`eOHv` z!PdAK$U;1h6A?uG2h_BxfA;{Y5)etSpqtaGyFQ(Z^ZTo%{m6b#yxDonL`S>2?auG_ zwYo+qz={Fh`n-37fznBg??ks!NW?O;dmm2`7Y{h*cz@JeWR5PGe$|#h0~Al^D6rtr zeu4s+Ex)paKj>+CcBmc!DJ5DZgg`h-mT+WeEcELobj)H*n!{@1DWs;Bx~*iSr{)M?DV#lhpCfcn=F^NdooiWA?SjJwbbwV`3WI&V4CRt zD#xm`byvGRXyO~he*uuf-juQEcyw!6pF?bilLfokPitD0)^`s1EunjQX8_u<71{08 zTf)v+opp|giPF?&_v*yM1-Sw6+@&CaOTmWip8${vXTkl^idSX3GF5P(1q`5u68MnI zI$yf{6B3KdECvs?cTqT>_u~&F!j}2Wk-fJfUkmDRQC1)Ie?EV{o17{e5({J=1=iJl zn82xZy(5pIA|z{?>rSR{F9f%wi$I-1uqd4@V}m*xm54g}Wh^~zda(zgm3w?yJak{4 zm&MtdPqx+5v=+(E#?VLRp?cQE)hqYNc|EOlqn|PIxs$kmRP3yp19foHvn!+$58I<; zUY;Lk28e2>f5=C{A@w6tLx*~+P)g@Y@^(Wv9cb|({v4zNZOw!&ELM9&NFQod()!3I zO9kY($L$>j0d^pQ!vA^~RK^O2?7H6`6nSua-r9<7QOXUmO>LiMHFGZ9CiCR2=OQ7Y zgL(XT4sgiRBaZ?J95x8)w}kSO(X zp{}~v*L-z7a(OR^IsX(Z=6+nYlkk$iJqfujS)9PCw%7Y-b)a^ioqzGpH&A*)wov-q z&%tDOHQd-M7MmF-i^Fq!+-U>VzbdobHIj?s44v-IVEimW$@iRSU2W{4K3G~HGv>eo z?@P!Oe@<<>UcKte_9=2+Qw+MvOp9A=B`9nI53!*SYQ1t$ge!l*S?w=3z5;cJ{4kAhnf-wP8kS??Bf&B`xHgfZf=>fQugJ7;`nx0`qEt}D*Mhw_{}336*vUs=RD zdk~W29LSR$p@?RLx0TfeWTW*mLdXbNpN(9#2Z3UdH0%moe$}YWuRO|7wdU_ul8lsxUwzl`g>Tm%%Mf0 zp2bm`xN<9Has8A6A|5&_KS9l+fxPk>e=TpbmENj+@7pgm4K2a6B6{)CZ}tpQTQgaI zmD6{p3+Sac#3NEcwp4bjXTUlv(x}rm`zEiILV_sl2MliM6r_FE$#NaiihM|FSv`OV z;Rw=z4XRG&N7U)SFALb(UOtpc)L&9X`+Gg{%?bemt*!j3^$KTiI7G)c#>{6jDqwPyT7L97 zmoMY2N;~Ye-aKlxVj$lhwH>c5e`kLejA_?_M3_kqf`^^R>{=>JzxYccqae}U04&o z-aPHY1vDFr_j)T{*0bn1>1?U$pf9hMdpoUqv4QHjw#{uuWA1l}whpLWsuuj9da;td z8peeip6(?m)e0|be>Uh8A1^OK8QIi|r%%p$RmY&8{?Jwqf`GI9P;XB`+wzZH7Kf*| z#TyGDgoqGIIZu4(JJ?it>KSm3qFl|c$@TOQp8%b4m;3x>y744C8pB~gP1yoW>t_?C z3hCs=O^!hO)59PRkci=oHz)yc{|O6*9t55Ry_``~%6#f&e?PlYM{0Gm8fXN21%H-U zjj#wD#fZ=d5dRjSjSXKJik|H*e8sr_Aa_!qTXXCVx0QR$Em1{)a^PFk1IQ`&Fm@N! 
zO>&=aYruN=^4UOBNmu35t@tQBvLGd^NE0v4qJyqI^1@0EA|_C^)f46Z!ad;JTud7dj)SJoOk{@%i z3ZSu_>_V1xs1p?E-GfUX`r_5?dh6bD6o9WC_|p{r{iKVhYTag zBF*-cf<@l}ZjJf0w&EsfV;lA4S#2kq{Q|rRKwq32E8N3*vR#*h;Xw zPxtmF_g5*nh#s`^;hVvP?}PXcMtEW?+kGk5pG7fEFfm}g;=}XuIkfTHd4n6L%sYJZ zVZGef_E8VyDx0@^a;|qXc|jQ&yz-3){M+H?ZqJ};!`imC1uDQ6&f_G1rQ~mJFImmO zfA#Bf4?h~Hx%{YKLc0N32E*wevUkV+A1tLD$Nq2s$b0fHv?tH!AJ&tfsKI93{C5Z* zO|7?8ZBMZBVBJ6^3jPv>u$Tbi4b~F8oqHoyNM9bWZU#94p3G72Iueqwm}=Jph(#xJ zd@7#DYm3ScYstu?!dr*&Wqv)V^`#IXe-2N+N+_m8i`gqtzEiA=+mhdh!;3%rGP0D6 ztR#fFWDEugUUS%b%$rSZgkd;)3}PtUL0Okc6uf*1UlqOa;kqv?$;d*2w=~GG@O}P; zD0iKJYj}wOiT4I9obNOUh_*9Z zOrYWfmFU7M?9KugZAioO9%Ro2D_T=1e41-snh1O=DqdCdzPkmR0fO(TMMf_GChJ2S6 zG8Yyi7U&(H$a)86NYCP1J!>>SF@zG)R3DH;u0iCKA|x~-CxUd^HBa25!C>M+ib!+9 z91lm$zNUq0y5^~-J*HXx5+^L$FwVUCYwt7ily?~Z|8XNr20yGh~ zz3-=#*?WOh9oKpRHaCyGaAo11b4E1cMh6T_jH87X#O}l>TJWBL*5Lz0sUK1f3KMv- zn>+Q6het^{5(ET=TrvR8e-?H#ZX>Y|o#$j^%0vq>U?uV$qGg`Y^}y}i>*gaJ?83dO z0U3r82Tf&~(-P7@CdcjJ!D4b0ZSd_2G8RZ*0O5o&B=(G?WD4DRnJs2)F6Q^;80`bu zx94J@zQ*XM(sm6Gda(!Zg%`aT+ZDUFG~#m!`gj(P0vHh2YN4^6e`s9K9qhf8@Qt_K zA&0uNm{=Li$st&$=h`Oyil;eHU=j_v7l@4z?fJbvGQMDqayLAePH}zoK}hQZBR~`# zD1@eQi{}QR^gzfhnrp*@PA#_Bdb}fvg&S1^#E>ZPQe=^7O%rP!#{bPZ>HJQu!9Pd?p9$)=T<9y~U`$>=cIOsW}8C)vi z-yKM=<{Hd}0oJb$ab6v<2MO7Fh3_i)!{*wjJ83){sY3?pBj)UMxE}#}e3>aDeP|JhWR+BSXT@<6=Cr4c@lvlRto45fnyn36xAxCP`_W zCIS{PM(Q)}Mo|_&a4HOGOU4hecNWGuv3I;oY!8?`wPGCa+uu1&<}>F;n~ z+{RcT_9Ptb$+zv33;rvyg7OBcc{wjqeNJ9PPzu4*n!F1W;CFYvvrq=3MuI$8h#5L5 zXCcF$J~=m$AOU!kT|^E&vJy&UDdV|Q2vihu4(JyVHRz-L>rxO$jCPIKD>A0P_Lsv@ z1onX3f1{nL2O+;sQdh6)QYzG1Gj?=)cMh~HaF!%FQ=P9g7m9G67NFb>`mIKUYPT$v z(8=1!2-ga($?QLD77rXSYyP0xi=>fgSglx z)lriqtq-QnMUOLCcEb~hm$It9JlKJR3n; z3pBH^VvOC4Lkt9NJwQVwYolPXdV<*(gZMixI{Yq=cIwg+|B`!|YC*_@3|ES_2=P_s#yF11o}&E+nNr>(#Z3rfow-J7vnZa<&w-aFj#*gkKH&iI$B ze_}o|qS#aSp2K9gydptVEVmyK|wF$MxAfQy=b_3U3bML>SlSQq3E%m|@(y z<5-1>f{pt#PDsBo0SB_y55*rY-%$zJ{vc*xG=|FRn`*P-WLACjQnZVN;*r`=S5ZaF>DKIubQG;p3PKv zS?IWjWnrw5_WEE>tJ;vxU{9~GJ2B@&!|v@(eHTxMi(Muzbm=&RO;_*o0~5zqgZO?dHQ8q59PZ3f!nyH;Xdz*_m^ z3+QD0pnTS%>G_Fc9V3^JP1O>eSyVxs?Cp}PkMhlY?y|E+hb`uNqM&#bb-3XD&UPA@ z+U+XY8K39wSl?_tV8DeP+1pTef7NWd26Q3vh3ezx9Ac4hNiz`GW4oA^u zDV5xYjOQBnQ$pJd_lYsxCy;3)>pGclqYe7KXEOgufAYI-mq6$FU=6#Om%sFM=1=pg zV%2KxoBmpHM4(%w3{)ymDIbzKpEcbFtO+~5*BxCuS1#=2^-8`3=5f-=eSSLdW=u0Kr|T zF#q_u>t#n%R?fv%$5=|%f10r%rJhf=2^wc-g2X*FK6S}@h$&@;3QsmRWImDg+D&tN zfLf9R<+sj=`1ki;F07grs8{hEPx$BjE;FX&IZWc8`(P^mxBmIpw?Yc%cm5HZrGEX{ zUq70G0Is_Q5dXIgQF7btCFl=Ab+=YY60Ps&Zcu9~NU&``JIKSge|_f#w>LrVr%2qx zE7F@kWEl*9qxju2sM3XAT6;%m;oNd$=L7!<=vo*4%A?`}vi|jrF|L2m!IgV5?j-n4 zP>&tg8O$j%4!{05@t?ly;(#qa5`^QLK;GjSRQR448>0=jh4jUZ&qvPrwMhzy`MZY& z;#26zxmuUHysH6nf3MkQw|6yfWy0p&ogNO}(D=F+9%MIrBo*S99#g7S*hz3@z(&25 zde>LB*3^$v-dVc1rVLKN@RY=ysb(ESAbW4egX7h!+*I_6`jR!mG?X=tO~K2t1)cWI zzqB(0v{#xy2Zn@Q>)^H`$J*A=9Hb%HN(fvhP^u^P$2wCre>bo~+i`1-00qF=`<&K+ z9{?ds>`xaKzwPq%aliWvGEd1q1#R-3yQEFNHhMeuea{FM@lRa_{H|#K?|fId4tOuyqbqK-Q`9k#k-3OZj}~5 zKrp#?SuORQe;G#zCzVar!TY>ST{_IkMuB{ zn*MA-n?mcPd~G;fYh&D*w&Whoj~%-MsOhn+xtXQONLLCRt3eH2H31BBz_i6<%u5}3 z+Ow%<75&?&0VA8<+$e|;pXA|=z#pPO5O@iNN5;Xae@FE-T)=UiCv7ygm4>#$7%Q+h zB=-Jm;}x_!S?9?7BDUi981duA^T)OM$Bu9=^frZ6W*mmyQI&NQo+n`4#rI1d6p?(vMS{nIVwoO1II$Af zjZqYU!QDlmE$}d#~jvDSNYubwHSHFA~_@6 ztAb-#j_<%*V64ee*p*>^_n``AFIY;RUVy*=f2)VElQ)+ngMx)%cCGL96)Fj95Pr8u z_`ry$wuwvjr$A0Rm)6Xt?%T@Uqylp)YDvotNP+zi0$@d0gP-1v32QF)iA#hsxI4oU zR6zSlA7A@3u4`n?BYU?9efl%b$=>HXr-&Vek4)_6=$HR<-@-OkT`EBu;=62mY^hFd#!tLM}kWSAP}0~_P=IUCpoKT&hZ;X*I6^vI1qU1*98ZgkUa;X$Cxw6|r+ z7P2hXy2SaSZ{F0-1@a&55RrDEEqiPSfA~i5|Ho(lM<0gtCnIQxiIDOByB>~h_UoU6 
za>UHA&xW7#V~=LvHD;U(qfIBVlyPivatApf_i+zEw@GLO?S_FRrg-hWU2!(Q6vRmIz?mRY6&`wGaFVM^>UHP}hjIYhq z4%kiXWv=2f3w^-eDdPnSWad3YQOUjqY|Pp-sGynMj{5@mci(wI?B+NxU`xNYcP^8) zjo1gWzao1&`0Qi*brQzezqKKMe{3$Fqere)n4+?T>GY z9>IlEu}(UZ^a_{%ke=`ShA481Uvvo;&TdLbb zl^zU~s2C6%)qU$71mRV-e(2m-GjI4&tqtU)UsoFuIsxZQlr;_G4eu7mv~M=b1@gXDD9GU{}y@ znA%JAeI& zpV;5u)|~HJLgIAVe*maR-y_=z;x#0UTs6Ic7xuCTz({ z_d`POO*p~Pk60h?6@hLdeqKz&uzHE^*pI3VWB}roU;8$$e+58i{>)Vp|Fu7eB>ZDv zer@MBe*MRP{nRm9Ka4sh53L}r~kbxXzvi(_o3Ay%-yIA<7t0@%?)PYod6=QQCF zCAR~Qns5QngLw#65-7!Eoz@)qu^<=+5)erSXgj;Wn~d=$($9#oe{H}-5Yx}!Y~gnt z{lvx)F9$sm@$LWe{n2LMz1fewv}?dslK7Em;=OcCMZSW(SdxbmqbB33Ds++MxAZ)Na| zaBFSc4$1PIK}{$k<)Za*OYkaY%UI`kwFn_p#EG%*d1D;8I}$g}$a>q;>UX^z#{>Lv z(*J+?f0)0|^?jcr-Z{>ZF;+f4_qaCy?1x4lh1g9Jb3xk(KV$Ga#{L#38~s$WZ~EI_ zkP*hV8|Uj78z-?f?5{wsMD_`p0=!T5@^a>~FC_p7TMTS>=L&2lWW<-#no6vANdVEi zC)rSzfs7E^L~UPk?`|b-s;hmnz;be(UG z`p}dLpHNy1HtbIZsAgjGgXD?WZzVw#!AB{Pqj({%R>@h5>nG=yN5zE?!s^RKzRk$K z#NCQsC-S_792-x?rzd0BP9u}I9?n-kpv!s9BXXQRgwU| zOO!+ok~~=BZO&~@2(xZBJkA%ae+vG2EZ5rL``RoRnh*@{7FPqx+o!(jO9pH*_Q04T z;B)sOe~tP^6DbN8^LHwVOpBs+#KsvzfeG#HRLL@ z?-)jW<;RErhc)^Nf9yLBQO>7j40JHk3F68k6FA+ya#yA!w258>)$)m8+`g9-kH*oOX@mL(avPz8~TYktQ^Q0(O?T$yF`R&i}>u-F?UJWW{wX8NXN$_n)|u#`W+2 z!5CFB7Ej!`-x~J?FAki$-~AbE#n(rKZ;W&LuO0d6H`p5F(L$&xe?f2Ue`w4qiKBQn z4Zn|xaXm6#Eg;aWtX)dnjrVfPKoRRluG>O_

ttg`q|l0XCh>ImC-X*C{&7Ms7gN z8U%GUN#-QPxV5bicxNqZKc+7nwd!aKZgQ7C%?Gi6=OAW~O^5ce+b)TpA|hY#J10Jo zb`tXhYejo-dW2|5e>{5%cLYh6uw!?s5Zu||cFQyM6je);%QU5LRGUCFe|j2R{XK_yc~0|mstQ$x zG39B#h4Pf0Zh)E=^=_>l)&ICoBIah*A;0BW-3O|b$2>HGj!}G1zE!-j?Hbe?Sw7Fv zPj85yiX27~gb5+bgm@(l^|tj!i6M~)2|78?8d*dE-SxQM0Iq2aU9jt!hlBW?SkRF8 zlMExzE{NZKf1;WIV-Pe%)qt~C0) z)AOe1mykgW!35ZiPJe-4)*5_qok)+z@{4@AG_PNPmqT>S3k+PW50hPal{MC@yI?9al~-UzYpP$AM=l~ z>3_Cqe`3qu*geJpBdD%Mp8KaQR5i`pvw4>ge_Cgakgr-jj;)y!l$s%fk&MI{6cU zV;1lV8s&1>qs1got`YyP=04*-_rM{&k9}^hLc5>rb9I-*KY#Y?WX&tFw!t46aXuaT zf3N>zbADrnfB73YPU30by!JRo2CSpxyT5DfZ|oj%HN;GQ@=)CNc2v+_TU2KoJT~0O zFM-AFuDNuMb?pxDL!OG^9g>7KZ0K{tHzimAJvB?#u>t|4ATb?U@wqxuL8O4m67kh;Pbg)U2aaB6KC>!s>qnSdWe5qt5GtltDvDn03?0aQt5mymM=7@y%V!+H zo$NNn<)qV1i@p?H5m>b0$cbexa-Rqbt%d;pu>w^7U+q0~Qx^g1&uOqj$GWD_;a!ca zvJiBUdWSbd@{~;~Z=f^vQC;cIf9HLWb<$c4YF?My+C+(&z=tTvXKJWcv}&=QI)!!u zZl>P!JTzJPa2p&GG{p(=pZ$N~-}s{)B>s6D68k^y?}5h~abn+Ga!S@oV(ZBoO4eDj zmnHU%#J1r_{_cIp_!jz-z@Gkw{~*|k3_cFLgs)9KZfdD(5#!`Hi6zM{f4zfZdA7@2 z7ILXTRfMmt%Vb^&5tkWTilOYnXI@z%n_ECU!_TBmI-hQa7}%MVHQ=3a+2jl;#3>?~ zzHK@a1U8X#F4$z7umoqO07l2deX5rmtn3q1r2$bSDp z>8^=&fgx&o3RvuS-~Ij=f6M)k^LxxU{l<@hcOmg(5=Z*R*?;}$(LX-KVpC7gdVF$!A8qF!NnJZ*a#kY`9%Zd%JNxY=f z(xo{W10ek*7QNAW8;QF6nB?p5 zT9q1&^{?+T_qg|SS%QCQ`6eK~%3iLvT!<oWHogpLY839c)m8e0{aQ`yuI19`vuc>c8NMM!#jmvW%a@zg#B+b5sE6 zM|#>7P!6YXw~bUfLsni^xY~YCqt{D04Vv1+fI1*2VzwieO3Y+ocBW%a{ zfNR=5Vt?=re|~dLTzGjK1ue)%^{^^S6FUes@`-vHXx@%0pw2AO#+E0ZvseR}^|6MW zG*AHiCA!1|^Ca=WLsjc*2;~Q&+v-D&Jg~+^Zz|^w9a)!-WL?5~f7f{6x=36b`5Vlw zFWx5)F@AorK0onZGUjlOgc!`9xG%v0H%CTtCB$YMf1Za(fg51&ui21CZ#L*F&!RDo z1j#|%+VW9`6iriGDfEj6#X)qEsR?0XiTq+abz8rG>dks)@zQ)UIm#YzKa5&sH{^}c zDX5fn5RU>jfhOxJ`UQ>t-Di?`@)(02b9|$3`V(g)&%uL%dq5J=y7|C(aMj-J+lcRx zyMdPef60F;K5#~;HIWR7Mcjfd)1bPmWozl6j7j$FKbq_J>!5l!YKXsv4OdA2JCEOdDe=3J4wY!AeIrPEDad@B@S8ll@{E&=; zfM_vQ`P3W7VQ&%pOMEx1Tgevo0^{-9zoh-hc*HoRk(&scVxg6o_FNSnfjt*}+x@`gWL!llKeAVH^1yQ2lHt*#fy25{SKR7>n;Ao$D?zjmT}jR(~J3Z`G; ze@6$rOYpZA@(G9ujy^OQPZn7R$9V$35}*J0DkS&(vD6Pr-nEjjHb{Kzu61P=Mf$)R zn!tRJuaJ3Fv$%*t)uY75+;P7-`avLATH4#LH{>a>EN@T(o&{cmI;(|UI}28dl%Eou zs1W|(-`b1D(Xn{cc+nS`-l}yfwJMbgNe=Rh#AH_2^@CG9l%Ti{P5_n^eLGqTl*)DfBv(M zs-x}snb%k^#u(U$>;AJwpDP6F&b-?I(=Dy}VCUOitB8C)<#~zGkk>o-H-qon<^GY) zWu-rOg0;_4i#?^MX3Pq&Y9?!IYAc5*LW~)>Lu%3@hhdZ)LH82e zp2zI4PLenW?DcprU%V73|G#7Pe`juWz}<~@iJa5gm~$oKrW?7}O8#?{>cX2je zmGcm>He757ARG#2d^rVEqIY7{%L)O@vd!%0r?UTsLa)Qq-BUGg0Ee4Fi1a zcOH4D!^p@y`niv>J%9Z+f|DiloV1%o?EPH6=i)Z!W+V7OJu}Ae>VF(y#r?? 
zZ;)ehytWOi&Nn4<(7N@Tr4vQ@){Lo$rHA(n=hqw^>8lNC8 zL^oIuMx5igFDH8lf1F2G_@sgyj5*dXUjLgnfnRDxXwL~(piN2t6YQfaLrlO>_T5eq z0!EcwSLdUqkw8byyV26&e?e)|Eur8kX(qp{3_5!oef<*q#bMji$EWYC{LVG?o<7e`0??ESv~yS4G}_ z#Qy`|KH8rV6En_jV!yp{z1aR)?*>Kzu0;KfW&B|9h!6jbv;E%J@YDDE9Fl9bXUSN9 zK%}kmmUYgukAO@*;;!&fTEvAzC?xYS#v*bQH^*_iI{Zi~&o>WIfr|AuFX2yK!E5;v z%6d5YoP+qUw})n0e~>_ByaNo)O*GURK?9}PXhg4gpx0&4+~6PGd$&5sz^H~|M(rW~ zrEooBo8Nqn^rj#!Uhyd40xos`Q4yE_-N(RQA>TV<^#5vaV#hk{Z@w7CpLsXtA;eSR0wTr|>)k{7%IbtnlvWFMyttjCm3R{)5**CE@r?0C@%V-5~aHPuAhB3;ZC~Vc17} ze>&R7?U=J2V;R<8cxBwb_5qgR1R_kN_e7;b9k(^PT3 zycy;Yw-2F=pV|xL2o!{N<$ZOd5)x zst4R0yy#hgTEC;@C6b@8XI-=awY(*I#+r~VI&TARClh_pPiDi}-Afe@~~P z3>ieK1V7c4DAoW0{_3j8jRCA>8FKCp^)j>uvRt{+fguWyX#obnbj~B**!r?`gTD?t zJo;NLH9!2a3G5eIgJ=Vxy zKY{Ekf${u<-v^dwTq}R)FLHZ7`HnyJnBTgtuOEBn4xS643h9Pg2w?Pq8T4Gi={6CP z?`t1y4k?sCR4xUP0_HvPmU^YDyel)Y&y~Q~-VvWd9Rn2FLuQF}w~#JXrI*#q0JR6= z*T@}al-{#_BXFXl6m*X$Ni2^ z__!FqKXn!u!?BCyADsx>_yT<8CiD#1!(5z#%e5>71&S8CfW4YwnZwJQ+-2c>%bNzAGypXmcW9pD@|PDl>=%-e+?DC$yCJEfz%`U zem~3QY+Qe3f|vN_nTd@J|H`qE=lH=D{j9-=F~SG_=I%(my(ofRA#hL2sId@NRe`lR zaNVcw$B6@7nTuG%Zxs{Z+LKg`Z5r$QEf(cSbwkq~u!$AusVG$+Ta+Skn(pP#(ec5* zsR!*$@tDscl-6T>e@mbNCl#)UCv(ym-=fF5R*CF!5q~5x;-4`z?lq4C=jy;I+CrGB zptS{iVvl-mTaC3^rjuIF7NLf%?O2b>DWGPtNImn2BS8_)Puy#4lYg#r-JrG>pe+kN zAzg}vYr*b@sK3H?sYP_WE5$xrKw;HXFB+(6qzpbgmdxune;#s2@OShhQIP+aUHjcD zf9q7fw&BFVfB56i{}mtM?}%|ot8YBS_t;7wWCwmdVnI!v``|w8t-YJU$1FAeIYdTq z;hrTeK`|$rgHv`Z=57Owj{%J1Ll?o)8#bNwX&A&vw(y&bps_#|fCwao_4*(Q$fFbf zg5ZyR`pf_Ne;p(L5cB%=d6p$|@0_k0v?U%I6ex7r99xbQbre@`3o1~M5gSwNlt1Yc z#B7`-X1gHOUglN0BW#`mqqW@#ZQEQrCJ@tw8q`@LRI6{@&yf3Y1 z_jX*Bkoo}Y$SE!|X`$84%f2*Vg&Xa|iRu^HU;g3Oa2>NQf*&qmkBd6eMA&+~tqj)0EV+oME!5s861czlEwwUyA-RPR z>E&|If8JygqkbOErqH}B??5ag1w7r_8pD*R;~UpmBU4xieUQ37RKaudZ~lbz*ALbf zaan>n{j0|BJNCjaHsbGoYNouQy#3x))m7+jkiuXm@+!i7Ak`481E_?l!G>XZzH}A`e^4D9=}VqlUP$*<##KkK%&eV~)SYuX zvcKJQ#+$^w(5}(X`2X1}{O;3#Vu*;%f7cn{ivml%iyi!VUEz{j%9pHXqP9z(rLiPe z^3DzS;^5xoK<^}1J}(1F*7{=>K^{$8AYuvkg5$z} zDn>kzLt(#%(y8pnK8$}L$=3HoCBieoOdhZe=L-^qpKXRshan$qk-Xk1EO}Dw*m=*um9ek zlU`6>89-V_SX)tHFJM(6;rt&$R`QWXaspVCvtJ5aJR!k_f7xzV#QB?cVvi=^9Ztqd z!19j{6_`htUvWTy0^)LVQP21aMY*%L+#E94oihPOUW#1r#Y6B}?_#JjRLYJ!e*iPU zsP`;01qEwcRt>^VGTiQAz)d!3NV15V#CE$`C7#m_YHKfm(7961za^9aS3s!0GB&(G zOOE7iJh_(ksW_x^)NDaXwOi8^gcr`2+&MI7{9B3;p2|2j$sPpXv zET}KeOE`=`)T=N&c&u#>vLt$^&N!js*BOrE{bqchkoN|8Z;tP2^1deT4G!PT-xv-`XP}BY5fZ{1(o4wCnJz_vT!O@rixN_E zAg94ff5}i$!#6k|<>ow%Iw_q0K~f>JVEbDU1xp9+L!7fku zs9?7SOA)V}k)lHTDV&sWiX>1SN`*5PUsZ3QF2MEh=%)(ph3j!F|Dvc0X-fRQMnVIm z&pup-_T$L$hWy@>0~zqd6$uv#>8~eer#Oy0m{g%&9eFT(U(KNCf%{o^_ zVrk?xZRb0a11U~1V670`885Vg4E`i|mDs_x8H2iTEyei)?g3Q($aMiF=N6bnaNKv% z1vp!2{2uN_GEGi2R(~k@n__oG@4zyUhT8PHR4-NcxQ=nFC^vX_- z$!`wGZ$V$1d{<#^U{k>kP0sRG;&;j*bQ@d;ZNkK8Pgq>rrK8e|A-9ccFnk|^dEe{d ze!KB|;`e)C%78R-mBvZD9cKA z*>WKJMSr62@LgP&>qUirb!t6+|K!g=m(KAxuNRXQ!W)XdgRE+fzT=H)y+d1b_|2N! 
zH`*KZ#I^vEkPe(Gndb`~FM#h0V}++}7r&!wK-UX`EhvLDULz53Zqa?)ltbM-2vhl) zF5p=-axS*jiGQ+^crmo)Wc(Lx9`kq&9;QOdK=+Q-x5-j?lhBjMv*Ug@wWJocL$#~sszjjUp1d!VCs;9NW(ejkmBT%`}M4d>%ETe|fHs3F@J$04pM z;hJRpNVrc_C=@&NYcgQ*#TYCK8}m*U6G1TWAI`CXhsC)?Z$@&_^NJ_Mz$c1vSf22# z($Je>l$+@H?=i2%{rR0V`yQ8tq33vAjH05Ka(~zr4$K%}6f&lGUJ1`$M96Wnf*u-J zg|!;TczGNTKnvsd2=7w@8-wFUQ3!@MQ*i%Hj&|h}|F8FC{E|K;b`aMP^!WQZ#=qmz zC-VF%t|R>G*df2QAoC83G?3qKiM;M2xc_eIgg6 zy}}3jbBq4vQR9Q(!8}2{&pTzra6J1S zABJA=#^Vpz{=fPk>q;U$!x*2Lc)vwIYJX(j3P=dR)=A3Gyacm{KlKuA;9hv&k2VJP zZThe;CeZK~(x#hEzwj6@u;T)P#ar#5!9L(OGju^G4lk_Yx{UFJafR_E!A|1emZQgc z56K7ggPcD>+>hxF_C{R*g005}_0eiA>V6^Be&BLEAEya;K0(9tp|%>&r{VdqVt&UvXUd9w+Uaj>iqOFLn^*_#Maf?{TKp!~xI6;WdkO5_Qf1 zvW{z0%wtTGwHIw@axd7+C&)5docF+g#D4Xo{lW_QAIF(s=Me25*K$otGqmN#oq=$e zgOn3&1?EBrRPJuBdG1QSGu9&k(;N5BQ6 z2pTAm|M>iv!xCvPoIi9gFr?%DGTbZHLyXn`v`*lD5COzZ%`66evlwkC4}Vag06Zq< zH<`PzNjlbgtfMKh*9q~Z$U2H+1F+~k_Q9lImt;MW9~j+a4Hd|m7=6EUKw-IrHJG*@ zyQD!48kd2nkNh4urT_UmVP;lo=?ymVEfjp-BSelieu@U3B_TcrSy!Xt111pgCA{!& zA}e(*eAmqzS)0lCRc$7f-hTytK!tG$8TWr3V`vY*{qNDinK54O<_mL zdwyKgS`JlK|I+pn<}~p+68x6LD3}96Wh-P3kbC*whfKz=Lj1*l*9XL&;65-GcEq;B zr{H8_uZdsr@9vxUd$9GSeY2f_XB+S@X%}#$VGjJm{SaH^2$-utSbxaylR)N%>xCCh zfJVUBpB78TjRPO}&pGhvi0#${9_@b`?+x<{V+wW?W3}be5^IX6!(Z0JZ(rdaVJjJo zRrs2C&c7eS^<=!qn)Q2bg0=~3pPYlb-x0{C*ri4)k;&MK|Lb{fqK{#F4A_Rz&N{m8=%xx{rT3mwyu9 z@}JIiac=u4Af4=d=L>0@|L*gUv4(Z6BR1aq>HF{eOMd$Oh<|?S;EVo@6Znhnfbj*J zf<9pW`uzlJMMM0Nzg^@1+Z=}7`LE{+nI8jO*$eT*JuyK+U&}>e5|2J4=CE`a-^;{* z{P*(*&w@7Hjq3}>E9NXRq!Q)}d?*9=yb7f{`#%N&qkh2!g0H3aJ>2N94CGZ?gPNt5>3V<{9+Q40Pg?-<_}as3_=w|c ze|-J@JouiUlGjLhk@Lsb(f9oEb^JYld=0;h#0_}#?SHI`g<;H3V5~e_Y$e-{z+5eI z9{e+)!{NV<=MCgMbj)}jo{Qj8!|^=$X*dr`BILX+^l5-=`Z*8UeuP(<_;3Iok@ot& zMnhqF{l13p0}RmjHRch{2R9X7W1isr?`!x!IRE<^;}_?DU!$){=!IyO9QbB@F4RYm z``G*BK7a7r<9X<}j`-K(d5`b&%j0d>B(qHG2`w#w{%Z~T*=UhCu z3oOjv=FFM+@gjrsi7yG66LM{2Zd}q&Acq>yU-1GtHzPiM@O!QxjMx5iF8b7!{+yeS z_sssS9R+#3_VCj#8nI6t_1iA_*zWK}$2P%|L9YGhT#Q#&`g3kU?m75#F80f0MI!r7 z__JimIQ|(FwWNinQ)6nFQzpS{AjtWq zCQt5xWZ#>;Q3L7&e$EyE2K&Y#kt)U?SX938$?-jj9}CFK{Jz&v$_Nm^pZ6B$XMZA~ z?Z58hGH`wTF=Kkpd?p`AP7M>-HnX|>|gjK6V8iaKT< zae6!*Vg-oLlXxm&E+*ex5F8Nl0HB}-`j-SE(N;Sbl&a7K-*r6juDFv9-y`o`yjLvz z3sPfz?*F_8U!?K>=y$>N^!MKd34bkqw8}IA&bARJ~;reLzabDwh#&Lz`AAj!)DJqhuBKM1a zA@}!f$A9=;EXDtH-S0Sw#&*!Sv?IsK`D9*=<8d6vXgAD@>_6NeVG=?5c#-j- z^6&Uu=r6PbX^-!D<9#7t1C;FddmM-SSMwb&I1iciCBa#l9kE|fA|T_5q=Z49M(*c( zpGXFhe!_htrTQk%jemZ#$bF9U4d=ri05^d4Aa)Pu;r=l`aeXq*zw-jm|JOLd?>S`N zqaFX`zbFPgtN&hr^!rCl_n1!_RbGh$M(XY`R9iCCv4}E+LKB`fVvrf>0+$R8n3$A& zbvJflN4kvU`A9xXBSm*;Hx&%Lq)Gku4b5dWw z!QNramq9_5%uDHV_SbQO%_8lKlwZd=e<{N}AFtDMxSjr||M~y= z--#;B|8m}^r~mWk)upJ1|7Pri)As+c_Qmyo9Y1cLLw)*hSMvS(`w6Hfv;6trRy6&; zwvY6`{iDhZk)E0T>dYMJwSsZ*%^NBvwo;^49R}?HCx1eL1^l=9*1u(5FYv0$4pcCg zROG>P-A}vGlM5w*;!b5{fp*yLxoa;^SJ)Y^SmVI%plQ@K z(|!%jk4w0qmKuOAv7ll~|Uckax+E zz0W^uPJh*F^X6Q9h^85aKHD}sYobI?oDtJEb(L7daH@K?Y2;_TJGTdj)b`?80^wzm zZVObhK9{O0o{pDy4T|sgar&M>DP&(j7wd6{m;#IJLZY|twd*g-{_>(?KU^yXXk2CK zsBERftKJ3L`cfOu-3*c-s&h}0d6=%8xV09}i+|C|?aW6VJ@s6#s2R1ym02Gm@x?P& z*Rk&|`g@kHC9mO@5y-$$PA?%`@S0fGWJSBx%6WH`PrT(gvZ*O%$*OvWR61=ch1|FW)!lLj0ja8W~1pRxd{)e>{CDW z!GEhdM-MWPZ{94|^xqw-SNWED9bRWf*$FSY@S~DxgwGTH^k!|+(Rp#4Fy3`R%`Wvb zIGVG1jlzS+4}U-JB+;dM2|yl^m5$GZmrtFQ{>}b!5Wq_1oM|{EHj< zi?_dShbX_Zu289=3^`fqHa#z*={8Vn_J3?LnVvaY{(PUjL{xlNP*T4{{`}LeTu{Z9 zdE%{=wI=#|6MzSvZ@KFbbew$EJAwNC^HnS^!@7tk#p?jg&1BMexe=;q>LA<6*r2dozD;2Xd zB&V(?I3mT~<;UCr{!eZ1~+L#I}`34Z)zR;=zrS0&L_ ziRF7>MIEF*soUhf3*Obf-dx}D^ z8UJNI9p3P+0Ea7~UorjOukBjfKEvhWHGyvPL4Itv-YcF`!IL_y>UQ;7Yz~3Ex*K&5 zreF4!F6%|(?J|0|=MH`7XmK7G$ob!I(I 
zTHzeCX^R5Sm(f3DRD`ENQ-<5a2^G}b={23sfP_hE-CN#>i0)Hzn9ZD>x>XGX8 zb-&AFkO4Tea-mNR{SZ{UkbhTZY{(hHdyb__IW54$9Y{(yt&XW{OrG2MTH2%@B5nyY zS&QYPy@-xaQLoeIGoR^ePdT)Q`gRy%b&@&;p|SzLob`zyZ(H|1Qd@h`I0eGzQ?qrw zztx*#ecsepr@nv$ZM|^R!8{vl_IX{z6le!ps3GUBh zbXY!4(RtRa@7b0wA#*CeyZJ&4E5kkyn*%gwba#d6F$vxbe}fX$)8!*7KZ7!inYCe%K!m7L<>ArvefVNB zp0dtY#R%U@TgNC*he+J#eVtL=Yd_hqspI9aTru?~{I=SKVa2ctZ7X zHhV4t`LQdmx5E~~!OtkVuHKvJw1&#K!Cg?CDV|Qs+}xdmpntsHa>3eM=39+Bzn|9Z z5`^CC`CK>9d%4rBp9fksc^x?h46TaWGcQjyRT*Ro-K6@Qw{7Fs2l05gO3eh`3!TgM zi^||VuYrn6s3R*YkR#9QYZ{*$);>&^yUB}CHh4L;p3OH)XQHEwM?Yi{6#iMKJIof_ z=Y9HNMSFcn&VNa7F67qn^u<(v9?!W_9gK zv$shqZrfSG1+%>5x4=~=62A|YL9}_ka(=$1PbQS#CV$zbZ|lOnz}#8Q?nzzVn9TKS zdYv!Z?a@JL{f2peHp~PO$a&Z-v~qn(k}G@s&^ojQrMp)`268PwYZnpl zC%-VfRFXS~=g*gCHd)YA#VFq7n$T%Xzmar1ou(~ytMZN1t}S59he&?xH3MMJ_U^64 zYlIoAyMM~0U9HlZIpxR0R+g@kcVcfy%0&H)UTuYj?b^6g-;?K|57%;e9txtAnd+o7wCY zlnVR)>@Mg%u+kMTGmE-d1TD+G*nbwc!VKxc>woW^mgXVgAl9L93+6t#4ROOK_GxV% z2f9W|_;Nb3=j}T;=8yJW%VN>3v>;*9Rl5e;nsLZatTbCcuGfYUE{~SIM0UG0#4@w! z#p*RJquK6~3Ddc{&3o<6tQBsZ)@_>1FJ3|4x6=7?+g!r?}URNbAPd?G-4 zbYZp#tU_xFwc46D2DO&77rp)Sglhchgnzecin&ZT%iT(M8?CvR4SVCIQ|R7LNffdR zl$;g!+u{Qn@l9KSO0628GOH)SvN;9@l#s!Gv6)~*Hm|wrrnRv-AN$qG)Yj{Fz3}IN z9aBOdL>X2mDU_XyA`10j0Bm@Hno^_WB9R;wXi!U+3=N$3ey40p$|{n#8NN&<9`$`uDIN9 zLNC4+sLQj$R@!3omY>%#_FYdA#Zd3zP`nODf4!`|=_>_<7TOxnkB4%gX4=)bo;D_Y=s zExF0dyEcbHVV6LZamWOosDGE+fK@@56)m>+ay}DJ zDEoM9^~XB>tmeU^Y4>}HhMOFU7g%ULVlK431|W$7CF>(@FWtgv7Tj{WSv;=kijoh| ze7aJZlYOQ&dSJ6)AIX~O+7e190;@G(4YinlaS57tedW#z>MmX=Ab*IJ)v<7&{@!@5 zwA1muMUa{1-_Q+9UW?0h9!=STb)uu2OmAj!^SZ^!`#?(TG}#?du7`>!IV!=GTJeH% z<)KENQmXI-@%AkrCV|sJYZFDuQ5%9;(KrE+Rjt_-sA@6*tA*?BDz2XFgGv%qg-Sv5 z98?v1_?=Zl0`X%MhkrJ;=7;I$#vuL%d<_bMkKF9U@#<@HDkPp|88ExS2U?wZkES<} z>i)>6iB9ANv@PRg#hzM}-TpIstoECk{X#YV)_-zWt6^6{OyC~(^Lux` zpOy_0c4gZ`zXQ@YtNg`@Jv0l;YbC6)*S|sEQ!*h_DSoE%E}Q2MFxL3O)3A2;G@u=D zaYeeAJOb~&QXuGll9lB)dsxh*kdhqb=$jH)Y4aTo28gD+{cPUI0mZSjZTLzB1!^m{ zdHRBiI>X_oQh%_Y7LcUh_es7p0^V=r72Alrlaueo^_{_*R6_!HeuB2X-hZI?HLMQD z`{rzFts{UYZ#q$i`!@fqs?XsH$;bG3OS;*!fXY^$lmiIlZy%d&6S#k;=_!J^TAY5` zaK${fhu!;;q=y{Irn92Vx1y1q3YT%W5xd<#X{9;k;(uv&JDvtrj|ZuCY0J1Wo)r$0)zEZjYTUcJ-uL?G{M!VJpJ~?P9UWtsN~dw1&#DahtUJ zj_u0)IZdg<{<4oyQ>Cxo=5hcPCUc(n?=^H^9rZm)w8<&iu0Pf3ypvWev#p2qL1(vO zY@7D`w0|(!&+#1Xm#eyl{`BHP>!8$~X~iR3t@zcPZ;!|FDDtlb(;4}^UO*MTn3$LC z^(c1>Zhdo@%X)K*PB-2@7Dz3}i60aWQuz0KFnMpeg$)O&ar3KI$PIFY;d!%*DA=rO zXRCT(pI=crjqAf|R)=azzqdjDv7^gr_0Tr#Fn^D_b^!s&onkKa!%Vh#9y;+g7iRUZ&lAk9=s|OkON(*(6e=y(`?bITbS1wGk4*m;;JE?efjJzXjF; z+Z`3{BNi0jv)6g}n1N0~KY$W%G8+z{8lMiA&0(Xk=Lz&#o%Kx-d?OaFd3N3Glwxh9 zhJUh1<6G;VdvK}tE-ce@%N0J$qZQBZ{Ph8GzRE*@Wts9H9jU+v&&}`9C^!zLd*gyT zHE4RHUzItC!Mr=RA{z;5ISA|3b9-4WXR~@|EO+mB=~$cesej)c#&~@0oGl%b=&nEYUR{E>5ag$*zvh>V zbfwQ0v&(kA*Uvs(C8C17_^#DA4tQm~!T%JMUI$y@HC446&0=r7xVP(htDg_|RQY_A zL6$9)|LwD^p8%b%$JeEN52f%n+ZS7=N{&KxU{*V>@xh1k%E4T)*!ZvKTD|Y$34i$X z=g@+=TaQUSIrBroC~_trFVABVZ&fFy>zmf0^nOzbXD+**-(2bIQ0SglP`>0}1=ns1 z4T^VJ$3$mfk$qjWQu(!5oKPbV8s6XtO229q4g6W&b%*rOtK4Q8UoQ_$TZ9|&o?Vp1 zVL#h0l4&er6s(=q8uF>Pta(#CSbuIAn&)40ZpY-8bu1OwD{@nyEfMT|C-z%XK#N_u9zA)j-4_2hU+1ypy0&H4Us0%46{rY!Cq*?#5PyUvyzjvh zg!csa^;&tLimwSIWEBZ{h2pIW zWULRXK565g#dYmM-55mt;IU+^{p)#C|AFzL_gX-La{N>u@9$FmEPta|3%#F_dvBM2 zKS0Vr_lGJRG{HKLWM`PSvNM)Oe5a4Uz2^6Zm+agv-umXj()UKcx`n<}Ag^#9m6uwD z;BO>)izyUfR;puXVp`UGlxR)d(n*y9`Ne8EKy?N8v+P8=K83e_5UxdKpNr(9@m`5P z5;4PzaEZ!$HYCgV+<)@g=;bT;R=1P9ZCH#|$At-4*Hmhv!83Jcp>6(y_p`dk9IHpT zm=jB8M$REVp!UcLEq!-Ah|!7Qp%44qxWgNQXo`e}12OV2iPQFAtw4i(Pn`=fX<98c zUyo?8jzsuup0Q#lY8mYB`KjlzlLuIaZ&`6Z<{rCq$Px>rl7D~v+7=JB(U*i$|?e}Q1LNe^ts&@4e#fFLp_D#G8 zr(LCa8E2I@t4&EPDuvsMyxvcl1cA-3 
z3$3nqg>*K&ngJMi3%e2?&mZSKDd}kIbKm$ zzos-T5Pxj-rUowDPG1xIpRG!(X#izQi}u|8rh8vlJ)M!lQ2sb;q4++|WpHtJujg7m z;M#t)AjTk!(rI+HtC~D(eYSBCd(l$+%MfC(gC+Sg((kGVk@kHKQxtfX{w25!Ai4bc zLi1#b$X2*~eiEWJK5l*CW$>OBs;3V1&__>0;eQ6O=qEx-*FbY*Zzu9SY;-NXh3cZo z-i@(8!NAiRyr%;Od_9^wUBjJ}?|Z&+@Y=YUFkr;=1q zlMr9Y4=L_t5tGvwfcEc4ngs%jpE_r1HcoS3RrNPW%^f(zLmy+@kyWK3k{#m5Z3K+b z+JE^Uo?3JIG6J14<9)@8n79arN`!@zJ*42iNB^=0;8iLdU@m@-0ugdYlqNC%xW}jX zX~Wq+jji`WS^l-^u?E0|+q=(&P*Eq|_5RAlD)$)}Gm%fryzk^&1i%s8Ftvjba- z{L61;|9ztGv0rWL_xhQB&YV%$uFQ>m$UllRC3Td+ z7Ts8+IoX%;fLr!!pRo%`-es$=dI5SDQsnpdhECqT#>EtV*y)7V3@p;8tl$4WMt{Fs z`PTXTAjY_N4sSj~O%H>r2xNkIm;gJ)@t%K_+ULERpeR zHefdZRx`|9M}A*d2RkxkQo-jV^>AN^$o*5eSm7LKRlcB5@@#<20*)Vl+3eeEcizi4 zU{-L-^ko=9puQ(gvyEBieW~{Sy??M`1!@1K@uujr0kE5mU84w=E<&Aw10bJdXN8!Ps>RTHo&D2dh33*9Iu?8asF0^*G(h37ugn! zpdQIm8oBF3>FUmwz_5oy7&M>u4h9hq>+oC)tO9wpBUs0FUq0qw%8fh_&vo8fWQ`&9 z^P}y#8)NK#|1@cji3YBnF-n88UBAxXd5&$$vR43>16U=(fF`J$t-t zSvj({uXqCd^xe7AaZN z(7E_9VFH6v*h}9U%5r~$c3%hp5*1(bXy{IWg^FXek*f%w$6WrRlL|vlg45muciM3e zf?jnd;c_LTPMZTT5O%LkZF~kv`FvSURRb2oj@kNd*njX@DkjWxx8K#Qj5c1*qz-d9 zrSE{W*=rH!pV7kpSyVfQA~NPRd)+Q2SzY|RSriLrsC#-W;Qa7~CzhRH35`P1y`Lku(3=|#E;1#!X6Pu8JO~z8j zdIl#u|7k`(eosj6Q^Lq?b;RlMQx;a-_<3bPOZHavXcHhfpaE3E-);Xxi2>Qh$lC^V zuQo;-yd5EgvJhe}wV&|nj#OsQuUW0mHtBoz-hT-rV&T0>vM#L8E%|25B(Zk;f}lji z9wo&;q=tS&ZCrew&&8&mt&k$i@va|q&Ob0QBS`o*eHOkBVQ6FboHrpcJMru)3RTAN zgz?s4c!E?r;M;iE=||XYJu}p6=HyWE`jxeMZayW7xbWs-N^BSW!1oE~9x-#3^XN9Lf56{k*c+V**4ACk6-G7*YppXX^NoSG=GjxTIR2!xu?rYm7@A10= zz5#>1vrFL|;l$3bh!MB~ppYm6IzD`VU*5a%>@Sdgg??at^=-z{u4m)xAc4jvCf&e2 zQcS=WLA)OVE)wP4RlCB1L1_}ogcrb5znRhf<9g+a5AW@+!NEgFkDF&G+4BKr>VJ+@ z0w-B@5&^K!vq2dJ_?(L07Q)X%?iI>+p7dDN__Q{5DOe=>dN;jhBVh+DIqia`u9Re0G+M`wnSJh9QOz zIruIX9$3G-Q+5=5;9wN-J<4rAmESkhKe0r`t3tNL?{S85(*#-p3m#V0T*>lK>m+Y)NnSxul{3h`wQlB$=r)fs;A#HZ)ebgR{Y%3to@V zQsE2-GDE;917~q0eMr@_M}JX{{;4o`HNSH{_4Ju3gy4>{!A%ej-nQQpj2@`+UdZs5 zpnS|Wt*{6;QZ74b@ol3>y?(|OZ~5Sz{#B>i)XBb4o3H8!y~;*s8^CJl5Y;iiGLSc0L<3ZX9?J` zuYp?Q#&z0hEj9R#J;|vX0n_{JBObuPF1nQ|>wdOHX!;+V-d)$^{=m9uGux=HmCqF= zU!`O`d#$eP!}?i0=w99*1F-Qh)R=N4@yo0%3Xn*fanb;U-j`qJK9I&PiUD<+uN?Or zT0_3o0=sZz1%p~hIDax-*%H;!t>wB=B?P1au+0WDyX_s>755@tm4L!>-1}PsjdxB5 z%)Fr|-x{7dvgcm#!o}V@>|=o>OXFo!Kc_dFkHLsfrvwb~OKCboEKN+C_YTh-L*!xMH zu<9X<541Wlu1WZ0cHNauiArcL$Az3KL@7{1ORSUe7EtKaNOP4UIUggR5>6r=kOQ85 zYY!b*vEmaR;IM?&#P&C{{}5+c zYs3uVvBsgoca^mRHJ7Y#oOwJNPkRT8`+VZSjW6X>fq$dt&zv8RbYn<4S<=9L2`b5c z1A{dWGb17@6RG_kzifJKJklq#(ZeV-<5QYBC&A|ePJF%Zrit0mHuI{Q2b?S8f0yc| z3Hx){a3vYijKx!KImGq&5Ikh(=duey4m(HYPkMzT~R&gcrVKE zt&tilKJ>9bQVaQBW>Vl^4%XW0d%>|^i0pj4k$(!g!WxevPDpF=ulLIf^G!ouXeta6 z;5+ai4BH+(a{$kTh3EU?lmv{k!Q?}I#$3ncMc#M_Fh)43fAJd%EN!px4)|y{A-VV4 z5cl~BD=jhWspF#AAhQADZ{pygOXCa!7YCIKie5G)>CSQy71sYpLUs?-wb_{*bH`QQ zU4PYGTUfuo-{iy3$9_oQ1{;)$ho@x%VS)uPzq(ipPNSPt!cZN- ziAX{8?7UqDQeZhWAsK>t%{aXv<%@Y4^Q}ww=i_dnDH|5c$4TE#AKWmxB4A#Oh_l6= zWX-$fv(H``ZSOyKTA3;Z{xhdU@TBlWB!90Pf49=g^`Ewg(Nz z$bHc%KQG7=AX%-NxR)TLY>_L$xfauJ0Vu52gKRoZ7lJ{QW#Yc6A?>gZnZe^R&U+CQ zg=vJ-uY2QVt{xo;_r7H#MR`J+Z(uI+8@;8q$r@+&u(k(T1Qjw#T?GLl&iFmyE`O^- zYyO}xkAM~Nm-;~b@+n4bo$^(ufY>7!Y!rCcCvBvVK>5gqkR;drU;qBynDu=Oz3ah) z;~e;kng34hw2kjYV$7A`Z78-8F9@l;qVH!;WdaEt-42{wLo z3p60X&%1$8R|6E!6MiG)c2Q>FlHNNzM7%rf&as~YJG0p{cUxOkxGou80i3`OMKT_D z&`-|Ko#X0D*OU}(4A{16Peg81>Oq7b6#EzvxlrP&MSN2B2E-M(r@S{@5r3^I7)E%ux4K6 zp;S4Iyw{YM=H?ZfE3R#bvFn{{n1Zi>pn!nOz zx7|kwVO&<7H~2huQF?%se-*cLb~F0k0OB0n^WR@7?J69b*{Pw_Q_ZPswD|bxrXy>W zE?%$?3#J~Sz=@dvKwIPZ941$=W0MQ)K;Q7Wy4jRMg#1>OXT!R3^MhW5Cl~r0E-Ark zI1f)yK^-$8N1Uel0DtX_j_m{=!LZhd-O@qVuYJUPE;#odmfmSIBGVsKfH-*2tNpa@ zwOx3Ml}Ex>Ck4J?7UJ#QRr3nCU9IYreQ 
zIw)cnGc1Wwxkaaucd81{(2x3&+sd-QF%9<>7jn&j{u9Dq4}bTF6^FeBW|Cl{`lSiI z7?iR;pXax3B`P8I7D+^iYtKJXVl||iRnjA>C5uD1mZS}3lB&+C*Fj7H3glnq)Bd<2 zLq4cWG?3iZE5=$c@3Vq~f;&k#C)tBhO-} zi1v_?02cuTsvG5(0hc~rJsjdc70i1eKhimH5akk__X|^jmM&~sUa+dy=}mVPX_2Fy zFV>u;UXRib&{n88$WEMeHCXtw2kCrD-XP(*bhbpF zOGA6{3I3q`0ij6=0t&718|3>xiB>U?Xp#Y)Rz6;?wC*-|4t%Y6d@r1;BCd@fobn{#8ytRZY1-h6mdkw)s z(syftjMX~Vz418*(AYuFc>N_Wo<*Qasd%ydA zhNXId^wc@>m_6Q76ba-o052mS(g_Be_Nnn6gaIl2%2jg+tna(pD!cr^X^S_O0Co#~ zA!U@F#HL~Rh(fu1Wc|XU@6K95o$qIRTR1II)qWpA4uJ!q@5glM%#szQaM!zf{yZ7W zkAG8_5moV{n6zkJgNdZqnbw=)7M=xbrJ)uVbuJVpY{3EGp`i*`=4G|9KJJ~{#Mk!1 zkyr4}T^MUSxq}1`*|hOan2%r1G85WlI0ZA?BZKUH@w1OjnqDZ2>%Wi?I9x-!C+rxG zMzWq-9=vaWZZ3`H>t31)moZu@yL3e+)_;>#WQz3Xa>7TE4k78JVW#E8l3OQpc%r-{ zz|QN*d`zx(;{rH1RooguU(mTbAlIwJ1U1>%Vrrf1UdAyi%`Xr5Jm?zu5P1d*RAdg* zUuMWj!C)8DahQTx2!B_B!7a>9JiiAQ3$H^{xHT;S>guh_N~qd2iH57`l7xN zg}u)FS3TSY))hcbdZpJDVp+HkrHT7*p1+GCI#`Rm@%%O%^Y|(5FiR@& zda5hQ^61wW;zR9o2gx2u)#Q8hM}q=E7-J*;blXC#Mq}nteEBIFf3KiX+=}-Qik2xsv3UbcMDcEkRW&j-=&chhwYSPU+j)p2m6}>3A%Qj4e^4{ef;T; zWg}(iJJrj)!SFV3_QTL&uvD!!A;q!%Fd=`P&vCTcCS<@xMe>Iy7Jx?T3nJ9_TkNwp zF8y$MSJQNW4Vb26hi$`y#(&RIT=24ftG;j zK<>=OPT+o$GmVk;=jQs~+9ADW6(!wG%`*ggl9qgC#7FO14Y9?&8r%7b@M;2z$7k}* z4|lljjrEcT2iWNdrOQ+qh5HEl6l+iMX)uxKUfOA7=WG3y!fsS$nSYNM{~^E4WAb$# zE$`~&dEfU2tN>MlN$*a3z=(R1y-eP_;16CSqVHIBqYHDB#`)>j;0tn>jOy35O5%yhU++0L^l>Z0()2^NAiD5nlA{)~@Xn>P11Vgp-0j^H*z8PiW`M%l zV~4cOyM-Pea*zGIh7m-IEL`dt1gFAB`w*~T<@`j%p>dS=f`1s>FfcTd>~-r1R7xa; z5-Sw+?ZB_teVBm1vpR;^!w5PF-HcCJ#bAAfx%9r2kTz%Yw7I(n9Olok40QzWwcVJ8JWT zHYYs96U&LOu73d>zlnK`Fcw43-s)JEeT|hG)ej~iP;SWwws&vyStr1OIGwnK=vv=B z7B7J+6QF|J9Zt5F*6KE&^r#Q})6+Fo5gRyKf%p9nme0;lwF!=4gUbH*4KH2DO<)J%L9WRxq4_J332K^+)TmNoCZ?2R~n*i=~% zc~`ubIgHU+-gwuY31Sc=_N&IYZbR67%t-_lP#Tw}&AnJLMTDk8Wy)j4J&j(dlg9ku zod_&F`V60uFlp}ynwgxE3XmIkz29*e5qIos@D=P)HnbB9xG?`-?jWBmoGWi_Qi3{p ziZBX3+JBUWvK4T#R;U{ZkfWG0A2v#RFYMj+$;ll+_9?FGbqseh`dB!l<>NISD_6OnG3a}y?=YY%>f%yrDftHD2R805ykx_RA0 z@Mp%B$EdGE2eI-Ul$+?M{AoUf9i$VV8zI}U`8$G&^tfB>AM0xN-pr0m@Y&H+|*F{4T zC?+z~MVs14|1uhjOEQcf2QG5$bd{8I_0U4~>sZS`NqdUrj`ArKV#Br|J&CE1odv4C zNQ8gob)Wt0Hf@k`St}`Z$j3|L`jH0k_y7$SL05fu7T|~U4q=952F(%3za`YVqNq07 z=`bHRcvf(A=gQ?;*F2+AIkTD&!Pfb+C_>hPSLE8^%D(aoR^oS$RM%t=*3I7@Lo{@7 z7=bm^5}jrRt33SbO!79$2edVUUvODq(QAK#8}*HBr707xSBVs;_*C~>mq&e<*cyfx zuU9h@x=3IT&~yGCxlczc=omNBy~B^|5|$AJ1K0vGB`rwi$F?Os-F^#;?x`L7J^VeP z3!s@BSKr0ugZZKwa-ohZ1Ll8$HF#bG4{9$QVjpm#>RHrY{j}0z%weM0>=@MDj;nu# za)bf`m4$<%hs20fFhPGmVD*H$_jkxEt{Y_bJ`dATJF;iv+tMTW?R8p`Hg7f@L-P?Q zv^#x&4OaSsXkwf(NWBkK%%K_b_z8XVC}Dj6#LaN0+gybu>5g`F?Vic7ex6@5_FI z*n&K!_l6ny)^DYkjeNKZEueH!$c}l(i{pe^$^NVXokt2t_T#={4rQUC>T-RYlBMHB z9K{r$_jT@qOkdWkGBhKFTT*|C^Z5zi`(ah=v~UAAO|qnoXj`F5z!uq-DQ)?gA>db$ zDGi9PCq0v-Hw4^#?Gdp>Zu3g5^*YuxamB= zTy}rapn6#n-_54;*Hs*;_KN!F=lRe8_M@PiqqEHIV=c(z4p!3Lv3@>;&7rBu%|EM! 
z>UsRzUyTtn3AA8YG*N$}Y}}}$h1}YRq8e1fhmH~fH9DvZffBEvnL$I!m%W@ zmJfAeM%|Dj%^}SFgXACfQDvgDtqDh)-Zi|p#lq=iJNb5(r3ZJ070%q=q=l52Alv9v z^~V@daN<7l6ItKeZ-6J>jzBTS4+n+WCyNmqXvtks?bS>7IG<_&m>?)#dm4^@~D4Bn4Tq?;{dB zd0G0;Z066dg7wWx(#>uxcE|?yYd#L|BidW~!X(CgVMe-h=nk?(Ncki-59I}G6!aQo zzBS$Y$>)C`e~IWp6Hp32^f_sFWza^LB^EdD1PTE$ z!gh0AVNJ%oqt-Xz02s+_?NBNl{a$kD7`tzbLy4m-KvPiu-6BB;V)weRLQPM0q4Z@I z`iB7G={4wP05`G*z2M`mY)fE)oJlA1PuNim*}i{L>;xf@KdnYEArBaU%uC3iQa=0` zH1R1hTHv&(eim<{$mFa2K6v}a9(Cl~6P~+egAwP44H?iaK`wlN^5+Ez{p%Fn&psgs zlK$+R4m)Rg-nmFB18&%|$D}1R*If+QB3+iW9>p@OPC(JW@?i<+SWj|GFF5MNHnF@D zQhR^4&QpNrxI`6JP6fJaqN<}n&u5?^POfpoC5jI7!C{ys_Il~lJshY@rR*RfJNB!aIu%x9%JounFwK-ql)maM1^o9>K^Uw&Aye7Iqm0y za!+WbMQfNZ$>ub@tvFtDGKB&dGga`l22+00$%6WTu%u1;4Heua-RkeSmxYdXU zjQFwcfwn}_a=FaM)b2Qiv12PT>{_Nx-PpDGLN&ODyE5m6TlcZSrJmjU2?L52hlPU7^+$hSar6g+f~q|{h$->}+C< zKSLOV%o}(*c$F=renr0+TQJg4o(A)woDdZ}ABqkzY3IbRODeGR{cOIguPzj{5d7QR z!_Rsu8Enpx<(H=}xXZ=_E7&-KC8zm}nxZ;Su8mC4EH zGn_kNs0Uw(GpLCk?%IF%P_*hY*nW?XUz&u$=*%I+@s|2n+!BGA09>%d> z7|oQ&?~8F!?*=(XLG+w;p$JkEyN){N($;%EhC&eRA!)Hr?#mh$Zg~CLe%(fBrhjg& zBkJCSF974H>k}gNrY(?Hok80TSep0SJZMbfTZ3D`GS8D-^1tq(eJ6#WpC3A zC^XjSPmATl<%*ASO-wC7%-Jec5RFBIH1sm9L#s{$6`a;S90iR5J@i1tS2PM54dIOv zmcRgXcng*Bi$Jd06waqJ(e$HP>*0V^#Iq_ida+Hj0GkR>G8#ACV92|F4{H9MTRZWe zZ#euOK1rqmxPpH+RWRx2qYZLOC9`~myz9vUx#C?u$$VR6gt=1^c?NR^5)b89vK|@P zQe?Ep+l~8&T>gA%f@)bjr#wO1WFI{Pmu}CUpvG}t7`p=p*D;?gp;~c3*+ZX9^N;x9 z+e2sHl}h*Kpl_j7o+(zmVE^ee<>t$=^WF{UI2#3~XDfe0GF{1Wk1x8fJHl@e40&EB z^K;83Vx5Y$^Vy%s(yg=GTAEJafMuv^qF@`6z~2t{*;Op0Z(ajAF2vZ;j4oD9B>hp=kKY80Kj@kHhNnsHm~hDe$A{rzYb*rdyXj)djTK6J`I9Z@vA}ncM~}t;UGaK zk5NRKi=}@_0yCpuL^nM@69r{!tA-UAB=02H^OgR?0Tyt7)jwqa6X#g4qKbV z7(ks;F(I7t?=3Jb>XBlh&^*rabl#^0TVc9_BgSmRf}+6kn!QJ{cOzU1YBS&a=~Stnt<`92WZdIKk68 z3+s0e)lvGM9t2Fdq?CC>ruzmHVPNpuo`Yr9Z;xW4mUC@9^+=VVzl@FsK%s)KHxkp! z3piU{yas3=1>TI)GAS4AAVZ(_>H?p>`#inPt?_ywDv=nJ{Z8(g`;wAaQx zCvbm34l}UZ!ghcO`%aop+0b&1=RN{;a=t(;I;`a->U}2E?1N5F;C{&ccLiP-Snf|x zsM-4h4K`KM#o^I1Y7{*MxP@efPXeaS_@Ag&6brz__FJCm%~0A|MP_w5DZg1PWmMK;OHAO`3;)8dq$z(AKNG`s>61V1beld2(}yv zRuC-^p9NidF2*H@-OR6*Pfp^#(%8%1k_dol0UEW>dkv+z>L*;_yV8G!_wk4b?5!~` zIsV4AgoMJvLXdgfrWDym7HLKnIt7U+70kJ94pzq5uniOPYe6SmDJC>5pMNNkR)zBb zr#Z%y6eYE7aR7JPOXG{)b{-sZGO8UeY6cX)N6sOg!Fy>Hbtx>tVD=lYgANj(685;}i~qcx~XkENTE*I}3+ zoO~1{T?7VDI{wafda%c*c43Wh$siv^zGaX#Psc1HhbgJorhewOkrQw2l2Tq57dU;) z_i~Z9^& zL4ehgpHC~k3JQN;VQzmeGs7nWU!$cEjFmh9492V#NcR393?^)!>p{IR8-1*>sTsBx zxk(#rcobKZGw6SnF{yus`|@j{Jh0j`A&b09?qrYOzH|p+Rrw2M;SiZh7*vbR@SBC_ z@ueSAO@H2(m&bx6b4EuRE(1OFMJqSy!d+6H#D5jPu87 zDtH(gAu{gItdT9Vg!3nerL$i~IMegDLP)9XeD@o(`Hxc{*p0MjlC_$0Uz%kuK2S}D z@cyR%aJ3CoW}CmM_~gobzPhtWMRsRBP?dAnh2hdEiJ1GF>#YAcU)IZ`c!dqsW}K1> zQzt1ToA-Z|(J+#1m*;lm;B!*H*$7|)4({WL>abfN?Q)GdEnd)t`K!)#aM$UB2poG8vfJW0gI zrzoQO0KsgY+STuLskQ~FTH^tR;VlxMCv!tXB{W`yFy>~&rwn2?JL9Yb;}QcTx&ClS z?`9=5glHTX=Nmota!Uju$WQ-_eo%~m>n5}(?$nYN@&P;5Blj z!}x!TjrmSK;StTjc?;owGrGc*R7-NutQn>VkX;eISWMUG<%pd>XTU9^l*1AHd@YQj;*bmDVKE%DKx zRr6!g#tofASfRAU8IZEPj;H*4SU(!5b%TE>H1|92%Qdt``XU9OVr+kan%!_lkZK~A z+i(oF5E#2$xcER~tQ#G92P=wd>)?9chDEx6?nCX-JVuBLe3j&2@I2XWl8+3zMLq995nO%Q7{vBjAn|wrjrWpX^UzS;RTR9>?78jtER zE-A=(Lr7j{l63y*VG&%&#JgXM`N*U8VGC%nfuS0UYTrZr1Sk~@Ik4VW=EfV^ozs2q zm}3B{kOlVJ$;UUA_G>W!&M4ex(m7FP1S$?qEzuB78-N*#e8YPw=;0>J(nGd&Joq`! 
zVS$^H#+OyTw$XzJ*9QEbXCZ&4-BDB)2?4aF_&1NL`$O}oV1eB3yYkqbp-^c&7=${x zM!Z3q_^CZRUYE>7~;2J5xtE@NZ>7fNwO828{{p>u=a+Gl_KoCGyp(v6c4 z`HVh*nG2r-+k$&6|7C3=<9sr1KZ~ZGmffT%4KzN%;m~VNnpx#UwAy6@!R080miBvx zSzT#a*jU3{eTrY1jTP@>BYOLjJjL$y>^QAJX8#n!us{zY3t!3Gexrf(w5rGVf;CCa z&Jb@$rgxgd_Zb0)8)kp6MKV1pjF>~21uwERKEDru)^!M2TwmI(IxX;S!viEmrLdz+ zh*2#@!{Ri5RcpV}!V}%$gg~c*33gwnWOe_r079ZQMyoGWf}a_gyY7D-0<#Mj9?gk<)o9VJ z7)tq>S3o$$a3th1i_dOP5|fPc)RANVb)LlQi2J3d4lPrB8{Lq%e}5k`sDTv#1v|`aggFN*o{7h&8(?G*N8;L@h(fTMU`ws6Ubd4qhfhme&J;-;tlJ;Bu);*5;l1 zasH6-6UCqL{d|AACC1-fsA4&nGg95$tiJMMD`o?Yq@dcM=TAXcDL>4yW%rz$KREpj z_>E*8hf2AhPofVGtYnVzwZXN+mnZwY!aNDx;az+ZOE)1kv;*^fGC2k`n(BaW*5d1M zv@Vz)aIT|y0EP9z3OwAB4g?uCvS6>z=1i0Rovhm(&R~B*8$sswN$jk%LQnrL@D(({ z3c}rJ7Q!Oq?-nNj8vJ07mnT1b*Q=(YHS`QloPx^xmG}1=yEpWUqM&%t&@^L!IbWHR z9N>E~-@X_u+MmWGz)y7V*y}9U*X^S~fet&qx59lu;_$nI1J>Q0ypUxU3X^RAU<>uB%5~SGc7i?PHsz2$D3HQtGT+cOp2#Lc~@n~obB@wJcxwCp$muQdw3m2-lYaHZ%)`>KDl*>lAE^%?`t_ni z91nul)LjwH3pGN&hTut)G$$xD^=S+u@IyED@w9*M^0zdD1Yn@z_!0Q5@k`lKtarDl zaFw{$0gk7U*_h*GogXLuz0@bfvbS;^HArGWRbDUPfn=n>5;j3AIj9AuwEGePbH{P5 zb(VM_ZlE?MCVU`&T17_tuDXuuZZ)O{@3D#N=Qy~G;Y7~cT>T6Y#$LB6!zUqW47$oD zzqWt&dowRzOELzL(H0L}_wmU-FTO}Pb4UO%_(I=Y zkX;UZdgZrufr)u|A*!?J^pOyp4nDs0=U;;+=Zo;DJOA<)tt!jUXqwz(9Z~>mf^kDDBVNuELU60+>{9eFL z17H65>m_Mm0FiEk^jy9^l z0_RNQQ#ocfZy=HP1jM)-1B^dFszXGWwqL zJ}z1ZY~Qm4QWIfvz!^u&F97D1Z# z4?qj_F@N3O98HMl62GKVGoijixk5rmnbduR=e_Gz{6u)SerB?=egMSdza@WzcLfb$ zw67x8&>ciP0|a;``0bk!m7S`KUbNYW65-|*Qt<_#5~c#i6O8NMkCD~r%+BGnJBEfX znDKNZZ|&~4>~VX(p2)G+w>5Lj8mZnT$FR~}u5CZ1T^LJJPJI~R<{Yz+tWVKS51!$! zFVrKkwUmcnc8P=yKEERltayJ2ahTY2c(;;vbHV`@ZeA{@6GKFMsfr(x!Q=-7kJR6GLc@x%~*6xX>(OX;Eti zFgqj5j8R`<9qKVdHVv2_?Bi+Y%}9O8j1}&N3WM3xC*Dj%8}q)>vW9=_8?bCwEVC=4 z{uBTmJsDu?KA-X!KSb7;Vh~nSL{Fu z#+^ZgPYs+LRqEyMFeClz?02nr?=1zU?GYcuS#;F^Lj|S?d9kz|!_H2Mj%M)l%J!gn z;BAT?IvApT-!O5Icg}zLG-sN%WjMB*)jVwANJ%1fB&p-S9#0;PPuEt3nrQe3p$q#k zJcJUP?T!Opy+rZq1p)O!LMYORq1i3lkuQr6`Y}IR>@McN#mf^^jjsl@E*ud?Rv5C` z9RjAy2T8BF(cf7TXRdfdFWtN24`}cDkjjMc=W%_s>q*iH>_~s_Id<*7&PIo(YvyYi zSUBQt%Ha;QHJ{Cnu+E6UmarR3Cxx9dsKU6s4QxN(zGLQI1s3*tMRR$uw99__Bk-kj zS*JJ{7)1}IfU%lgCbe#?HBi=&RPRB&pmJO?HTch$%Gc*SKC86P4hfyac0LZf1 z`UxuR%DzzdeTIL~&i)1@ST4*LTVfeEz-;5%m1j9vA+lITs2E*Znmxicq+=B`DxT=J zxoCevbn|R4d!`p>E%s=Y(BlWyT{gjU0Up=1NLxNRh==khxup@tNbTvwP8!W@Kc}Dk z9<&vRY^1)*jACBE!xJ0;K5iR^rqNeXtQe!ncWwE+E7yN`=kUy0CnEehJf_hO5UK*~ z4$HBTN1RqM*$>`q;h8Ct8Gk1rybY_i_l+5~x(Y{7B%&Rg5^ECz>ajaGDF5Ig)g;kR zlrn$L`+6HZ{pAJR}(YXzXbcz-4`@qG0bI z9C{Z`MR~AXF*`4xL7?#HN!)|O1On`a!Hi_%?Zbc56IyZUYqd-1*sl2wovwyG&jQ#n z!rqWs+`+zpcxyC-39w^IML5Loc=y#&1UM7#1-V0=9hMQePM;(RMweoQFm%`Ezio6PLhI7fgTG zTZh75KciBCl0yQ)6I!S9-w%!V=H#H{{!M8ZU(lzu=|@RY`0qO2JQePr) z4Nm;D`M0!6O^~O0YY|GV&<*x8mSS^Z^ljW#x35-&mR^IhFj>6Bx1WLU#A_zNC{)S>c0SJ~T7~qF;(dupHn!e)r#0__*YrQ55wamcM_m zu6EP3?k+?yWUm&J!H4<8JA@1U{aj*e0j*Fu9D*n_faQ*$ghid<42V>e)g{7~wG>hPi+fx6la*(4|H1Ns^p^NrRE znw!+)xqmMfB$V?{z6bXw4AV%Fw1R*0YIj_I$B-^@JYZEqFn$4WdUSS7I#O zEXV3Q`zzkxKK{LYPA~S`(3JswDws4t0JYD4d5x|R-sgA;hu9Fhd!Wqw5{`dz&!AfR zykl9kK;0$qlWqELU#OgtG`U;R>#L`x_{>21g2fN8v#nl7hGHL`M~X00#wf~Lez+bH zbkX*<4KAlEMGXcf*?1nb@nVAG)9o0_5X8sH2w5hMiD1!vfn}Dp)D)ODaD#icA3}i} zOT5D2qz|Zue4j5J^#>d5k|}@5XQc;&W7zjHFhoMTCK!fKY%=AnL!EM-67`9MfFE|? 
z*FnxNB03q|SCiH(yJ!ux8BgH(i;QhK#4r(z1}e`;hn63d!L(v~0INEakudYi2{~nS zPU{m_p25S)FMR5XE{_`tzLiITApRY)DsK1n(V`vU2D7SK(|R3l~P9s)%S!LA9qbU0GV}Z!MM0ohqy)gQ|DBSnUC%_Rq zf4VI1pBd-@Usgyhf+&Bf-Fq|2D)HOF1 zC>P$UB{r82kUJwg!0F*vJz?%?uq`rvXuJo%3%HUZ3d&b7`3ZARC}n!a5*+gfWUCdh zV`sYKeu);&r$B$4P`eEPh3*S+2;q^Ce5u0vlgVAHZ`+!QrY- z)m@?a);ly3G&lN256dNn?;jj^t2lq{eqck0seRo+vGWFk+%0h-1qUH@{a*t6(W&caR@eEE39GP4;XiA*CN7vSidJ2D@iL#W@%xA|xfKC$`iMeUR zu8qiNif%^1d40&0^TTO~u~o*m3MIS;&OkujxkolU2bzz2y|-^qe_TaA3zpk%jE~R9 zz5XF2PQ8Ph4i!~C-x6O!UQ5Ge>~ScVwN^I)0({@kFQ1^@Mui%+K{-jTM3g@OfYp`08o(wy@^?_cgOP;7!ozjr?j=121;e&@ksuFCarcI;Bcw24V(*e}5z*rNO>R?5Z=;L# zMb+__`%sdP_Y4QPRc88@H=PiH8hcBER2P#Ta!^BR&LHbzAhn;=DMG4cNV^I3k?|-6 z-fl4o5_k;6=Y%Giwqj7aKXW2!ouii797uoe2pTn&Lnx0tz(3&UdTUFP;^}1`;Vh%U zFOO%HGTP@FoC~Duj=b}rfKokT0!c&OHFk#K+~GUd~MJ*e0}o7AxIHQ&r^LNw|(d(=bzoS0U_ z@O=SDatqDHmdv`nw_?S}feBYGrnVKbPx4UG8F+P(A+z`{GH9@L-2LJfkT5|^bW{7U zA;6tAqi6)ZUvUTE)iXqxrix<}#T|dTCp#2PyA6InCv+B|kMY8ihr7m)mn_SuUg1)F zFTI=2Ngf!o=eyR$zW7a}om01y3495Q?~f=Tp76Yk_en?zVA6ra zswI_rztx(>+?hGN1jzc=A_l>}0qR`ZQo#igzoU;Jh_2aB3{r6|4qeG->CAs*J-(Fv z?GHXwwau=E@yoMHTJL_kW8!AFv1n`+0ebvyj*pKlVM}v+0hYw*w(5wMh=Yw87_7$O zbCm7Pj+(80Kz#(!y-G@x*lvuzxZ>;h21W5qFPswTe{f>naNUFROo4qfl|UQE-Eo zzLME%QoD;UBAj=?DKKK)>>v4ugN=krkN2>#g%8>%AFc#m%-|vn5FZo7SL6u?HKE|1 ztUm(&A;707t5fCtde519pzW{!uwSDm%kI!iz%?eisv6N9_{%C$;PkmICS44Tq5WV) z<}oi8gY#Bc{IS#@cT9i%Nx%29uQ!i>J2Wsdy*7a9g=atl!vZX1n2+RwnA!WxQO-O- zRQWB|-OFy`d3Lb2PaV6EeyLw_pV7as{sKu;0C&)t^jPQz3bV!RiTc6Itx87_Xu*a3 zL-eW*CxSI__nv4t1!i?!*QA04ylXon_eOdFnA;$M4J5U5$q_I78jI7g6F)sW66S&A%e2Z zPymSlQE^zV7WVjo7$>gk-UCZ8XIOM#8v~O?JQ3*2&}_wJF~jGUfkPitea|N)(ZTrR ze^4EdCRoom;TnG;Ll<>fFF*~pGJ}piIv80Mmj2WTVETzq@bA9wY1PAzvQf;ihajyG zwzL2Z7i7oKe^BqzD5-`=P2KLe{<@GAEJvHJ70E|YHRu^&_eoiT&G%qw_Jg0uZlT*N z??(s^)onUJ6F5r2#kBy&;X25EQfK`53KtJixQj&qhm(J|+3Y6LR};icKY$pEzbUWVxJNPu+8*Z-cGAZAgYftiT?od4&)IPT%D8> zQ$H%4vu$NQ;MOU-%bkCnwP^mhrxi>tkib137mBH)9{HBqbN_mJkNGYuaBafBF9y5C zfYO07W5<6oyV?$BZT*$wB>P*~^p`FBE)_o#L=P%pT2k8U9D!~$a>1H3gl4)|UYJXF zGkoTF3o4cvrJhADRCxNHc|DY8mAc6T{AM}!pm`LsWk<9%h(H34CYEo>p=r>A%>&O{ zL*OqFAGx^My~VK|1Gj#m`9e=si^cd_jg%B-?stF6tpxa(>Q5b?e=p6fF!d zyF-7JUduTkep(u3%VkZeny$n_tI;IOwEG0aE)5`i4w!n`H`^~e9rlDZ;bbCk{f}5e z`v!ltL2e*A&+E9Uu%v_E`}Yt!E$AOBMT#zFf37`5SRkNi7kB2p^|=D+8xpE&uf%yd zN2NuO_wCF_^}3|j0iWMovWlPWS6Rrju@L|hlKK){ zO5gbTj><;0?LE&{jZDOEUz!~r_7lH#wMGppRB23bJ%Kw7^4^e{TP~%qSnKxyq|N#= zkaqgkIP4=qz7=$!^>bmuFGrT(-a4-zndg`Sy7TI8(9BsP0`*~PY+K(SysdwkvbBGn zM#%o@nN^6Z(BPcR<=f_niH_%c?xik(ksni%B(P4LA%;}PH)>*cKWyvlT?W{$HAY1b z+q~NlA}k;Z3yk6O{S6(6BZJ~1${kQX2*A#u*Z~v=xHdad zNW-sJ2%C#%H${&s)N0QP_jVvhm)!3w3;Pqf(4bh20dj+R)F3{LUI z5F@I2S_kJ<-3c^YB@*Z<9$z+lhZAfYoG9 z2c4onfS^v`(eM3fIO_oI!_R+9pN+^2b~d6TrQpL1bBypdd9sK26+f; zB`FV*w3n3^+VyJM2VGV9FKBSl8zW5MDi2cKD#}?9U@9J zRXCId@MH6@qy@g>!;d3W%2h>ZMyrPL*gU2#$Ddno%I4cdR-C7Wh&L)}?T4*eK{|H2f5EUBWJA8;lwxHAI=7cZK;1^WEy zor>3q7E3=Mu+$JKVrZh!$#cAF1fovKJgvXevQOR(DltUy(MRpkt_Gud+`m-J_S*7x zPZi=He=@5A8d%3|aE`~q&S44}Vn-3GNA*KhgL2bqtPZ{+*++l1_qJr&i$ygZU$21d zF$T>xMZ^3F$$LNFW8hcQDvtH#XnhP9y0v=)rYloifc>*WP;`D$f*;A*>ip$gayLkC zSlrfI9W$96KUL(cU(IG)vJ0PHFDh^XiSkE-Rp1(DT9Oczj}PJ{bSlw z>4hzkCBv-9)WLtYe#hlwPgiIdPU^DM0LTNwVCG-tw&Sqx0h9HDsSSK~0Bq^KKxHCL zSg;VRaQ|#MeP77Iu&{5Hz9{nVDshA0_~F`$q^Tf^dbeUO3a}}q1oLyN;9d2ud#?C= zOb(CbC!zy=*LABcna4K~7@fuJng2o}!ID zb`xCEZZkroset8qo~wwkc4VZ=78<59!7k@(J2mG?deyZnVl1%sKyb``p~I&;LmP)q z2i%x%*OCBH9+ucG;R}$fd?KKFPd@@22bP%&V0SW&FUIMf;Es-h$*g6|({tg3<+|jC zU?Rk{{(^smAc&CNl7)g>Lrbe+Bse`Hpz?zFOngCs4d~CsPzTxM2>{1?!2-(bT1Io* z9uMEM9)f;YAaamDF1jK%Ie=Qw**B?sT_}$2sOpvOhWmV9n`jiV;OX~XJl8p^uxCv4 
zOc6p*l&_n#bXTY{y=(vL#Zl{yYPBeYpe^*X?)!gh8aD}ABOBz`lKZ#!a4^?2yCS+K zWTcnuNGKi2oj3**Q6kpJ(C#&}N<-0N=7 zO-{d8P`iWe^$-D?EzaGG-5(=u{yt0(7hXYJ9?xy+9BElHIW$4(yc#lC?EeBL!!att zAuekPV2BM=h0t@q6HlhdNt>KRm?h~4kIfUiffx0TV{V5 z5Rm~CxfVbd{{3@>Z0TUP2r7)jSAD67Vb1;fgjR8N&uF=@vZOP)pZ4Tn3KAl$PpGR| z$7D8<6dEi*iF#7qoDI|e(4u0@_c`@?mJZhEqNunLspl!oXI&m#TIhwb;tNXi+5kY zNdgi8_1B?U`w--8n}_s}q#h}a%;j?>f10!eZk+IyZV0rtVT|>LbI=XeObK9Nh`S3U z)BN|Kif>1ZR}aoM?7d+NEEEuHC}19lmy{vBiWaY{k~|YR9>M*X?L@!>J{*7B`{7Ee zdpp1hNCAot=4pxzFt(dsfK_Z40X(WPd?l_xSc7gzk<$ny-^(^KgS%<@PI>#$-X62j z?Z82`ed%})pV^|x{N9*!oWn*?XMIV z&acob@3mip2lYFD`=B~!p@x499tjq%PQ@1(Ltx)79lBH?YX-pRcULn^3B4dbZui(C zpUA6E&pcIrCj5jloZ)W;=rnG@1j=e&yP3T8h9EM0_q4oklIz*g7FqxLp z0TVI}?57>fp|NY

C^2${^xhBD&l|s=YQ=>_a8;NrT?oMhYkJvtN$Cg^(907SAp@8^S<5x{j0EV zz1`r2!5^3MzJYcB|MSP)@-Oa!|KBZt|Mdy~>h1n^_Q_pBV~gRZ<0i#_F9}dN@aNx2 zub7*~WP(95_?8j>z6gK&_b12{giws^bE(+iYmAS-ue5yO&(tgauzV#Ibx!_!eg9sO z@b6!R*9WgKAx=8^`;R{SE%?sE-~GS@DCwMSOu}aiU#Wp>{O$Jmdwn+oX5KK_sp0Rz zOOrzQ=Wn;a*K-p|{{H#T-|JH@ z^(!Mff!F2VFCsnuy}tkZ^=|jeX63AZZVCL=4Dw^}4`hE|FX^bRT#9c-8W?=N)BOAA zL9=nYBw#jlDe4flZIODkfFhootbm%-hG7NWawL@L|P3Q+gO1@hxe_3N?x zWGtdIv;V!n&o>spTGh?WkIa=Ti7C%$f6(DPr4MNEyV`&K3ECuxoIiLK%qPcVA{C|% z^zm)O6ZQON4_#aCz5sF0x;S70OR1Rd`^X+&9UM0_7=drWtGDUg@{4TQ?#Uu9H6G7v`kMVF_}CAj zVvh{uZl!;ve~?`g`Ie_(zbIrs|G@q0orKbFm;<{R@emNMV%w0f1aiOv$Gu)R+f^~_ z?cU=b@WVR^)Iib-8_&-vT%UzEDGfo{?`eDhKmemDA5)yBA1 zO8tKsZ?D(ccrv=_$cy?rpF%{3raKN(A-Bzbso$+5IZ%pD#tU{nOD{jX6=@AN4H?$8 zp!UqombY%y(~iG2yBv{5YI^}K8Z2zWE0k!V(D&yW^xla+cD)y`+p2KVb=dvdr!;Yo zt67|&GG&$+S8k<^ItyPSkP#*k;5M~($W)G@zeR+SuW zYA}=2N*wkQuyoCW%G)a|%=5?8TL=)En@B%7dVZYokFmnb$=*^$*u3{c-uT zK=Dme_fQ0ibVb^&dCVf?JovYv-C6Lf>3<#Rc3`fbL-1jrZ}3KU71UPkZ4G}BGtURl zPYRJiD*AF{$Jgi^fEyl>41Zi+cb=N4)GdNEdS-i5HUck4aM zq2}T9O<**ibBr^uHyqR;*4)G|Vx8XOE`2VXtN|OMnc%!>W35KY?#_jGvLdgGGV;gi z3h#JvOD1kJ-F96Q2#|#0iA3aA0Z}w2Gbe#D;w^E=*0Gtt;NY&{SQLL%@e{v}kxg1k zvYM0M{jqr15$bo9Z!vI*@X@C~63MEfezb7s{LYCyTQ8lQ5zBn^_iR3qL31eXsNo2B zd^&@;Mqd;zaln!Y#MlzU#kG6yKO27+`K)Rm_e*$ue+#S}K4}lThm+J}w?j2qSt8mai z(|mn-p^19O!+VwIpS1YUCORD)o1&26YVfJITeTo8s|)FRNZH5e(OVG)NYW zVsFMvZNrV(dN)y(098P$zi+aE*D|BO9=ru*>e+PUP(I@9VWoj7u#y>N+sE10jDOwndfOfxT720h#H;`=*6PxUdv z(fWG2X1Jhvr}ct$;2BHet@0=n&9;<(yOmOA?py&ht(VVI zbtdO{KZ^EEA!dmKI!^O_Gi865Tkd9u(Cunq%CXmx{X0oHHm$g#eE{>iXnUM<#M5*z zMVJsz&0|}V(|vCSu9*qmp`Rgkz=G1iPw}8sx+WOHfuzGp%&{X1I0_<6T%MX2jO%xC zI7N~1C6kW$eet({vyRd!dUNH0b&^0Uxs1&UbWVhZ*&&`>9qLCMu64ILXb8qE;v{GX zRX(6|9FVc6Yao!FGsj;AE!R|k{Um=>1vn4PGitRjCQqbp zy{r>P;*?XFNx50S)x44=#Saz99_3M2m?!V%GTqezt$vsFM$``6|A=cHWr9cZrO z_mln7A|8ot$I9Cevzutg7u%>JAKp=sFMf$*w|G_u1z(1mTwh=VDZ z%Z|Xn?Zch{3myQ*-<=t|=~fy_*_TTJjx*(WcbMunnkJr=Eh>94{(*+maLM1n)B_?_ zD-p4gtZd3$nYaB6mM!0KD#jxU6Ex%zE;P0dmfatJhwJC1Wndau0EXn=&y1(5K&smne%cdjY;-QHL#6=WD^3Bc)qFMWyoPZgKp}s(V08ohE zvI>gP8JStVaHnLqmEncs`_WVf9@zX^yB9~=k0n;YNVbC^>We|J8)v|x!wC6#a~Gc6 z;kMX+ysYQ(k$g(j4kv$e72v6|{kj$N4GmZI*8hEaybo?Fg`S^1w$v@y#iG4o&oh}k zKL{Q`=y3y>Jp2@J^|f7aQ$munHwQFb0AO~w*xDZgF(Yd|DYz0u;-mcu(RmS?^V?}B z>UvW85>sNY1OwT@lLeHz3uh=I69`k&Gq=38vNdxH=V_|tE z7pFt#zM*qJe53?5ESNyP+fqACEV`9?#N~ykju#D>NeA!UYUd8$93aq-)z?l3?GZ(e z$3!^bi}YgIre|yst;hlGDr_N|B|mJ78xgWlNBUND^guU0%*5(ZXq3LQ_Nxj+0%*N| z{j{v#i*@vz{p~{~d}vxBf3Z{yG${-6HsykmA934Zd|Vcm63P1KiKFVlgdh|bqE(~p z`B}X#>QhnQ!Hl7`$iM7epk$=C*M57W+aGo`$RUk+D#InkvRT5mtGxG0Fu?)QUjd@K z`qTt`e+tiiXcg&wY27P3Z%${Vr)2hjQ;u5~9^CWlVvBu`Yyj)I5%CG-_HNmWiZ>oG zckG7iTaRs5XY0?wkDmNd^U}rfpS}gw0=vP#%Ki?n{ry43%g~ZB#mcuP!_Q&OcRA!# zTpXr{?o$Ukl7I!Caep{Tk<~vGV+m$@XXynQ*~wL^?A)-YP0~b%kqSLZ0en(1B*0`;f{V5RKQLV znCtIy>@#h+T$i!G0cxi2i6D5kzdE@p{j(GU}w)e>-U)5D8 z+f#ER9)2tXH_jc_DSIiI6O3L{OOq+t>+@?r1s@)vs>8WcX|@^m{dd)WpJYiSC=;={ z`28aJXY~u>DN;V-I`S@Dn1Y z6qLcZB%DLmv+iveqqqvcseQeL3?2vVL&ZeFW zuOa_ky4n?@hjXGm^!3aj{Bf*z&&=PYsG0D6_;F;ve}mg|d#c8-bW77fiMNffj!z{K zb83S)`rQdU6}H|%;JPg1*ZUN^SNKxvDiNLz(C@qGOa_SCgvqOacxBQ7wf=l}Xf&kV ztAJzsXB3Z1JlRd$*S>Mql+$H7$6mWLRaAq|Fg(l08?FdV91IaLfRmz={QD#Xzu3Wu zfPR<9w|No`0~J}oEU|R=noKjE>Z5a^JXGdj_VGUd)$#Jh7 zo86+!bSG|Ug9mkgA?)PM*#07#omiz+)730)I_%TBW1`zH_MW7X)p5PZi}Br(`-jXE z+18}7e=U|k#BO;aOw8mZXVnoC5+z)g_v-gSUS_cZqex7}hKT>{mn2Y{bHdc*CgfO# ze_SzoEeRQ$yUR&7`(c>9taMFb%OwiZ4`69N_X#B+ypGR*MX_X_jhZ1w9^{v(ozeY*JG%3%4=jwiRxKYsn53H8B3MwmZ zcW3}4xVd4ma60>%4Cg6b4@Gqm=jOSSJ}u1C_ZZ9WXE4zi1Khvcra!$w+#utWj>|4Bh&!9e(dguV&svfs1qRXX-3Av~r#f4P^62)gJ?f641izE}Z)Ud+0%a#ftIy@%`o 
zl$A*Ek4i#V35y_t&SIBx#NjJArmPBZ5@{jL8L`)YperO&ly+@xIM0FjWiN``ybDj) zwO1+O$tB8(Kf-N3Tgf;SXdOOJ6zZ*NqPN(7dT2@@FrE}uXI2&C+fu}7lNA~2*3v>iZJFm+QKm>YVf0(~L<<;ZT=o(70WNMbU$E^>ggzB(vLBBxDX=Sd`V1pkle=te*gyQ+JOzh; zewz=oj*=37BxUL*mgDy?7s^tZ$~w@gB*yc)WTWBe=a>heOU^B%E@?9 z)k%FzqRVte4IrBwb!`!|9NsF1+Zt=P5YQw_Er!JE+b3RG`(5P5D7oUQQcR zHkWb9%zaRsJxP(pr{Wv1Tabf)Wym@1e#Mu2ZcB`6R2HIDcR^-l^r|B*-DO$=-M2e@ zaE9arumV zcv`=bQoH?L>NvZd-yBlsXMHQ?L4PB?B($7nAHQ;jn1Ags;00HSD|T6b1E=PvIgNf1v7(@lIVI1p)39V(Ymm4aGa8UWW#k_}$|; zTNRSG6k;MToc}8+_`2VJ{kAy2Pl7E2OMKY9=pw%Ph>qX$!%@z&>z#n3`3`r&A@^vc z^;E=q3GmLne>ri24GM&+aEDP+T!+6p^!ni{KT=@9DzglNuGiQ;Opk1d-EzO7DEnvm z%jxoAFD+mO01?I-l&WtI!GyXjb!uOq3W}zp5N>W z!%$rfO}}&tmt97GeTI5RB_+>@2{v{plVgN5nci}LJ zeMbzxO2?BhRrq<_0F}4{v5LLM$zC|R2{34>J|`Z7U9R1a-8|%Ipx=eJ%3$>z$HVAs z*nk>rh%?MiYml)tMRUk2oh$G$zC_6wgVozl$RskSiu)*k?h~BH?g9mkT~)E5cA-Q)FIZu#pjt`E=?#ESWmMq;U#5~7vPMsDom z5_|ulBzYKrgfAF(v7ivw;O(5c`QSE8Y>&+xeahRZH2@YQx>Cy>+VJkXjs*4V3I?{# z0P5s?WX+Z^&Szx)=;mfW^Nf&0I~)lo)#t$L(aLrCD6v0kOL0=~Hg3Z)=f$PZ9qDjDKu;1>%+Fop0Kv%I6mayw%nb!c_F@S+p^w5pi}-aXF2D1t zwqBN=SJsaOsOlm+MQMU8WZPF^igfFrGYj>9d~1@dr3+7*3y&wOA_D^oY2}fZY7$2R zC3n%?nK|{jkB;wzA}(%IPpK5tH7;oOp~xgP+ZUMt=PBRXDd1xrTjPo-5&Cy4D}m)3}`zn`YWOZ?-$Fc;t|O zRg@8tAZh+lBhbi0eh}bi>GU^ z!v^-~STjg$O7<|-IPZ0yIzP;*9PM-XTHPp*%7J{$p30ol?@CbxG58d^QQRtC3gX0rUF}9xFA{H-NNN^umNAzW(F8h*6?=k!e$VyQX#*|C}Q9BySNKM2y;V zGNQ78unaZ-X^%CV?b-Q7{l?mVnWt7curoDX>8U*`;RZj~w-0*YgTwP~m85c_4yQ>k zJ@WZ>LAZlpo#cV{kIbehW)UNj3G_cKBfQ@Q1ork8uQcYL?QxACZsNY2eI`skUnY+q zVtYN9UPHkSmr)LW-f3(`-J;TycN}h=E(`q`0D0+|?;q0aL81LLes=nQeWj4->4g@f z_1$dMQ|Z0urE&o)W=`h$dVz8^Yq+N+G}0Y<9ZkpU|CM*bolo|`;v~DNXz~|3eWV}l znOnc!$ZcI*sqFU;lQV%;apQ*0_8F$fKNG$RkXSeMy89m0cUc640`t#>V+YEP3A`IV z%hDJJjsGav^L?+#6Qt&U38=s`(T@-RM@eX;`Eu^s>9#>qbsk*xnAi_zv35q)s~ z0Eg`<{VrfYu*Z8gYAtL$z*BCeD~W0F)qMeZMYZhiRR{@x&fDI(!Q=e3ZCx#q76_*G zYm0-#WH{lS?gr=ZP6s`bRL#M$qD#wuN;KZLWU*fk=R7zV_Xs?(^iAD=@Ge*HLlIkQ z6$`*N_|W!?&z%f*%5PJ)khpxpF9cK>n6qoc6SIk|XMfJ7@V)9Kr0zwzVCn}=AXMVV z;^uAJs98II{UbyON*F>J9bUH^I9++IxZ}%gm;0`1m}V@c_Pu78>R@~|o{n@U`fkqg ztM$ReT)jnp@9R!~ZgenGhPtngkMO+%eI35Ob<;Gp5EOJ7(#``Kdg(sA+OE_VIdnGC zueQAj_x{kM)%vcbm4Hv5KBTC6C-FMI1?7yB<5HS`V1c39V^57jfv&h-rCX)b%udLC z*-pY>dtwfb)Th!u173(4m1DAnzJmcuPt7rhJ~*PZ5-j0Pa*%ILqT4nB3wH}JbVue; zoW>dVZ`jY*1b_bDE*#)fEcLw2Uo-_X1ls`}z6`N%1af396Ch~-}MvR(Vj?NlV)JFITQ zkR`jAB6AoBZhDJTXSqC1@0F^3nIXmNI)HASztZ45y+MV|K08%6%j-4~_g4AR){#h0 zKb%c#*H`Q1Uc4eB(9G)zpI+P}F@oV!#85VWyf~WWOO9J*LerxPlvA3)7|8G+CNHa+|MSNw)F#g^{>& z*Y8e>G+1Y=W)>8#X?sL8&)1xWI)%@F z^tZC|2}|KCoo#ust;x!y$8fH2eLb z1tyrmXx9c3ec(SoJ70d^`t1U<#UzV=-=`Drq6;@SJO8;m)DSL~tKep;7eD3lO!>S} z8xfDUTIX#~QOBEa<>;j=U_M?yJZU=h0n3uz)omeP<$NODlYon&>T{n#waGTor+ii! 
zA2txghn|`5l6DIAp^!`O0cLTlW_c8Y_K-!Dt~bMExP9Z3dz$Gm-n0AFL2lW9zQFj4 ztj8QnDpOAFkkH`o#Z%O;Raz@DU}n+&0avLDg<$W1VMc|Y-fFR2?gF54abL@I7Ww+1o)EjFrG zu(sgT8jzkxWO zs>>&IL|M-+5Pu&No9HUg6L)*sUX?-{jdi-^!?VD@%{$O|&afZydwZXMz9RDQHyMP{4BY#fcU%JR5g&lb-m`QRpf5jf3dL`iRmw z;aS|B$E)TCvYSlpjELt4#F;PY&da%C-%@tHLD&yj)z|AEJbknu4OKi<_y)Z9J7%6T z)fLlfJT`Cq(wRJb<^aBbBBIRCwY{y{)5H?3nI%WoipWG6&z*F9_K9D8&4LU}J*VDp zLBX6jT+25O%+DRV&bQ-ylGIsq!DH9RPxpS3vxEi1sw5z?WyF_xT+)hl>^hp6-D?X{u}h47BvZtNK23Sb02oC@TwH zO&9@Au#=k3Ly(|<=NtP^)&ox0w8@u$U=fPGo@4pW`W+qTuf4hdCiV$M{%WQpON&B{ z4tV}jw<19^n!44OJc06_=(%;vc+-b9y#-1*!co@BRU z`oxUWzDED64Cdu_Q~(U*j+Z&~XzacSN~w(d5X{t^}#en504$Y zwa%6Dgkt2Y`21YDZ00g^xA#jLFoPBQlPI+N`C?=Dk6VnOoraK*FIQKAM-4M)Ps|k+bdXno=ejK66|1iEA6k$P)3jD)diQa*1Ftp`)UV{W&6n(Ik(ZKR z4|(EZ5F6Q{gI$?&gDLfm+U}q};^z6na`L7DadXPS1Nyv7`@4Vst}teE@mbS(CjlRD zwj(ms@*I$*fJ}>H6bFR;kU&!VR6ICAsp&#ylHnKL9RRO*8_THH7ww%gGjHMi~ zB-YrUx9B^2XfDtV&1>Yb(ed!*GUw{_hv0Q~>bnY4vU|t<{|e7|-*(37s3bCdyd2)W zpI+xI!)=q64_=~%`uTlvW)>j?PIuuk@S$c$I~`O_;6ES{_A&O z^NF~B*I|t>D?nPM!CeZ7w~jhR@bwzTJYOER_xWqc5XsKvLc)qawkF$caoekofVMt& zFQ!uWfV_2{gTm5p^RP8remD-|qpmOZ;k0j0Gz3E(U2mH250T&4t!lx^PdZEw1 zZHH`DUG=W`F|9_3;gA5u9qOM{tkw}BLV*61=~b8X1JmI1<5H5}T53-zvj_!$Ad`pR zvE5~sC|e3JE8HMcwdH6m?=xqwJQKu14~WX}8Oe0>dquJCk?hmlSdumNqSg# zrQMFF_m^55SP~-XBW!N2QgVm}*Q_~DnOzg6aOo3-_uKbwgMD+%Z7y{Nof&-Xi5f1n zCZr#-ewEMi8aXG7OcBH2+`Ke@pT6P~)Gn;wV^O)g#gk6@55Fl^A4Lr@#HW5^a88I#E+`hpL;CsVREie?zQejoE;O@mFk=9FVD5g2 zhGw4|Z0DsKQZmDU^p>4}x6Sr0*5vXM-S4sa%FYpvGCJ6_2z!+}?%Ep0+0EFjZ2a=> zKXR4{%we-(NutKptnPJWx6U}IoJG&~MiqHa++B?E+~2B)c-Zb&>c(BW@8q>F2Om7A zQ)Bz}v**v{@WbV$@?u{ngpA$S-sa%Pb7mr}Fh-Vn^{gTPeD5`XvWURZ1HP$52)~tV z0~<(R+i8Axx;;LIy@t3L9@2z`oy}JNj=8lPW)$eAdRdVf?3Nf9@cgW)ya}?H794Da z!5ld)NZ;E>#Vh7d0Tt46k;3MZfCM**uheup}$Y}(LRB8 zQ(oZryzap>s*7xYa^5zvJIE>>Rrvk;V>Q2%BfGZY2MSTZG#Nd;iEutSFa=*KjC`SC zs7w6fyj1LX%Lgfcu}(~wPU1~I*bItve*l7$R0zCD>Yh2@rhR+>4Q2i$DQtfTIjX>$_4g#?cG_DS$AkL zlE<1JZo@TtV?MqY%)y}NJ#_pWe_g02IHbwvBHgVFf_U&GMVuAf56FNpMAbY z9%lExyYcBOSVBh^n3VJ~bAJ|}-IIzn=T<)$zfJ*`vWy++J&)HP6GHMv?2l|3-Ad40 z-8+%OX8y{5;<<9m?{#NiG-h~RXoDLuJPc1==`>gf*{ht&@L%^iQ8?psh^{yJ#F>bu z#ogk6s1E?fKqd&zgnHN&OM&BXg*n@-?BjPl&DiM7QF^ab6$}`6z)xph-3;?q=_n33 zZm6D4e(pmjzF9k51X*-LmQyRx1@n5d9eD8&@_|s`Hi=w$=epa4MFT$^JpN;`p*(2B zr}a&2d@y2l8GuBmM6%&<1ak%3I~|sqLLTdX`<#7mnPomc+;og`1JT(T9LJ^BakrAU z`h7aqK<=OR`{Q`x_ctIv>2H4xL9_-*{c^T5iM-Loq{Bg!+4)o6XR8iV^92L!{9a#C z!UR;q6lGVY9AFrEiCV4rCS2#Ty4VUSP!4tG$LsZuq)_vvOkSX;v-lQYg%?M0ham zGVGLsxSOM3(|!-I=X{eG#(wFcj3TwtD_rUe3UC*wXPf9HQ=C5*N#}BC%%;zO@3P#t z54#Ik*0%@jl1$vKx5tzF7Kc3e3ZW6zY8FqzQURdjr0?5u4kE*e_){Y*IJHCd+#cSa z3xH-w{(9pP{|{mL{kvYsRV~j2yo?~wujJI$FHrwb(8JIKK z(|*^!g8 z-{l{0Qh36<59%iGi@f@YIOm<^QsJ70PJHJ}Y6GFA0;+)7J9(yw{$;moeysUL>DWbl zJ*3wVo_%=TN0V8LhjIP{I*#w+xic%FIWfHa=?+qtJ8!Jm!)Yml@($Zahm{ulJ9Whe zG@JeT{;PyG*JivYofm&T#;2&QaKVqpc)^##!uD3r3>)N?r30Xlm>Bq z)Q@;f0EZ;)jnHG;7XZbgx}PJH+d*P|X&OM8k8?Zxp~$I#p63@;S1c zqnUxJ+$afNwu@HKo_v1(w(@JyJ1p=HFuy3kK0cw*Qm;25k!-+lRB= zfb!?O5=r5b%YaoM=3dyB$<7hu`}@I>_&{sPFj{EC_Qwgl&>4TH`o3#(#=qbV-lg(( z=Zvr(4ff@KVpFeAxUJL^eSJ^$hx)z`$LsuV6X)72D=0aYb&Z&7v!eqoTMk=-LDl_% z5qYM*H(jpMcg_uu7JWo&_7974V}et!9$xHn^Qf?mKf`H+s^B4G+Xl1F!}+|aN&-rY zobMklR`D_IO;5lsFQK0k(ig!yOXmh^pU-KEON+dJ!UA|9hZ6J#e=7F!J@3P#KW!LD zzapdnO{dwe#fS6Whl4MO^HT*1J(ceREBV6h%TyDA%Q2%nJZ-k8ENa3J&-?!1u^|7> zocV-!wFD_jy5YC-c^86`hq`K?13LBwTLHSD0tJj0IyDp26OR{CNA*vvDd6KXbtB;% zBr(Z^2K8L8S1;o&O3k53R)fl@fafRBn&>} zR;*;BU}W*>We6~MJHN#0{(0Ym_P}DFzBSR|0NUTr#S!f2XV^~60Hz9d#|oEk@O(5F zsqTiidbYr`0iE=7c<-%#Kk5&R$|iRUxbIni1;e3JHt&e|o?_w{aDfBX{N&Jc%4sM&(l1UUsfSCR3s>Z0k35o*|v_=tXqSUu*Niuf*+45An`Gd=W^?4A$#KysT2= 
z79lym5=EpfDaxIj&?CHvRg#w^To2rTp3TYC+3%A*P_@j+K|64h*E==xlL}uTjr_O? zaM0H5;KmP@_n9Sv#8Ky0^J1tUw_da(7 zL+8=)Gap()2JM3?tu4Cx(9vanw?oH8FcHWK;SCIrMpIdmCb|DQ>c?~d<*^O8Tt41EllQsXZrIjkVAYdV`5F@ z;(a0e@p(0L=sTo1&7R!F|G`B*jMCv^TufI&h8pa8@qR+TYd09|nQBi@!pIyzBoADG zXtlS0y=sD$rl-8nRQ8)6@w=RG&ZsWk)@dFP^`C}z6m!8NDcBzXsa>$WxD#E?2; zdfE>$p@U*8if{YPspB2cN!l-6o#~z+4aYw7toHOv9UiByu_)8Om3UgHsSZ(pb!2#a^T93J{W@UXQm=P!F$iftP#9O z8D;hJx19FE9hshIgA1R3MOv|tIYV-jA_wd}=K~M#P+&s9@aTF2vpZTxzK*C%3GIr|YM*>GAJc_Yxp~wTZ^#jwOk2 zt2g%33O~x)0o20Uk7=@!mjN8IxBJbL`#3+-cKhxO_5?GXi70!2+d_56zRjLHt5zRF zZWcrd+en$L<>WrL@0EKL2KSb)&rfoUP6EA=rF`7P54?A^L^KDP)8b!N<*olBW&paEf*MaHSAiS&Gm5+@_}1=pjBdi4 z^j2vb2c&ns!h^*Bg5P(PV<;1QIJN@mHM0~*!w!y9$Ndd|FIGNzAF$at2Y+s{4~#hs zzGhm=Pt=d1>G>89P#0%WMXy#&Ub_aHcb&Y7<82kHtrQ}b%{hI(JdZuI7|qUcz;x1Y zJI1Kz!(liq+Hp>c=CHV03ys=x+gYFU>oFf!lwp0Yerg*n6@Ko2PY8W3NAdw%}KHC8raQXA(&M!hhii>us!6 zUsu_D>>Qx|^dbadKz}=7?u177EU}OPy>oih&w+uw|L(QX8NG?U&^mV>dsIh1A9Io% z2ib5CBOBv4)K>W+ZvpW*i?l56&9kw894kI^Wg7UL3q#9fCTSh97htm1x)1I+ob|vZ zcx*m@+Oe^3eJj={QAakTyil|L*(P)#)w8zOgY@Bzyp^fxO$X3xdxxLEDI|=JxZ8VP zMxQ>ok@y_H$Hglv_4k)wc>U_l9C#+Xv1j8h?r&f78dtJ+DB!QSYA0Y}AMn_rwhX_7 z@6pyNMuO6)p1!wv>`@k6e@BJxAlBl4T`qqfra}5nIv;(n4p8 z>zUq;%!@dEql?91tv*FJiF|f3(H=Em33ikVcO;C0H(t&^@8BUDn2Mmb8Ybk_Q6DSo z?eq7cV}Nxe*XNmreu)>>*V8@D#x+a2)6-mjP(AcxIt+F#AN6>HY>tN&T_G;VTaoI2 zuZ!}y39PGMl*GA9W)Zosfd?pbe8B?pcwWrt2UHRbX5||#&bO9duexWn)%WFH9d-3y zr=Qwb0U#J{1xjMH*KDKftZ6|L%F^$H+kET~EeD^|_xMriPAa+U-Mmbbw$mR+Iy^V; z)ESqp32iCy%lGT(Ds06bpEJ*S#07kR+)Vf!Ke3O*vGyH&S=1x?2Y|{2oEQB+##UQ2WqRw5;A~htqu`FusSQRk(yGC)UP)NgCA8 zt^u@o4>TPd)=U9mntyD+r>6m>p>j6rKNf^$nQp3I_z8>~A2^&dCGHHA=nR1Z9sPa4 z?wEPA;H76DoJKFN)_6i%tCNZ&Jt-W~ek|y*m?XSJK9l6b?KSciREn$OsOlXOze4M7&dU-52Nm8dN2qEV%rp!GWhki7_a;+5CEebs@s4~`@#$AfaSs}D^JBi3jK^Gde&AKnWUqC9 zab#^|)pyo&0%nJ)u5j~aOazWcG6L^HmJF?1}isL4;nA>9GdPH@5t3rU{QfmC=Z z9b;6%WUtok#&Im7py(p4WdxV9{!`m&|9i&?k@uM6RhhD0Bn^U?~}&x2W*n=_il?3zUFti#Nom3DsX9Ba8=5sOFGiW!0R3u5I-&lr7ibg%YQz0pY3)M zXRQTkm#Sv!aC}p}m8%;PsCO2vdWd@N2XEA~;nSZvlDC6@g_z<5RN;K_4K!q+zY(1x$iwf8QdHsC zJa`1!UA?QTcvR&}q1&fCg~iey3=ojF1X_QJGbKKM3K>7sk-FseXHD|YpC4>xJzNeU z9)WGqKRidcuPJi0Ig)KJ(1x-CgUZ1iYYy6C@n}9TVc!^HNE3e59=BZ9J;A*F zR!oO~(j<9#_N2hV;mFs`x`>IUfG$06~T%DvC3BNLdQQ z;uc4B$DJa}zt|hnu+4M+P4Nc+nzudmIBJ9Mzg<+*-P@7DlT7X{Q~&|)bP2}XRPOk0 z?y4QRxyHrMd=Nr~*6vgFeTD+(5*gvq7;+td0PAJT*M5IGxg&~$A?iKz>)NV0&6QFd z-C$~YFhODX#`D75L7neae^&VU_RN25_!C+P80O)60fOWEay8F6h@56HADUp4=%b@D zJcIy^k_D5T0{e8xq&VC$3?R9vG|Mge_4WkT#_d9skI$L>fwS8&YzT~qZKWafK{Kp> z%pI@NEEE9=qMNH)oz~YZC9Cs1D4+Fev`k{@$?r36Q_Zc)7#BmFG*vR`huIz_ImIRvm6^{?Y9 zZ`%r|W|3j%-Dbe%bmocoK>rChIHO9Q)pxNeo`0N3tls~UUb~AU0Z~_F~BSJJB`Wahe{r=wnJ}6by z=JTmb*?oB2;VS8jBN6SNm*?}VnXmJeqk|lr!tU}2*zWh45d2q+b0FD7a~uVKl_Dlp z2z_0i`*xrl>hZF%10KzH2a-tdCJeGnX#m+w&sgJ4sk~(ocj_slubjf1&mIuycJQbV z{z<2QUhE;;HXuBYb-#CMRVKXArjM}YMi1ki{oA|}*HC4m$M1^>op?1UNU5-Y8Pv@6 zSgbxtt(FX3x?Q-p=hhQK*<_D@=*I5nr#kJo9rZl%Ph0=kN%lMvu6A@J;cHTs$Rl`1;zy1q z$6NPNNO}95e(-#%OviDfsS&-i2w z=R}{Y1`VDcs;TUd1S3;eJNp=jhc4pohN~+ig4JD2dz#l<@wZwO&1dr6vHPJ-kM}iyV1whB9o)m7lPGhBVnDL25pGeT^z>8OoFiN;xD9LXHQeSCjahae z4DUUz^eK7sb7HWp%b-=-AZL5yq-pU5a3sxw!V89kcbXr>0%A4PCorj<_Ks2WQl!E& zaAwQt(7T77$V%G9f`fI_-{ncD@CbAsuesPA6rF(wFnC9Qs(r!Y+ikL@2;QS~tVHQ_ z>@BqEcM>3Fc5Nx`^?j8?g7~9Do`V*Ea3<%%0?n>iS2VMB#QFs>TR_xzuXuZjGqx*# zW0;-EII52OMYRU|HNG5fDtR0~OSzj%a>__xk!YeFsRk7JQ)qEr@_Hk8RGj6c5e0eQ z9hdU-t$Fi*AZwklfH@@JZz{&G*% zmaRsP@T&|Tw||eV50_OIFP#&_=WtTY=0m5}qdi=N0}^ROqV1n|8S_~C+ED6}9d4Zk zC(nQuYsu`=SIM47DXxvP1xtP@4uStE_r&{uyp_^G+lD2+u@0pI;fk=}xxs6jY^%n| z0fYI%rLbnkV+xEG`OPOYmzg*T<_L 
zGe3`wn|;q-1IN*krbZ`fSRNZA#Xk6E%YYdrXeT@!dfWPi)T%h`Zou0hhmcO(EyG2B zv(K($bsiYN^j6QcLdZmM+|nDS+*jujcj7UGRL>j>(F}#zg-%4g{}}Qez&vdqPp>f) zR%{QC=wsk@?3nFpo*4YuUhUyLeY_=NbE|zDiuEZ|8#*NDuUoulJb}|L5Ha{F>P*y-GyNyeeLi z4y=0F;i65QZfYVOnaOM(b{&gCXcr&iyGmV24d(b% zQW5}7K(fEZQZ@&lAGo!3?VTTr*4fVlUCN_(jBiHnQKv8_UyEj9MVt{dcK%l4QC>@I}(csr?g@RyC>bEdX;tf3-?L z_q`r13^F1;Y!?MOo-d7+6Y zr|zZ=&qD~KN;I8>k^g*Wc5vBze|k;#6DNt}xqHzQ!by${So807@Ys{pyth<1rIhEL z&||}X*W0DUfwNMgR@T@#VIBLZOK1OjTu$-h^u<+SJ7ottZ~J*Z4S)tAv@t5M)jd3Z z$3=Zx7V?1AO?5kf$%Br>`Zd43_`Bc-YaDA5pg2zP?)>ePxf3+}H`_>3f13-oqoW(# zcMbEo6De_P7=VPm{yN0F0#~;EzAO(birzp`SNJ6Fj&3x!hHwNg1UveRBwjLZnw=Lf zK|)95sFBmD^5GN+z099vTdooHJ#u9)@i)G^J%hR8Mz%ehr!)9B3~m?gbPjWOq&(~S zO2HBTaMY7EUp39~o6nG}f8A0meJ{@ghaDAhv!M)05EF{82P?W>YqGlX$PH$gBY19C z8Fzg5zhGB(b~=wEB_7W3$6DwUTGDv9A_2?5UmoJ`s#`O6=B(2?r~(RP^*UR_OKnGM z1DYa)sT+^}EPz$_wE;51unG4|7V*^H8FV=0yiL=%*xuwQC#SM8e-HX;INQ7XBa&)W zVrFMHvo|0KG7J86{lxiErFWZ0r0GI{^Yo+nnRMPl@rBU+r}bMZU$WzJ8*GI#nenSr zfGDn83jWuNg`lOoSzJ9zc6WbhbCuOs)~u8in!%D<163H*`t;s&&mn&pjTD32eru+( zfuAIOWvAm|GQ5-$f7we=KmLR=ui1xCFvfO|WqTW(qFoaWy#Qgqet!jbr@K->$(Kxu zI6HlLPNf15b_NMqewwP+nqyy3GHQYum3->6K`z{H>ij`2*q`}IMz8M)bV3*hS?p@x zEs`wI=~qOZ>gRnYUi|hSLZF3tKKFYGp&6Qd5ZrGHy6HF{eVB_j+xa_dQkAJ{R@Uw(2QuezY4yj+TL{Nb)JniZ+Fuv-j>!32 zjaNtH_OZ_$F0&&JKs zHk1B*?XyQJf3SuyZ}GKGQ@z8rEE1eZ&_?!J#F`GJQOh9US@!U{+{F%H-3a?cm$Rv60uWMgb;4L?-H6+m-%DhMJcFVHMuNMM=_$d2c~=f`X|g|ZJ~ZaDq&5p z!DpDdl}D~AB#2C%O2I8+t?uZA0I>nq3&;<;xb=ei8evlbKH6W{ee-Zd4F^o?T8=t0 z>%=(k^rw1$o+{XGQ3$)5lh-ME=SR(6Hg2)&f6~qqF-82O=?%hqR*U%ebK?)MhIs`) z!*mb#=b8Fwr{{i(rGP(yDkM$uU2|1FP*(mL1LKgn?#ROS_fo~1C9J6SvYErxHPjj1 zQGYBE=LK92TKr>8q{FT`|JH3^;JI)7z0dVI7&iU71MHeh78;i3 zf8EJU5V$9$F+W^a`s9`LUE!~f_`WslE+qH;*;=eu8%?)yut!OQpLQl3h`MlV8GfAO zwkM9Ve|D1_S2_gXY4a#-f72%nE=lgM2rfL{UiCGoD|DT!>6>5{S}T~w!STpn@3$Zd zj=!DVHejMXI@p}h<;@Nkv!WNKoce(ue-+izS&8=gX456yLb3S??z*)6B9i$KIJ

OHg-uWTS>a~sbmm>@9X(Y10^hDTBEGxtY)3?m=N<~Dln$-Lxey7> zhyw{~|5ZDOAMF4Z9b%N;H%CdXf`G|tl(XNJo3QhDxhsG#NP$H#Y816R##wt9c*=7# zu+1mpI%X{Qd1m3NAv=af1t+^0f8@Q zlFf}!4ulrf!!jkAAcr@i#b(1?w=xB=dSxsFkJNd%XCI1cKGWlKzyRQw!Il*O40_9S zD!FAMJw_J)x#!G6XK&D3o$wk{cfnoqsWs8`aMoX4Y_IzL{#jLUhg=Z<^6A{soD0Y} zI`}7U4>D_05wp*7%8P%Xe~!mJeD4gQSfstK6fS~r#H5xC>es<3p`aBTf2($n+l|)b(y-(x zhwx#okWIIakZgRm`R1SPmMy-*_-P}EzQ0J6)1q7$+wCcTByl}XVe~|n(s{o2%rTtI zvED(VVREG4)*o{3u2xv5*c3+QTViKR&R=lBHZQe?bcrV~I%df+Vxq7ZFmx=tkU8Pf zM~4Tkz<}cRdGgzL0*`#t%6%l^{fRVsL#f2$z!tj)+w|CZ0u&{DN8 zS?Mkf)JI;R0433@(aA-qR?5Pj3+?Or0Bt>11xjTtypO{FZwWCq7ch-zy6tj0kD9vH zGc1}>)MfG%p0l%NX1iQ4pPirHifsmb_2zY(dMc@{I z>s!=eMlx7W^X@=Bab@a*^!&(a$-R$xfBg|(Li^&?GJu_lLc2pJr#Bc%qpZOcW7sMJSrzFY@ke*xB)7Qo@>^}+O9fA-}l_PDRO z($bYKp3Q~orP~-I!HX@8z0ASzjALqG(d?r7MZ*#HFN^dGpFy^k*zSVqdb*s1i zc<3AZOz$H0zka)?31f)b&SmHD`QG@6mSvZ%-VtsN9C0Qe#<<|^ee>RRnu3N~PETEA zf9!qOlj5Bo%l$1kkG08-v^$^;0GY z`IFNUlibbo!R!N1k~+?pgT7UJS>D5rw_b@)r6f@~3ahvyvWXd-D0Y1*|N-LSGvGuj0BngbF%@YeN(!2>`2g}=wayC0$tCZqe) zPI~YiI*2>mD(|lYTkdvTer$yOJ#SXZbmqN1p*`t_dr-_%WlduFG~ysI?i+>6+er%7 zJKIOnmzJgSgj|3*K#Yf5`XG zwuo#P>2xx!(=o^`FMji0TA!<4d3(_nshLGSE4sgs2IGqYi*v8_qtZ0(*GW1bg&#P= z5B(*jfJe+Y7kg(uUGG=to+rDV54ekyTClO7a`va*1a!X9Rdn}BLaX?NXh96*#ZSL0 zZd=qqe^1EqvQQ;Pze_bS-~xS-f2+y35qD=%sSTiIvCK%|N06@O>v6h2P2J$7TYCAr z`O`zrcO|saO4>IrSg$)W3(*majtji@^oapc=d5P_%S^7YCLKNjjMMwyPrTo4hcMol zTvsRl&Y84WA>n)evE$ULVvcypj=u;ycGq5a3Ta#vRYt%ug|`Zey%Q-{f0DT=bFvG) z*-5bv%W^{SFqMvLz*mtd)<007rY*0Q)R>C-dE<9yh3MAEfH>rJOyo+_7DU;FO?7UU zsISQ2qma|iR2C{qO~e*}_s(7Bp^a)=j=s7Z&X3(9p+@@PU$?o&kB2~LT@dm1`aDet z)v^JsfOf{8>|mJp*$1q1kRVU6RUS7q#-FLit4h*z{wLD)LO2n6N|u_ zi>C-fXVW;?Oa&lj?H^>HWy?qvi`(I%4AD8-l;0~%_T zB#Q0hlgxx&`-MJa`JEp1PcGWC6$kc_;7B6B1^M$bM=Jw{!u#+5u!(Zb;h8QRFFY@i zh?2=7AH^BsDx}ePcko{_+1cVLH6Jqvp*AjgOIo?G4sQ=?NVmg|e0)wCOKkx}zQpqT z2ppeDdljkNw2Z{0f7?^19rJX(Sb03XNf<3Jb5euG@Q3ADtX3YEoyG!1bwOY+eb3tY zF<2+X>tB|;h8&WKXD?zg9fsV%6j=}d2`qFaB!riUUi+*c1E_lQg=wMyL6k*ee^_;y+I=7`V$Ik0OSnH`aHVi>$=abYF&UU-|wE{ zLhYQjCGGQ>jk)2S6r_Go%c*TArd)#*>56N_ zsEvMEcQwj}^Sbk!JD}e`%JBb~6)7+CX$0hVHl!Lwe-Nb{r^0h;Oh$D3^}Ml`+5)6k zAVbmsTmLM`dm4o<#|j>0;;Wb`|L2koZ-0xOdx*N&ef6%BDrFlAy$?PA8vUk-0CU9C z?HJQyqwK*t5ni9qR}Yg9^*A-3Fr49){uWjr6*d3<*iH0%zt~^)5?B;K>pQ4g7u}My zkX5aHf7LVuXWN(I*Qi(eX0fGx9kseLrPG!7CyVq0bu~iqkymy}%Z)N#Ol|XWQ^$j7 z$C<1|_{;m9jvuESkvG3y@4s_nahXNN=M9DV)yK}T&oHu`gqx2{QFKgj2`}6W{q0WI zaSPB)f0d~FDCHUEEo8Q4?sph8mSJ+fxkAg@P8CIdC>au;aE;)OfHgnT0&FvD+lXjMo z|8>KTMs&qXU?R{;Q>;+~D>+80z9*HymCevU>Fkuj`7aMbpyF@yB(7GwJkEIGa@4qI zf6(vBN-6u%xkx(5Z+iaYv&86tIOxkU6MoZ~3hHfWPy0^Id$p_}m!O%9%mcb*o|m?H zukkCXPkXRQN*B0?ONu+mC-!E!NTJnm$=9$ACJ?+6M)MuPP%vcYk z5|G>@_IM-tD8=Fkzq3KDVO`cioP2e}Z>%yIU@RSllrw z9i99NXWQYM?pf0HIPO%=4Ju5yA|@;`b@o&j%7u187!_i%qPT$(2 zE2QZKgwqS<7?Xm^DPn(h>KTyHbAB;>$J6eDo#)w(FEfZO0!s9`g*eKxf0@pH=>Pl@ zds%e$RfXzimq_<(YOwkpkYJo(q*SCdpXal0!QM?$lPl_S*<6H!cGL}LbC8L1)2v*2 zA`h}{k2~N%ik!vwlF9cp_Z@(yrenS1*1H<8jQhME0dgpJLZh1(2&Y>FR#znovqb-) zY3~_BD}=(Xbcp=D?2y7Qf6zYW!ws8j33Buf8KFb@%61fz`DCe|$PxyieH^y5oHa7b z&hITUwgIjvz;XhejmVxYNLdIAhTwT2r_pI3BflPJoC2$=ye(q7b7XW(^7k`fA)Rqk zeoD#U5y~>nZ`h)>{3?g^Tz=ilB+@U+3pr(`&@}um2-*Ss!vLr+e|w>BJKGMz8Lx$7 zJ{i{pvlaoE$ZvJi?PTaS0N|D!ao%T(s>A81Y3MIYMN|Erg%kEiA5D~sXInmZF6ii9 z=D-VJX|yiZbN{li?{_?5eV%5F)YXu_qoBRESMmz$BDI3p4><4xKXATzBm{^v5Fml*I{{bkXGoBh_0Kui zUfyBPSpTZ3Op-*>7~lAsw`ttBP(vTlDlXj#o9%96zL?!~d`6OT?P=XMn>~=sQ$=r0 z;!3j`wxc>Me;$9cCd(MgMJKuW+uJ+2rjS0}#>px>oAYaRaVvGhSOsvmx*8`0>9a-u zJd!~=Ro=zU{ZY3@2|gQXu?I-AslBiF9`X;f>$L|Wu3sCQb!x_Yccqo$jXc&S@_42+ zi&4jaU#!A-V-GZCeIGye^rH&B08epcO<(_gQ&rh=fA;&PGM~D^3F9NXy}aHT?g!{C 
zo&d>=yQP(dPG3_i@9_lNVVSe&J#h{#$0;wk8!B%6luze(Ydv7!g9nid+VdW53dZ@2Ay2sjJ{j_I#7zV{u-Z>z&TC zG)L#>9c}};+|Y}GS$pluos)0lbBb2)g|z)Tp3f?cx`xcgp*~r3F~50TQ0B&&wAHLu ze=CcqcbY2N_7O^**Yhy1E~+U1HKK$e@CA$3y-Tm!ES04T@TD;T&cEN9 zLUAplSWB<%3BrZukv#+k1%1Gc96Dij%{qZe_lmq z8O6@6QRoIrC9;Z5)V`Eru~b!T)j=71vD-h?wX43%ujv|fwQiL)a(36-*Sfi#AE=+@ zSAVs>PwHN~DS>-DE$ek4J-B5qjODqQ7+$;4T4>D@UI^NCv6{BmJ40z-TWd3!M_B(F zt=`l~hpp{g^0v(?qOzrU)GIt+f7A2WuXVfj_1!I(-2v{WU8QwD?4v6EqWk^mn{oxU zQ8Pe{Bf09*aywac8k~pYzPCFCV`qA}oWc9T6;Ec#eX$&d>8`uob=GBd99|a8Z<;H- z7s|JL*?hK-wPH&P#ytzhbJ2#Z@m_95t@}hBXlRRG?c@1+EWwrzUiAiaf2cxfxetT) zcpc9}z1@=|rpD^J+`s>1?(>*s9 zyGEVh*Xxy^*u`~Q#bNKTO(Z1sHe2MaXBBW3y<)*wH%$dP1WROSYxfrP$jzzgKtXOC z+QY@AgF)BnIZ7%HfT1uNf4xNapi_M(w{g3Bs9g~Xe+}M}RCR21g3d&%)-CJx?cG|N zYp;dS2ARflv%hw&c$GQ`xYbPz1=9#p0^>d3ows-MLj8rWykWUPRK~C~(k7*9JsA&L zrFVUy>g8S`w1FNW3cFT|vNtY-o$~HAbV|YVRV_?Re;4Ae+1wwef3vnUyx_a5HsfpI z72ru*SIXV_A-$)MX47b2tnQ-^`qJ3NYrUdZtJed%3&S=RmJ-7G1Gzce$Ax=Qdn8p| z#yV{SzZ<}c(i}!bv(P@XNwDE>K41N`)1|d^K?$0nU9p&6t#15U?s){e;8@c2!lxGx)$!Y}fL z`~KI}Kafl;`z2|mM#9gZzw}RG>hw|m*6`?;*Olh%R;ezAw|#Z-^WPOdC-mq@PhT<& zpn|U*uqlx+F6!cCVb1Nicb1-WeRn>C!tSWGn5IgaOwt5LM13vFQJB%{ir7+9lSd(dyy&zHj5j@s`D2Jnx!`%o&aq{`Ij zIgE~TWmdwB%92sLjV?&r;oAVo{rETq$3pFX*rY2^Ay{4PUjC!u9cJch*+pf0>doIM zjrF$I)8fSv!4B~0J3w5RZ0S93jH!JxMv{8k9LvS=e_`ZUa1U=j<6-nM5`jNT?g zb>`cJTB#M3*4yrEsFdwnsk~3i{Wln^aiLUGp10m?bz6}A8|@PO_C~eeT&|ZhNf^L) z4x{H`RS3;`dN_c`b<1H=E;pxBb!t|_cd_EBAXLyNQtj3g+T@~j#RvP*f3BTAFu+&8 zxC)iGf2Q`+?nR0#iVl`5+SR3PS*f7hoer02yxy`s%igeU*6!QHh+g-NVUQf*$J^Tm zRcNJ!p}$h`g=^L-lIL|dQ^S%=yTm^wwpF?AERYyhudusy_j_EDPWQ@gxT2sRb^>%TBT#Ia?uJl}`f96a&fqj&en^!_ZRO|;ya&WD6duG4f zqBrJCDjbo|=J@-6!zwx{j;Th2s`#gp{a^k+_DeZ=Pki0i9CzHVuG{Zru&gfElUa%| zOnEz%%+N1YpW9OLu3m9jqziu6_Km@Eyhw|K=H+l8)SzuOtv+AtB$~F*3J}BjCYdy} zf3a!r*3le5et(4B@Z4^VbQjiojMzLFEDV)jN1d?v+Nh4Yop3FmH{6{R=G=>-gMM8+LnuoBv=jf2c|Q zne)8dF7eGLtvn1W?xWemU5=xqg?~@_tj-VqQd> zW=q;2qHJxpw%J7g^)LvxoyWE@8njVCcgMY@{wN^US}HY0VGYNNWfyx8jK-_Jc3dn* zEkfhA{*JE8I^5{;-Y-l;K!hFTe|+)Zt6unoMjSuXyE0nbfm}GPetp;SLYuZ$vch-w-fYe%?aAU%ly}>zjzVMQy|`aco$;)wkPfbT`slF;<`-$T zc&^Z1nWVExt5m6gsvY%=X0uniJm%NU8^6icdDv1eP<#P-2WK=PeEKyaX4OduFr|QaMObSDdovsjduk1*OI+v|fxNy(+{fTO;{Vm# zyQ0O$hw@)x7n#be_NF2^TC9P{Uk8Uq^qes3-wugdJLWW3X{7@t=@lDZO+Y4_3hyFE5y1lulp*a8z{$euG}Z(;-oN|B)j{2@w%*cJybOA zRZIgtz1foe{)U`uxSdHmaxDq}kn?j=u3i=+#;&-7PIs=ikF0~)cuzW;+iA&0tzaLTkKT7*x{D zxWee&e`(Iu8We;X&Bg>_Fl#f94r^z5 zw`I(0ZhNF5s7bnyIK-Y6h;?v`PUE+=xX7qGRvWdMA2cJT1Km^(E5)fbi*Xwtn_*M8 ze{0711Ftkj6Yfh z!z@rGH>HH^gdA*CeUFxhTUI&}s(wakFghQrIGcNOqot|UPU3g!uWR?cq7r#5w}ah$ zW)u#Hhje4$lQ&dTHeuN~89lAKkdls7?mrLPOSRK*&Ni4o`C4!eBe%L;4kTc$fA9L% zb3mPiReQAQ2t72}ob0?JvS^2Wleu%*j+MAqOYYmrAPM3VgyqIbGY{Y)kA)HN&>^CB zJ+qEP_;H%zX}gF|%s*E~qqprdG`_mlY>X;6aFDcMUa%Ru^pAt;PC-A;+_ysHmy^q@ zXA~|cTlLU-auG2f@AT~o7t!^re|kMPCd>Ntg`lrd+O3K_4$s?LBQ$fR^;(bC=phVm zFFfVmC3TzGeYx_ zUW;T=6RkFaw^p?H_biQg&0IN6-Kg{0bysg&>rM8T`yAL&sJ#b`M~Yy>f3Ts}3gbxb z-@D$zKAlHJLn)T7$fs-VMh{(D?^z#q)W*t#np|G@57Be9E7e}BZnc6j!e%vo)WglZ zF)4SCjpR0QO3J3TGzTMKM z{ie$}*^G}N%t+mQY$}i1e#dl0SKUMKGA$%QcKybof0JyjtA=XYd=@yp zbb#q8h<~0I-1AHb6grDO8uye z9l9Dyb8asImX+pF?=acL+gZBXn63Eq9@NVtnua5_bge|Kn%sp^Y#avn)dR`w)bA}% zGZg}8uRnwyLyJ&Dq&OMQPlLCKZR6kueBHTL)bga5Fqd1xe~KD6ld6$)r;I4etM`1F zj@GS?sZQGr!>Sdl7?qZ@@~B&|dXvR-I!LP94z-8Yue=TWetx#c&8IUM7FVE=Z~aYd znyPZEqrz8FyW5_mRQr{3I6?fGw&QaFdt5z@&Bd@-$HcHxmIiL%JA-(tDnL_xUsDX- zH;wM_ER1sdBw^bvUpR3kxK1TWM-m#mlN27gQ2PNE_ zs?-w?e|42kCB^UBrH&do@lC6iZPuWeS4hFwKEl@arJ}2s<;{Y*e$J_@wAX=UK@8b43xcI@;bR^Ko(fcPiTN0o})%- zw(d2GO?$aATQ07YB?naAdxZyQpj`bY*4{I~Z*KMZ 
zC^hen5~rIxiRNjWN~HujCjdCMn!F+hnrcx+l>k$o6_*LpT&tWM3rIkwztvKOK|zYE8(G~&<}MSWD2UXwq>XLNRA4oUD7n}l}H0e z=T>LeCIJN-U{s35@eDo8pw!(C^ybaR1>HT`W&I3DYllBHHOfG3Z`^0uFBA;LFG*F~ zZ&-(g)ssQ~W2VSpVUL$nI2)PA7MhdXfAwLCWy5LkY`mSi;E))%?nAk^tWLFKgU8Yj z@~&Q{o-g$qNwZnG?96Wa_->whtxEHzV`nZEi>s0>(=5N(sc9Wg=VH7*pG~e9Lkj=0 z9^MS=Qg+@$J+8oecnmQNysw9~XXsD;lT$9Dn^9(P7)^m1s8nxCJ9OqVoYm{&e`mj# zJhiuZ8S2aJ^RaY?@$T}D&HGfT;AgTprSHm3-^{m_>R`I+FOjI!^`mRf@9oLVDc9@o za(#5XAfZs@V(PCQGV;h~Ce?YJ%bH#uke8e5)~l>t>gXlxYf;*kt4O!@=eO}={D#x8 zGD}dDkrdwbE{*fO%h=u$># zs=KPUZ!jF-npLm-4wK~)4j$d*f?|TQq)A}v&jp7w#2h<_Uj3XzCa0my=s(9VG z(KW50UPEv;*-X$TQ6~MR+T(S%Yk1{#)wJ8~dS;Luk0<)&T_W6Oe>$JLf0Mx~J{?cy zZa>+h?>r3}jjq>Q;IE6aLjhQQvN3eSK!7Rq=E2OwSnL$4Qb*QIf2ZCE;7<*-dat?_r3Old_x-+N>?f1?B4D~v-`yhg1tyb9 z_cX&tHkgmcebdrmfD6U*%JSEr&-DKTDTlBf7(cYDt>aBySv$7 zn&_@aR!9SQ>6H7uNohI7v`Q-3+u+$B*;Tvey>1tyd~L4MCHAD@bo^dc)l%z$zutO? zJ242a?q%pN<1W_sb98fb;S?~P0#F+bZo^$4CY%jJx>Dz?jn^&L1PVT!uEMu8th z{G=*sVU?B}?_*^)f33~mokr;OhIPPZ%gJN0QL8`;fNQr3!+LxwY-{`8aayg_TeIa_ z?}fwJV5h&)qHWl=6mJ!!8@~4)yqqnPnAUuBDceZK)qAKD^l#}JE<($-y7-Ly*V|)m zH-h8PC|G)t*6-72h7o%*xNk> zve2oQgTc};o;RqfPh(tZRBl*Nmdc00qPFZWcl)Gh&E|{B5V5nt0SU&f@jN;!v|ngC zG>ru6{c!Uby}J6L_ip(*xUfO*G(L?U7p*;Xly`9)D_#5M*N@4#87?WSeXSnf-S?hh z0pfAiKDe%Hf5%r3xx?D*Vxw=FrlU*cIaQoVy?!a7vx>#uJ9!o#o0{Kj#vnIv<%ULy z<5zKqrtH=~ozz*WUmQ(a+vspMs1;|}Vm;%7eX8xfF@sWXPf8l#1F4ta`(sLMqByXf9bZcnYG)?&Cp8LXMj*ay7A7d z?$N>uWm}Xn%F;;CqRXVb+$`HxO*_2}`Q9GAkE0qA3NF(AQLlVHHBQOGFU`iC#JO|e zwo=)1lFLLZ8e_*A&FK`OYnetKF0`sKBP%Kc`h7!gYSpo@>h_OQ0vOP~ViB9K|NDRc zU)IOVe|~=Z-~QM1xL*IS|NH-OzwG|E<@`4PKi9{@jY?tte=X)$9P9p%?589|*~CyQ zm7`G^8NXz4_RmbW;M8QJE~9@7rgHsS@)k7k&v0!e>5Zqt!HJ|>FbiqMJi6kG@_crX z4~XBOzswiA)kwTI(q?g(_5a1(0OPOhukiuofAn6B(Zmr0w1IfNWzUUqJTv@}Q|D{l z3ENzf`uiA}QLQUWbg20K+5Am$kBg~W>v>W^{FYP5TT5=ZptB!%pK zZ0s<=b`TJt!#c=*UcLvD@(wxm#!Y ze??SJX*R{r_xKc%^b&u4N#Vf5t{?n`*JqmnB=_{YX|A3@CmoZCZ&nlJJ9N z8iOXB=X9?4Wl1~-p3zDfPpxWOC+rAVU=F6;n zKKEGX-@I;Py@GOI4J*8^V!j=I&fZv?+K%9B8GhPRqHA9%NeE%l&~?qJ)xX}Yf00>H z+?}nY#~$7-k{;14brMg1Im%E&Bj+8hExpxJq-&qwVfDs=rPE-+;l#9)7K9LYJ(MDT zHZ&BkJW*VS`z#)xDLKPdf#Ilfi{x19Xp+(!UxF9wV{2aYmx)4)GwLh$=i2P*Ffyw@ z|9)xtzw`TG$WL@3-`TbO{WW;kf4a-HzU+?9j``=;^83eq;61gsvo+JOBRcfbDEm9z z`kCbFxN39DtPmXqh*ox#LbTdh-u|VXF zB%Q0%@pCdNIctgZk`z>j9tycN|o;7+fpFVr)^NgZ!Y`*=iwXyfva|a&G*ZJ9S%6oc!B{=YYFY(-Cw4OV1 z$bHdt#qr1Hvt&xH9@?IT!krUH?;00m9d*gso3c6WxW|d&Xuj-8e-C*l9~4P{^H~er zBYRZQcKcncqrY3~;T@gWrxPPKWqA`~+~;$@qMq#fw$kapWZAXs?GwXOC}pb6Jw`#z5kS*+q- z<@@6MT*R~Z#0Dvk7a}%{xX&AYW~|HM#D0tYMHK|L(f&S)kfstCVx{-9Avmp`t67Sl#)maj}gS4ZT zhSDw=u}3tS(X;OOz;`2EBCR*vcsGj?CRXBsGi(~RTB?js=0zPzm6>5HHqVO21iJdt zaU?=iDy6{0e^pnU3mSnjl{sS1)mgcYF;4;4x8W;03!bd~eBE}kd!5Cka7%KV z-A@}WCppFGkJysmHxaM=GG7ym1fUF?1gWluLh-u2 z_?fo?GIkdgq}Vi!Sc|){zZs`&X5-jXwx^z{1r~see~BvLf1)I@?P*Sjb$}wdC2#pm zH|~eT=0S(|8^#~77SpgtdzmFu&!mdCXaSIo=FKFaw2m! 
zIIdFLnJV20MjQ(uKe&-T7@?@cO{?7-)wUcUHBRa=JZc4LEyO+-;5C@o}S# zkg7*Xe^{1U4;lddp;UK^m!Rn>lIiZj2&Hz%%CZ)jkzFEF_E*FqFJElVN5l&L<$TQ6 z?Du@EvQD$Fd7raSFC{}c^1EtulEGD}|9ZZqIFBa-HHsc%DHzC(pPylld?==&9Hs0_ z8BK(XP2)oRep{@EY|muBpXa8#(p2Y9^FoiS$c^nL#|t<%>l5q&i1)jQ?90DYv*7k33-TPND#zUbRlK;)b8Sg0y$u%RbnRQSDToq ze>5GfP%-TT78?iTgRgvFS6@=Nb2M3U4dmUK&q-HtUo2^)$!!s@G)yP3A|Stdp(&A_ zvuMeFa;SWL+pJ-6KCnMTTrX#@`98m=pPw6_SY_sCcfLm2&ku5RW6Nj%T>mbe$rhHP#VueQ^AAUZRwCm zB-A_)TNcmbPYm_r&-3W;K4N|5@znd~IbqDbkdKJ_>rSLd{!!Cnc$toAk!>iaf4MW& zB91J_qLy~_Q`i2S@5c5d5sRfsa!fT#m;H{^fO$_H;XTN+98)@{WX96`txBfMSy9dN z!=H0Dzo%PqPKfnUQ?mFj_D*tqSexU_l!1CW*bPO=KyjQwH^%5Q;`&l@8{7oPUXYrc z$b+|~^wI=EEx9nVn}al*(it#We+7*2O42FP3X)3+kaB+nDv$fanm|*pB-`^@nxXqN zfJ^KI35iUy5?;p$G<)4AkH|mwPcH6pF0&^~hE$5-P7h6<|JZHa$!$)$oP<3GM9EWB zIGQvpwhRfotY#$Yp81>3x40h|+_UCH?iYQooBKeSp`u<*E`YS?><7QZ>trRW?UkN3mOUE_7W1yff_|CmOPR9&S@OweOnIv%ePT7SP?1tT zgZpq8G!8HpN(TGb3iY%ie@FUx!1{GNrPp4$MqNf^3s_F5$!(~^WKLI6V%=ebNk$%5 zvgc%a_*lN|GI?Cd6!)Hdjd3^ZMo+BYxH_f9GhPdx&+hCAS0* z&%G0S=u;nLdC50dAr>yC-4{U2G#cuAyp6YXF<#fYQ4;!^OKBUGEG2SOem0cWX#O}v z8M1jTI$1;6Ht)nu3Euoy{^Sqe*tz#R{o%OdJJQh^`idXR@xYiJblG~`di~(3pLNxpR^5aT;R@s>D`mN6y$*f37JKD&{EB4j5*q;Ly3( zjFqt5)Za;syqCfcZ`!As)i0`cEccWUAfBIeJgph{So(SgedEib>M|A>jTXT+l^-EAw@^7!#zd37 z7A2RucZ~ZneGj>{BJaX9#qZH$uyqyOuXI( zp3_R7)5l!k{dURv7QfcBAfJf71<3b}AZgt=bD!npS(|iANCrfx+^4p0Jd^EOJw_cxOZ-v0K2$ee9BGa` zN!N;kVvcu|b0&BUyBkNUEF-Epu02MhnQkiPYEque zc0HO(5O4ICcC=uc#^f65vV>-zoK_n&wt{?sH>31z^@{ZZ84CQi`NMm5=tTWiHR zJ{5&HN@Grny0PNbES{k(#Zs^9*+XSk@U-2LJ_5!NF}<8r08K!$zpNSClF5bGkAXpR z?G}~IwSQaJ`%dr_{WE)5EOU-4+eq3;sv|k6)V;maa`#0q18w>|-~H5<#J%h@iaagf z6aT1X!fZXuBN3DSsv)QJ5JapN`zEV(xb8{dE?16nMU1z$mQtTuAnEnCmG>){D&nxocSbC0H*)(YOnCyz1>eScH$OPedw3R<_faY}}(RV?a4W0zL6 z=dL>*m8ynfN5-~w;67Bw3cf<`*rt!gdj$(E3J%4CdF^fdxin5EJr%mINF}1IK2+3* zGge6n_nh;i^hus1&iU$ReTrvX{?u;&T8sbG=vfcso96=P#bHiK8-O6m!N32#cTL-n)mPVyz$l6?W6>Vs|fr7D0h)$6)piVmbV=%@qJMg&$CUL?y?{ev_ABuXH@zuNx7~S>V|w3znO+*FH%sSB%j59jNSw5 zD;O3oD7z?LDp4ZL-_!*nZ=ZhaNPhQ@{3pquUGUZ;YVLf0>GHJI*Qi&zSASpjt9!qA zX;C*}tl)IX#qiZAHgJV}j=5(J_0ed0HXCmJC=%#CU6KNz2MaW}CDmWdT6h+OP1?IF z>5@v+;Y@cn<)IqjR+jbWzw!Og8vpbGvfTa?2YAhAZHc_?Cs&}R&F-I?UBgcvFi#Mo zQ`8Bd%dNR~w$a*1MZZNOAAfO1EH-ie>)N&!NKvV;A}2-tO58{OKbt?axHm1i^$Ix! zvtCHl^Q9xb$JmPOeTVa2^oCVMyQ7_~86HLq(9Bp*;=GYHFIgA2*Bzkrheb};%lAF! 
z+fj9#Zd23?QxX5hC(nFFVm;7X5!b~vePVf*^U#-IpM7(lKj*@yUw`ts_CNP%6K?kuq3Xr^07s<~LNX?=4oG&+m=6_0H|}$sU3*rO(c6*pGb6t@@NJo0 z=$@UOvw5wC4Bv};%6iPcHwvrV*DYr-LA)%4GjSRunQ-W1yMN(Qk)}1A2YcGF!Z}#F z^`xVXUJSDd?b;i=`@U)_Gf+X)OR}^l(VzvRIHUGX(y)IOU)ScHZB*@3GY{`2d0lO` z$)mAg5bY9H5%SU2V;0QX=^ z?l1NeK*f+5f`3Nc6IQH3XRnOMC1!cD&h}Kd(6^%NWtWbM1sb|XB}wFRJ@j|b4^X3> zPj;Ii-7Ta8#|PyMMKtoavk3nzMR{p8=ULj$IS-{#jrk?iP4dMSb++Cgm-MJpXmOoc zT}K~?{#QY|B+^jS&u?aeSzY(1h86QF*1Y)pIJ>6&+<*C9H#k4Nb@JS7?f>o@@OvMm3 zf3EgZJbzb00*A(?Y`6hQqlk;*dHv%Vik>9vl71&^)OI<`@*Zv8?>N*8sn${%;lLp9 z4q6Xh_f;B6rdOH`Cvh2TR$u9OB_#w9F1eWWz71s(+rc2hVO2aMVplP&v7hzzc;jrM zQ7rgA@@M^x5U!W{oNCt>XKmi6<6iT5JRHli=YJ4xH8EfMJOgrb`a{g5M)_+@H=9+7 z+&%BZiyDO*yk=w z=6( zoNisBo5JVpbB^&j{cm6QlOtzwOvLki-hcNFhzFw1hg?YCBb$5YXGC5{utf8HMI9mb z3)jke6El{q4w5bT#vzV0^_rKIR5fx%_tlbV@*zXPY=OENGelVm zRAq$f{EKn;#$0wh%ZRS7LvrgntXk z3x?#V|FXZNr&FKG;mA|^hm#R*J-N1!-u{dDDbDx8j9OD7ckPHAj6BE^XKy)GE|I_r zi2h}k2mLt*#54RgZ=yF|7SBGfM?`-_`|C{5yu5Fh$lm-Q+u~W$-Ig@%g(JZ{@4#nb4Cdh(9?zvwuI?YuTTYgB-WYv*z>CKZKTg6rUIH2A?<2+xWcF z&zj7i6Sb~K4B)e%s@87OJy>b?3@wzMIAn>18Oi5(yiz>3`M-@W_fa} zpY@5Kj+npYv6H^b&-G{j{k<0QnoRV|=(o*lE9IH`u56QV;*`}$z)%H}|-Sd;%;r&*l+^hE5mt9FW2 zFvYC7*(=p?v9wj+7{8Jg*ahYX=whXiM?ULM#CXLwZ!;S6pWllNB!9nG&EgC7%IALP z_siwpia89iB+D5@UxFD@w)Th(pLIzN^qGwxB=(l(vTq!%q26l)Wy0P-_CS$uvrx6$ z6ZdM8C!5*cSvScU#P>Ue0yA!URb~Nw;a~SsM^3s>ta|^r=|fUeq1BnMozH!BrJvp!=aVXl{r~Ga zVw2jFe&Qn1uZq*WtI_UCU#Cm&UE{TCqw2IkH4@z`ATomC1_|0p2ZkDe~V#vAS9Br|W z$!q?(kMq9$?|-^aoUdcf@t?djJD)_}%K5=wU5Hq_=+7(ev(dApwlT>x&M)qr&-@!)D_5#=4Z;c z-s7wW|0B-v>{;{te)2l%wNH;v{A^@1za&o>9gnV;&DumP9fGCXf1jykm0$W+vtc=s zf7a3kMzMO-+Ii07XlvIebn3b3C2#Uw^ckHE^%)qHT9=y64$~5S;Dogh^stifz?riL|(8Ocdk>uU6}_Ck3m4Rw5<8t)~SsViUjPH9CwCWDmlmgcI{Cc zN046RT9F#DuYfDJy_ZysIVPVWO&pH8NID4o$RrYRs3k$CF>g3T_M+q8b=!CE{#rBC z{du1JUHAE#$$I5kFF2nYX3r(B8Eg?h#TsMwS*i*?gyi1?lr$6l1DW7T4Og~OXM9pxT{-m)O~E`#JzeK7iyjY%?sz<62At)4 z_O^!Q#`V&7AvAkvPp0P9kxJ?Csg*h?24?+^sB{=eA#I}8Nb{wFwxtih2FDaxfidKB zkbnN@TMz`F`8nr#RyWZsH?Aee6LC)95dX|n{`3s;z8y0akt=5LM^0k<) z+22@*>@{Hgj88A-v%a&Q0&7d3_20B|yX=_VwWk|>U_m$%CE!jG@B+V=)w<+*yq`l4 z@09pkvt>P7G_?BCIJzq8>sCa}@a?j;`+wm7Wa=|=KU0f+>N2-RtyA*iBj>i}%}Ysa zsWzv;?g`3YMMBVLg$$ymN(|<20 ztWP`EYCS7)fJ_w)S()`nn(f#bi`wSA=Keoy*47N$K8@aBr(GFkXB$*eO{Z_V0o|DA zv1l|&rIN@C&skmwo<+>ZzvtHPdFKZs@jFM#`()oUf$vG%womm_Pf-1w>Y=>TBTj;IwQ#Kwro;Qy$rd0)#uB*gdVb z3bm*lOe-9+syUvPana24u3mvOLcmv?2YUb6{cJ8P%;lCU{&pfikhjHx0hpa+2n{ENz{eEpTS0V?H9a3IKqRG zRt-!uMr%1WJCmxj7|JbT3wtR|$oqmD$jlr4H?HHzyfV}toRn^K5G_Xy&c!Y*Xz&FVQ=fn{`AgY2j%@V z7nqw2N-CiH{Vb26sZJhAF`4I)KXY{v3;w>|?7HaPf{FfJM~ZWbm?7;-qrH`kR7feS z%9VBeimBInN_xHP9DigJ#ovPuc+aGgIj=n13(+sUVQQ9tJ`lLU3iu589`Wx+a=($+ z6CXs`2xdgY2ei}XZNCPMY-+=r0Pq z$|pA&<2@ps8V6vlmi z3cx)=Y#_EhS#rt~tekpFBxV=II~tYXSPYp3wY`#?1~VM2SpOo9C=!0(NMeGEV}!wa zqka|fILzt}5r46>JI;qkwSm1-vK2~Zzwk=@#yn2u_rc2ZstK$E<~>=QEb($$7p0LR z_slc=A^EL0^s|h?5sqGwf`V0>x`mb3X zp}BkHP9x^QRKYgl3&8UbpCj)RuoI|>uz#lI8Rz(rVX+~>?|nL~3vM%Y zMH?`CC&!1FcWEPfLpf$3UlaBh|FZqT6L7Eo%wXBO;0eAnG)v4XvRpMgb40Enw>)Xv zmBIPHXCWFU&u*z8Yg?%TNf!*HA7gx>1m%}Jlu@UGrq4j#Qqo}|R<4Bsd9@<-SGE_a zNQ;k@t$&Lph~3oC)kT1+EYZ`#&Nu0dlE*L(7wV=TEmP`Qmxi@t)jO}!LTAP!O-d+s z61O60$=pSmNICA6eGI+~v9T{-MbuVLEyoZ2>2YM|6SZKLEB2i{#<55Km)e>m=EThovsCbps+>Bb08N+SL@GE{vR7Z)s}7i_pgtOF&ykug zWq%G8`l{^Nt<&s|p2gYwrq95P;M6eFD8a>gvmUIsy#`MOjEjom8gdX+f~-~1FV1FK zE)#O`na8?f4@9qk-1|3HUgWCTwd6T0_uu{2N@8ZknVg@gRcYLkc?R^D#r@Wz>8M2^ zeUd!Osp)CRb0NolqZft5OGguR`I|OY2hCJ+a`M@5 zw|r{_ebYC{O=WA`UT4pTiE2l76)+Z08;1}2-vYWqy{i@r5%@pCoCVH7)Vn==bw7Rl z&vW@;a_JS*EBdpJ#J>7d)91OHB>MX~_D3J}tHbi~%+O?}MBZ$#)~ThgyFr@j2!FM& 
zC5&~3+U^>s(2B;$lgifzUasmpH731Cv*)`b*e(-oIPMd8L`Ue5f?1jC z_XS?eStK>Zkb;IG2gkuC8c46CM1NWSTIl3goWYJ-qeXgb42dNW(`L*9_Lhq<>&QIF z2#1o_2;3vlyIJf*zL&yK@}ivQ5i+76l|KjLojdYxGAE`2 z3Z=x^(??A7)aXj-05`Xz1%GDT)qEh(_L{T*0e@$1j`ZI2MNj0J40}k^;>X*u-burH zGf3IkhW1L(#d2KmJEC6+-ha>ryN?)ylMWT3UgC;6-C1QdlXL2fyfgyjzwSvj9c)-f zBK%6DW2GO3d*vNC^sEM^Tr3>nOJM~vHL&a9Bg1VI{W%fYu-oE11V8%=$0=}Z?2nwA z_TdqRVzweUO`o}KR;T~kAAi?j0;kU&D|aQMEP53RMHrwwAUIdjYk%KTr?TkTQHI)6 zQ@?9^&8pNBK?P_8+r_A3WC^<;O1%l5}(u1qBM+!@?Ta7TAa40Eg($=OK^UlmT-7r_Uquz#KF8 zlMj4&1@An+6|;d}WQxys{PX$F1$w!f>`JZW=;Tw=FF|~wkb-$EwGB5{aPu~BHI?4W zfgD*-lp*HCQq9+sYpEC4PFCFFEV5`#z2O-+`lpUuihD{OS$~n#j4u-Y)Z_H`wNdb& zdj4R+{#p~v?8$L5oa?VS&i8Eh+_@U{m$m84TzU z=dQZ`BV&1I9I930B`weUveWQQ!D*l1B3@dtmUB*v-na>4FR-{?#ChFi*(w}Ojge$Di zRHw0!W>uqve!R=fHm!xrI32)&1_G~nF<9yG1pbg0G?7&Z5@F$&j2w>r%(Q=W}{4zhdQUV|nW z)w{;)+G#mq$yJ_e!2Ok!g}9DbTnBhW45Zc=h%hF@WoSt{DFV$0de+% z)!{7p!QaR`wYCF81C3JaqDt6Rr_WoquYa?+HMmg&COf(1NFFmU7o;Ls4ZTz2#^iIe z&zd$Z#l?rwSx`xf&$$6I@!xFUa@Xhp+cj1Bsr@XC5Q)^Pgvs#jSj1 z_A`ePwUd}b<-P7-94cN@-#d48G*dfTXwxq|qL3$eMetyNK#P8z;HgOnGAZSqoqvvZ z@&p|8g%`Nj2{W>+=L`lBz8VKbRBM6H-ZMP6RH?KbMt-lmF$AOdtybi^&V;2@PeZvwd0bOF^fOu`s%kd8Sy4VDUrmRvi*PXwOoUbMN6WKn=xB!20 z(H}0YsCPfJ{d`7FZhPD|dK)ocihpXMm6BjHN3&l+-dL2#$z2pZ7sT@9g^w1XfYZm<3Ns z;EKQdS)B17%wkqMZp1VEsU0^h<-7j6dVMW$m@=4aOKK(kh`BwOG_kk!27kXiuNRRX zuw~_a0}u+PhIzdF0uLefde11!PmP9CaG`?&uFcksSk7v6$yrrzd97&hUk`kBNvQ$w zmcVj|{?G$Gw8W9W^%J zG#Rus(tBWR;jQ}2>^v84OMg(6`T*L>cG^EkE<=L_y`y)Hfr)<$Ur-%co7U8ZY8D-{ zv*R#(ODpOMaaLq~tzt`h`Fw5|{(Nrokw2gNPktpn_dxbcK0A3J^Cj}Z1HBYco98|9 zf9Z#2JcfVyI(tscBXYjCW2NfYhi6m+N3H3O9B-LB2(AU#D1oaD0e|TMZI79g$>E!x ziJ^K?PZ6*4wT^1Y1R@?q?Gbv)maAU^Z&B>kQ`QoL!NwK-- z>)Wy^mGynzU#1_<+@_-n%!;_)HT8t0iRR9Jsni+E=l`S>hSeUT!K1*~a zRA|*>BeF}XQrbns54P&=mxfh(Ab zt1|?TRNxc-V1EcVTH0Z0M{~ghvrXph>ck?-bOpo?Ak0bA;eSc@WJfknZMD*seyzrMEMlUsq z4dCEMKe$b>%wOzj$i2XgTLMr04&-xWt`zyo%rTiaGadzQFY)uQxuM|EGe5RHunOeCXqeU{W9XSK@8@bvs^kY; zF}Jqk^?x1CVY0I*=sM>PxzXHkO|)mw=TXcv-#5(&5dP-45rhPcA3eCHXd^E_!0s)Nv)XYoJ}l#BKn_j&qMn_98!95bGSs`+=g3x?*^b-`iJzNYxa$b}o8hF%b7&&IHrWRmyEs$vN^gLpP& zEuucWr%IPzB7MaOTt0klGG8a!pwlAmr{JZY#r@=s;qxW)PFmwgfz$LPXx5zouq62rlFcRbD=C-Uemiu)ExO!cZ@2qYz&D~i`<_w5gkN#MTu`=6L0 z;$tUYi{kz)=BiX1kzm4av287N=!Jp5dYO^22~OxEZB;OeR1Bf(Lv6Z4J7c25H=+*RQ2l9>(a+DQ~OA6K4S(mXD{k>sj9=c)u;C-a|ZD8M1PT0 z`^Gv|v@~e;z!3H#F^fS|miJXF5;)#cIEb&OPI@fz8blZ!`g+le&+;VB_Vy9js)(N8 zi`ie~=sPFwWSUMdr!bMEo1+MVh zA7F)x+5zm~_e_I1Dt#U#B@=6YFJT=h_@8 z(?chrW(rllNBt&rJJ2YTIX~CUWzK;Ue0UAVer)iju-sf6Ij3aI)5E*$!j4a&H>N! z>WTq;QKJ_q_XJL#IDu&oGnK!2`0&_GkqgDtVFH^k_yaj#HOCZvIc}dhOIF7UJmeSa zoAE8kJFCdl?C zVDW?|DtRSZ1R}5e(cggw{mrqmIy}d){lZoL(g+cC|K|*C;eYBJxvyo%Wfovhi76U? 
zFD)HBAgQIL1Ap%@(~P-Q*oN@E7n}nXJqPB}vsYMkt6*_D%mb<0trEXg%oId_kNj-R z&$?x1H7W-wb&+1?^-p>kg3}}R3;L2JkxyL3926dA_g0$81Eq$ejNpN1xR@VY;-C4g zsDF9Zg8!NEH-G-d;4y20`y}SkS)Q5qg+wh+OhX3)ERys~<6@4Q5YtZex<;Mnem*QC zC{1GR_R-_G+v&5WM{bu>9~&ym)VOx=da|e76s-d(7d<|3;o%`NmqHt%NZD_^ZrXkX z7gVu^g$%kd5^8}(;M%l zU3=}w+OQc80YYK48d zG&RvcGk;~R(|ej`NAcHYbOe*=c;i=eAA{lKz@S(Z_nf1KBI*)lyFEj)58Gq$(%z#*5qVM$^J3xOj%lZF-2k|$e z$DPHeNXxa=vU{8D(S2550C`=&*}c#CbMS1Ff4P9hRm87zuPa&7-=#GGhqV)@wOjJ6CP6N#f$2-^2NBN(i^d zd~nVqX3w8}TmVs(Dq{!e2&pQEVfO|`9?rVx(Fq*JpEV}#P2_*L=ICtYdsqLXRgj&} z@Ob;CP-KXDdD`MFnq zMFqg_M|!;3=QM`}Pw2qVr)Bbo;|g`m?G2mcWy5jLKIeSZw|0pMh6zxfbwiuw#Xlowz<>M zGnjZ=JOk7OqqAqrlE!}f2e05kC67e@N8Lt!lH+>*rJw$t(XfW<|9++_dVl|Bj*71V zIBOJ#0N1&a@D13MN2O3m?1}rDbAKGa`d@$MC|Qn{^(wR8Murvq;D>+sw*q&b#j`vv z{p0{Sf08(Ffz`@!FTc2YLj7oSRI2y1>Oc3BSDXO(pRV;+ebB#;CVZPZM{scMMOSXe z&epEUn`lc<0D0=x$WeI?;D6b-huXQj zdk)Srcy~A<-*bPe1Pknr=uK!Hdn>lgvup>OM}lW;DBPimKyd8oJzJ{e{^3mKEz+Tb^=2q&dD!+=5t2oeftkq z=(pD98{CtaOXjm7+3HM&`hT`@S(HLk(cII25uciz0kT?Yj}7PObxOCL#x=DkR}-(O zKtAkl1EpO+v#m3zly}tIiVD2W&2-;v3spZ`{Ct06bvsFaZmFlWp*qd=fjUM-C|~4^ z>q5Qw=1j=uEm{AGK6*aq&U*7dS}1>Mo_u;~qRtRJ6uUgS)CBiZ*MHP!d9ZU1xK$&3 zjj{YY0skB7U|1$%R$9CwOA|X2@9o?>n`Ga4GFMMh@7?#&G4k`i=Kl|PMfA-$5HRx= zI^CJBUi8_g0sOp7=ZQGT*?Sc6ilru`mRw)nr`?6L!)Z3s1UUlX3vz`WcC)$e8hI_a zs~vsUU;22O$gjr|vwtBobV>pTsYy+#Qx=zpJ`1g#Pp^eOR{Ts) z20CUV+2(NdU%faOD72h=@o`CiWJgegci19m+nhLB0H!zGjBA$hf#* z!Ks5p44A#T@pY`B@x`*UWX?`?c_sDAVWrF5rhshL_MA8C7{mX~%UpFaHt ze2UP+i--gidciu@IxtV> z*5)7mDD+rvf(Ij;0j~vvPp**!G=jsw{te=u`=!8VZ-2>6r^gYO={|73e6&|T`Y{Z7edKFM@3|1FO?H!#B%uR%8C= z>oePt+<%$*Y~f^I^*=gN=uApT8l&$a^yjntat(g1Bhl*-yv~XIBCmV#j3WBCai%3J z@>|iXBVTalhTue`sIjHKUEB`~K?LWPzVA|q=DRab@oreecH&TQ0r{RhEZ^jupMm~s zmj>3MFnY~L8Vly0<>Xo2R}pm|V5_m1pYyxP7k|@>8HdE-zx2QUuI=*N8Qqy&1Mj2T z{nfM-y+u8n;fnfS=n_1~X>T4s?dUF@p2z{Bs6MO1lP#Ab)?YQDkvlD$R%hCpvu@|> zXUQraYK7;B{~XgDjp244Z#Wt$&{}=0YAs{D-;J^Vp6rgY)|(-Co`zV!l(pp&``b-V z^ndTgyhJ3NO3@MeDl7t_NAM3Gi0ER zWIR07fpn%{7oj4cBK2?y1@>}^_E26Qy?>o3n~7%}l^i!<@=UV14th7NThV6`+%Gg& zSwkY96EhOg&)tZ*z=uZxH%WdISB@N<0t4>&wmz&?aDQBPDo5%+voL%#3McgQ$jwxA znL{NqUdr<-{?6CPw`eH9nK+=6h_)2Efy^Rh?WmS|tr$=9*n|gILP{4Mw+6Yg*nc}h zJA%LSnos}ht9QUY1;_E9_vwE_*Ff#VcmB~c7&ez5PWF}gX}g@y4VB(^UqX?{*2Rav z$-1CE`{A#%56vgNu8($ohI9VbnDkCR8XNR`{%BO#+1e3YEB1omF6Ob()5fD3&PbV>zhD$95I9i8zGY@xXVC(r($mKy?3fY#P^4ENxNen|0JpLu@7kT{G#bPV zh0%+LYniTf1WEUqkS>*fe$@Yh5IH~4Ilb1K2-Kx zv-&6F8UDpV%DDnX`qSutibLlk?Y+r_$I{ZMewd&<~t;tS9KMI^jbOpWYg8rVTse) z_rB3rJ7O?EuhU6xautm>ycm=yB+iU$-3@aV$DqHS-HT=E9f&!rf^IVXc|0vhxh*3C zV-)tDIND|!1_$s=)i zapsFYo~R4B582Elzek@Q{BIrqFaG5Kv(S7X2J(G> z*8#*-Jej8aOwpgvdQt_>Y}!GTjF{v~jz!Ql&xLsIxZ;^`s$qji4~qEA|4_7!z{ zWk;OjV0@YXWOWDe8=WfnLz!;jhrjpHq5QiS{`Kc!9eAD?0S z{QB&K@Mkj3^}m0=DWU6ONn1z6@r9%^SL|UP6M?l8{G}f){hu}ZiDwy}0-TG`ME~>w z|Gr-zj2-tYU)OSXgmt8AF3lYaFa=vKYs(%U55%lFUCoE&Tsbj)kNe5otHt%CZpUNR z!kK2o0e_ebV+E$(QWN{U)D5{;K~Y-0TM@4k6UJAT2W+3;L6Ob%Kf>=iHtF;B{TB#=kVeI&NRG>LlY*_B&dqa*4iq4E2- z4tajRcmMCcKI@{Lj_(-=g z=~5rC4r4^SJb3X2H3R22Shs}+j}XkC)M?kK1B&eyQg>OGQ2m)ne)E#Q`hUB*|53hv zSL83O1Lx>iPY3W)4Zyt6*A;84a=i#IwPS+LXew|uP!G(3Duu4)+IR)kgx(~1MB-CB zXLVsujM$H+@Q)Mtet`#v3rrohL4Oy&u#e~17wazOr9!jd%L)2Rmp8A+^0QZPkALFe z-?PInkNk^q22TXEHUiCCbF0&>D9(UhqwRq+g=3Kt0M-?x;ni_r1_=#SRSljRzU7_^?eNk!| zq#y)08_6nXJaJ8Da0o77PWMx9LyG@40KjrY#`$Xus2w_`gwjDNy84#&)Qr&Z$q z+~9ejsm1}C>2`q+%i^ReS>$DMuABGai*nzD=fdy&KF42z(fP)%yr-Do*Bs~eGw1lr z*NR&5FWsZ>_y1@i|DEst(LMU;`+!vwdvU7sc|X{??|kH^SJxkn8r*N4d!>4Q?dpK_ zQ)0Wwd(yJ^HE`$j4^~S#oFw>Ksq(CGmw^67W_PAJ^e6*W)n7>6m~pRM)6)0s z+)GbMmPg^QOKnMPN*A7N>Sio4b*~M(BYSCYmYea@e@@^2lKI6@=u=6hXAkcnJpGvh 
zZpgh|E#Wit@kP51aKOP)fM*=t!FSJ?PZ5TFa)?X^$Vc;?$dvYyC~KSD(poPpfyCHuEDArIQ(PsZ8NSDpeN zE#^x)=VikOGb^-s@FXH%H_ah%HfFk4I>h7|Aq{Aut8`qe&tQl;Ms>Wgm?K5#?O(S5 z!8lL5LOj2R*wf72$;$Y7bo;cJ!Qv}Md?Tk8^{41jrGFlI$IgphNe!pkW$2wS%q`SFAoAmaa#mscrU*52F$DdVx!&(>oZa8{5U;gjj=pVngf5swu z;=g9wpZNCWyySh>UwxThzA+*%5qtTg8Mf8(_m>Q~JOV!&?K}m|IH_(f_oclh-_}|h z4iK#A(SJ#UpO?y=CYgcsnfu*zrw})%k^yf;eaTe!XN!%-hBu}j^ z^L?PvebY>|CWpDe?rh}!b!EvmuE3#~CAfaXoGg)t@SPvC1sY5Ei;)`>v*yg*eOooq zl6dLcx0=O#Ia{CE zefeOKa-EMX-}~@#vf1>fPIvNi^N;qjh%p()M(_fDa>V@X_s8P7fC_!V-`PP*(R*T#5DgSxZ_$6w<;=*#b*WE|`+ zy*q(>j*RCO-cjvfyR|jmuH+0_f6gpp7)K9FJD&8EW{n^O2t8)}IkdPw?xeKpCORL4 z{rU=65O_zWNPxJ+QYX~mUx_tTV`e)u#(!?@EC+{fTpFlPdreQ_xqFq?)Ul!;LnDyh z4ZVggO&i>PZhc(s@vjh^C=?n8Yq9rawWY%ryM>Vt_6ScQ^f~dh*ubIHY-fZN0lMmd zABWFyOkKYqml)7@ujxmx`j6h>kKWvO9ewwp!19fAkFN~(m+$T0=bGSw{dM;J*2m1^krfVvsJCePobpJOQOPwF>v0PYVSFk(il~o%D)cCYI5OIDaclbS{q- z&RyXRo#_lVQwJTDK!UiB#dMBq$}rFD+pm7$Z~c}WbN%gM`kI9fb#dRrGxIv3p9U9} zJjivc6R9>p=bsjKz~>3Na%?vMA~3txW-7k3_%+T^d?J_S^X^=9UGP42@J7`|U53XB z(;0U|Occ5jQnQE#*;Dizg@5OQmSgSnGh!PSWIo$R!{~?m{8=XlF#frAE%6{*KMoqT zVEOzGwUN-gAm=A`@R@fF--SQo;L}XLGNjkc=bVtob{#`INC87kbkD`lF0~do3g!nw zAAx3t-!aKKe0pZ+XL)@~9X2+_v(TH-SJJpv-wHvjZ2X zL;nwX#&qb5^LRLLj5wEAG$(jb`bvUA{8POLNIbnv~AFp;MJ!3qT%Y6gfqA;_{ zp9eS)fg?e8>Cd^sJb(SesS$e`rQqMag5SO$Uw!{@kDsWx-%K2i@NdvSVP>Xm>*%x8 z=z-%OKGqxPKVqs2S8!TJ4am_)-}@|y`1tZKin zrALNcweZ?6qjST#qsww17Azcf6n(&L=+v7-!OaHVm=@1Us%n49R%qh<*RKU<#LYER zYHx8KzH8{5_YgnNX0$)o3;uUS4xHgje)X;nAUuTL=uchniLIZSI?toN*fwcivN~`+ zmvMYIjtku_y*AxVBD~Z&w?>;Hl23%5Ej1gs7l*zo^WG79)#dh5tv1CR`}@ z%BkF!%T2>axSN7siUS~a?ZK=xX72_b{tGdA6g@b{;B$Z4X4--Kux=k}%mtok5u<}7 zIqs1BW;Tzn6i1QJ6GPuO=+j$M!SpdAtlPsB9IGU-V8rw1F2@fMr#VR5nU0Y-Z~nwL zDhG0pkEc^UC-~$h`E`lh_FK3Anq}A-pE`?quNgZm-g zCkPht*?52YOZ&J<&T`tF!gJu;!#3CoKDWIeC*an2p2CM;kWz=GXXN3x4&J~pM`(>k zc#-|fTKM+SB@O-)dOYNHs_?GU{)*c?XC(H(*ZkHh|HIe&`ljc(ST-ZhpS%8?SLLW@ zB$4otLPi5nWNG{^ynjE}h~BLf_HbIGbq%jEI--AZ$$dc^Rh-Kzj^yYfE3*zBVKF}a zJlEpmNfZxfa7`8fD$txqLIQr+fAL&%{=u*F7aTRuHH$&x|EV+B0T%{dHMpzm z=!}K`v-#C5_3xu60W~Unb7*u@?xEAmRe@hQ_+9* zpI>Fep%0zSvK?-?@K+U|)9UWPjdTB5V8^OA!dUy`_mtHW@TM~i^iMtUso`?(_mAds z&G_&s@)}|{EsW84?1Ee3_rrt7Do4WZmAVMLl=z%rfI2tcn|cQVThen;Y9Gc0xv;2gc^o89O%LiLV)12I9q5D zXSIwEE$U}^16lY)O|hr{+V zFOGC6jMsH^rnRZimLbL%#6LcV)lXv9S48o*U7CI3iz55P2;twCJ+C&pxZEF++A}{; z#qa!T?1&k`x5lSuDS!Ic#7p_V^=x%fr<_?w-Qg?cOr)Rnr2ooi@hn?}ZUR+FDaNA$ z@ldUUhxu=>1MIN=$M=21CKZ3RCa+5(UPq86ci3rymnPN=IeoT{v-uI^Qj_OnZUrBh z|NT85Nq;=+b4pA9bMIlkOf zdQQUo9}N|46h4@HFi-KpOl!Rk-0Uo7rCJD=jhZ*w^L5J}r7oC5Lo;Km070-+@XZ{w+Ded^rXR`g9j{g@br;U8SnFaP|X8R6$F&2nDx9{9)z zKb8({rfb)FQD;1B3Hg6LaX)l2|4YtoYm3fJB(w$KZa)3~sRXu1D(PMePi*ib@YTCr zH3d^)nkl{8RhnK0qHl-KMKRus83DPJDD+@8ZL0~OIcxJwN0 zg{A~1Fy12L;xSMD%jnPwbP?6%;XD^he1j@aGZz7B78uqm;3+I6n&` zKSmGL5oa;^RCXrw`N!Q69`(ZKwXZ72ip|F#&%+8X=F50lr@ z+b`Zn1(8pi&#Slr(@a;tU^Uxn_1Q<3ai-=87}vpB{l(?($tv zLXReoHQ9g9qw|^9Ts5%dPSy_-^;ovPM641$))zT_3X$}M?iiS%KH4?a^vZKXt6M5* z%w|n>cYb6&X6nPh3QpiiH{yG;D?Dkp1=ER;jHVZwzrNF*%}_A_<0f=SL~oIv=A{hp z;9xg<(xZJz{j-y%JK#L-V3R6n*R!@uIO4a49l3uM8|ct-HanfJ&~?HO#1S={wu#3_ zW{qHLTnUX^a7FPo>OCbeK0N;>| zSFkER@a>R#MTr5}=f1OMKeN(5^-Rtq_~~CTANhj=`s1_r%`bC5fV_8sAK9;Yu2}!@ zwex@Hs)_UQqoD+1hvJma%ymfPA_;%8)>vc6Hr~cNL+@l|D4dUEKKMW2%SWA3U z$Z(-JsnCAc=uY-}voqd4K&rB;7& zD$x^S1|akiSWVsz1&GYIS89=uDT#vxICvO%{G!Vxo>hi3{dzO~(|`Xj|EE9r)xU76 z%($7r9{P>oO>(%f5Q3^g=XtMM>Fk(*VLy?TMUhsuLVWa-+kQ*4w5-OXnk1mpK zv>JWqIs+`BO-j;(nGeKRpgrzR=SdACujRE95w%;zoIsT+2usWgNpOK!_@7AkA}Bf6F9tGt!GHgM$a}M9WwmWv@ULXtr+a_H%mBsm z+?S-5T0lT}$xu*0zzPu|BmaB0fpgC9TgBdIpM4{r@{3q&iXaR|*+=iKw{|>Vh>uP@ 
zO<&m0NbLRrlN{PceoaKViGK!pO9NL5=7qwhc8D(n4@mey2wTbrx6U@yB8CMGU0LLP zK)QS2wfylh6Ab#mb@eL`n(Tk^hpPeJ%|5hI2n8|tuU4P1lqYHtmtY(gfa^(_e6fy;%q$4C+opS#{pp^)_8 zfMlt|b*FXg!OwI!Y0xi;?;niB5U}D|;e=#fi|88=+!)b}h291<0?9sd;06SyiTLmG zm|3UbHAQ}q1q?RZ*BXCYzLOW|F!?iCmxI+#H8zGE;oj>UIaUegFPWd`SL$AqJt?|y z%=vp5d*gg|#MZe=Ke`;@X>@0AQ=)fHjL-q62Ih5QcYWu_ecM}u5AL_`q094)yBWBn zf4={$l~L9Jat$Eu;XpWPF6xAPr}|c20%W}%lbn5UStw%pW}$!K6(u|Dsj}#C%#tg_ zZ1m9-#FT@L4gAP}M}jYPlB_|m@ul4koLJf>n@Yr7Oma<--|rATHhYuFBGIJ!=-a2- z5i+sA`=kD>n}zZ-bH+3y+41`D-P?-X^YL13LqA^J$6nfwJO=?Zam3{0bmEh#rFcl*x^qlV{yZ_;q%^v$B2~qYafX`8>$6g z+^}W%Xpj*6A=%>(JYhXJC}QN1L!$=M-mT*l_}j^6Y)@Hzk4eFRUQJ15EjVa_|Ff;~ zlXpRZqicUnu?>;9ArXgfo)5%`;m;#;ok5}2LeW$ZbYs_JHX$Y~c?Y8Z@LLRLm=C`>YskK4h~JZVKEeJGTnKP(zsCXw zp4VS-o{z0WaB4$cu)gB6F1-Mr2xEc{*zNMPW>SCf47G^c6)yHMJ~%Hb?63dx>l_+- ze}H>Vjhx`@frn0@MFe`It3Whtp(}^{#@kcMNS?W<9&-iyp5STVXdU*7uBC>K2?*`i z=2}IAClR?*1pi0GiB3~)(h_QjfrA$qtUqYUd~s?h$ZZ0?iwisS0YRELlW>eZY@n>{ zwNZb~#(Pv+5V8s4{g+#@yCCoSTrc*KI1#DEzLKJ#l89a_+KmH0Qwr|EPYmwwp9j4r za8(sa1*&e&&o@AHs_7bW4Rsz#vldvhg*B-h zHLe15U0}Nz97yD__F-9yA1XK*F$M~en_xXahf5F8{ieVR*M9i;Y=UD&o+qTZ(7A{D z2CvDt|6tHf`NpXJJ=;+s$DcO7}e+n~01-xbe9aZ`V& zF{a=&DUXgLPBukh!qmbR`3ilU)i~gmTkd>w^)qK2-WUsSDM6wYxL6TqJgb7$E8yym zwO!$%-fN^bCTAI(K^BIwC{4kW6iL2~JUryb!u?~covE_T!ERC%dpjHR0l3qTZmf9Q znhIE?J7gSk{Qz%v(M&DJQ53{DS@3_oBk6OKa`qa&zEiU$x;5U<8MvD;a-PC?d}(V`_}%1KCcUaJE8W&f{s6!M6RA+ZEl=AmG?|T z-8rl~y|WBwnHj5Tc>-S6m^1A$^2(OB0qIcwL?3y_n0oCMTzuf!cATsKxdwlnyRmu1 zUOaM;Lv&6~*tZ{f-+$^ESRmT01W{^ZPQZmVW>=QMj&>~Y_V3viKJ<}L&=xequhJb^ z0r_$*1!4m%n6qzFyv{3Df-VE3~u1o z04LO-kHz0uYE@^LGYM87d6$0{aQ(>TAfXZLG4`r`Vn~jUiM0>=)fjo`kYU1pg-z>7(IZuV!dbR0p!4{I`#n(RkL z_>U*;`oM9BX=3~oCt!c}1J5av!C)$JzkG9FJc&JG0^h!x&s@EvVQYUwhuanvtRec1 zj8)>BM6v+!L-CnH_)vF*8wJnWL(~BNT=X3lHezOLRQ?)iDSQK8@d&b?YFQPcZtx#+ zu(Lk@svL&{xYPV$twdBbNfX0;sQogWm?&D`2$J-DtE;3ezosTf#;c zac?%Kfqzp_2eC@U?RtM8T?a4CzE&&Gp+EbB0jEZ2BS|?1oZ1H`3XC@P{qE0rK|ddK ztemXL28@>-rCxxA!VQyrN9rYr9BUA)*m59Cr=0TM@%`k6hr~_gs$4}~ndcx%U z3r__cFr=diE=GdSfM6FtSk~X-4Fjgl28SKmm#mR|4867?W;}n`upFbn?+N?LxiNr& zxe(()pbL3ZIi3Tk!*=p!j|Ip$5ib&*i@H98{vUgQT}Q|Mz`41?Bm}A{2ONEHL2&sB z{BFzAY!84l5FX0GuL<6xDUZdD*rF_ngJRz`_;d$}9NOp4p8A!4qwJy+Ny1f#P-if@coiS*&wjCuANf?kkQb)4~Syxx=7`E%k@| zwa&~!!xqPQ`XjdITj%;S z#_{>vR{72UF=$=D&s{rCfdy^Lp0Kw~VLw336_cV=-=Kw>>Yz%NfY-oWum|=CIyfqD zEM6a+kx+_wiHkTK>@+0bM#yk$3%h@ZIxx}TO?H1^%YEgyfv3NxIp`-MFA#RrcYS_1 z@d#JX5bxtjF8XI&esB)I*;&|YuLmE%8*yX723H&70zgr*)QQ<5hipEP&M zSR#LX$N}o`MYp4@c4ck`QZf;GcHja8_7ECgu&c1DS;ky19ry1;l-|FQ@5B&=ljyC@b z7v3+<2jcrr0^T0yPR6Qw@8z4GKI+{VTzh|TwA;^{k7s@Q=(Ir}g>Qu5*&rDmAac360ekotKI_Xi`VFV}d;a4m#`CT92K)L;-|w&aK7W7Y z`To(@{9R*m0uJTfehjXfZF1Nm2Bd6FhiF$~?`2|>ZSLviQ9eh>#sglX9m{$L+u`kL zqQ3%jlP4PgA;w9Kc*r#8 zz~c_%R$mt@@V2+$oT(+?>yI)FSL%Odw`->cY%bJI7g%=p&(~j>&-LL6JaaDg<3k)7 zag!C{c~$KXu5!q!{qbM??mHiR@8T~G@Bu4{G5QlT1kcRid;f|V{?zYxE{FhvS%@{( z*Z2}Xp#%I(E?6(y!a*G%HUawuDfVeLjn~c`e@`p7{agNSoj8pIIv>U;d$?xSx z8~&x-WKDphu{Y*yAI_0CkbV3I65?6#Z3Z!XFWA2l?k>!6zrG_eZaRX!i#^Hmrk~>} zyuw@rzX7#nxfk)Rr0|~*tOIgA#1vy2z9qu-Gx*b43y0#^7*oKV2X@?7IsK6z_ zFADAkXy+quA8{73xB7n;9@!Cy;X(!YHWh0j;u#|FF7QA7;k^pdj|g`;^Sy6Zu~!`S zy@;g}j;znz1fHQEJ*q#hVW(kC*zkX0Z+`$uAv6_uo7lC5iUog8Jpg?D`*(;Aj5m!oO`hU;vP#9rH|v_ObW7YO_FWxBzxlMNG^Pfs=H=UENX5 zl|y>!o{SyrB~)zH?mxfgZ{Ppd{^*G~^Y3c~axcGdmjl-3E7t*gpRaoQ+P{2Zhrnmc zwb2RhNi>&H?#F2UT6Ex@z*Q%4)sAzZiA1#rNM9N4Rt{^kI^!Pwhs1IJaUZu zwnn|ZR(HfC5?}yeph(rM+m}d-LhKEg_08ysx+U$*pXPrP;1ci*05PH_u5R$>JK)4a zERlMIxDV+g7hF$x)Zxe9jBYG=@?7Ml{UKg6U`hUhBd>s$_&u*@1TIN3AA+BZ-f~&A z+wR(}aAx-42SiuLoh6bB28afo!3W zymSIm2BLqLf!~nwf~zZgnl?|*F>ieQ^JKggSEB#Re(`2}@M6ETR4`_K^dG))TVM7- 
zlA><0WW+~pp=)PItE)@D!`Kcvr$$|cflrT@;+viO+cDY{x)z**y#6t}h2%Z3QT7_K zZuxwlD~_T6dItskAv1{>FhpmAn%CfiP<0jr8yJ5ZWd6e38+QLFM)Hk|gnrk@o^`*l z$rZoMpqsCB?Npvmv7YQ9QiGU(5dr5(%i^_+_=DRGe8x=_tjC%Nm61iRgNlg6H;A8{ z5PY&Wd|TiKah9>izy|`;Dosh;<{OJ#O;5kWLhfD^=4JSa5T{P@g*d4F$T@5*J5*T`U75vp}$z(np3t#^I6!qN~sBiG;puT}_9q{2M^!MRofUo;& z?)}C?{UQGwd|99O%f2=sPY2TLy9F+|I-2`u*M} zEy=b1#HYASI=kL(gkUUu2;cFo-`<-g@7)rz*CAf@gMIof7XCF)W6k-sKmRR0_2GYC z_!R>hew(mCU#F|bC9BbgW`V~TVWW{)Rq+0sKKO?3bBGy12X_jNHSn1d49Y|S3b0wH z(b)H(IRqY>ulNk%6a4f^*o9wxa)?h+O7RtMZpq$(yyxIQdZq9mffo&;X&6%&^Ps{X z@UtI1v`?O+;C-+lUl;+>e}BteW?+BA5uWvLof5n!c&$F;@%+iD{fbe6Vp(GcExX`x z?0}q^uh*m@a%^^_mnZRX002ZL(R*mJjI2sD_ z>gW~l0>n@G<;j9lHNe_q>$hBSGxijNUXR+AIAdn7dt9DA)E?4Eb!Q2uy*4IY!mR-Nv;Ij z^#zGTBq|5eiJlL(pfkN7*LkwZrks!Ug7U=(_5f$u;*5+}tap~F>}s( z)x2S10#}K#foG(={VpOZLEakhT3^rd?bcIjytTI@YxUhq6{$sAOwvLK(IaQL$ATPlemK;KM{gR zqB6JwFUui&byX5xmYoMKR*z^Tn0sOJpq{`z-pmynAQnZ`yKH@aH{h#xtXsj>m1;Me zhXbx)f8$E$D}L8(k)O@2aZg+?15f;-LYh4$#@7WTt{&q5ycH%10|knmp&b{ukqzUO-2L4a%j+T$nxto?Y0Ud{IO zG}8OqjegCMuO(WDs;&W)Y+|{FHAInlc8}*tzXD@#jZ+kpG+V8{YDPJ56mKXTm zKl@&$g+CU4XD%kX5dFWUCJyf{q`QCI*4Rht<4AML+dMd)Z$jUXkRfjCV@S3?RK#|$1`a$>e;B98ssiuE zf7b5#myUl5S>H%of;;eATYm8I(){Ymfs1@5--srYxSzNpH?Dx?K}=es9?)C3&nH_z zOGnMwVKoSbp$8#NJ{r;M-1F#J#{$igoCR zhsqU+pY&S`1%IO>FdbhSRO>r(YIqLwCiI=VMxd&RJfA@#HNV_yHVZwG&af^`x-?3b<<$3Um&87x-O?Z-C@6J7Oz8ROEyYco^cF0p61bCLQa> zL_SS`pC9fCpG1lWI&3z;Ei*DCU9c2js;jb_rS6KI8Mt z?*7U{9q>|8_ocfBn*}<$;N;nXhZ!_46!~0BBiO%yb=Gn`D=O`0FsP{=|<4bu;Ds!;OD- z(WyE^uEMw9ZKzKe1<;Yym))`!!g>XhOQs@GYIB>pUOMvUE{ise$9!B*d_Rh0Sab$ z&kO+$|1W({^cMc?yZswJy}$D2_3>YL^MEh=>vxCZuit&~1KvRY1ZI1p3zCri-QV_K ztOftlcYp5Z?HYt|aK3-Nm)d`U6A;`-zup0%(N3g)$_4gOWo!R%{uw!PYX2}!06B#G zJf~fS=(l$G?8E1RCf9-zmYy#Rb;^gZ7Ql z9`+Z8PGKMRB)AoUv;*cV=pB8{`^c+!udz<{*t=nFe*r zm9PkZ_mlso`TrN6lV4MbjgiH7@|L8~lPxwu@G%&m&uHCH?09&E&ti$SGmB={jEM@m~V(b9QMLrecK@z&4BC9fun(z*5(}|=tFo`ZMdcgN5yUgC>LOWC3t^ppMX9$kELR|4>^y7W_WiX%S;L3?7FZoIyi9?SIu1>D%6?zowr& z?{9n+`m}!l-ANIZBElvDzV5Ro+9b~p*rgXVXyCt45YPKwHv#M)e4Xg8#P^MP0_`oI z7Sle)CMYJf0R)%1d}15{N{fC=)&-Kc{H-Pa=l$&mcS&%DM33#WA0pR&$Gy!X@O^i~ z66xW=TcNThtBJPG9AFgSlq{y;`V?=BW?dM!4y1qlkzD87qxKGY4=E%%7@mn}ClGJ1 zWCv~Z2{$6gr;0s1FLyoPy`{70&-3YR;e^CUBsixPMeN_c#XcAF9mAW-b`eFlO*i&=30+X&08>T5J-iLAY?Mrd#(`)h-4vSN ziSC?z7MLUkxC&D)R3wA}dm@?M$wn-9z&EU^S&RK0@KRa|*XR#Ul;~9a&e!{!4#7|R z{p2ct>Vs%cV=s%mjv>eSryf3B5CjMEl@ou5>o4@!;c71wjJ2GzD;Gda51hms2l`1q zd)FOc-4TBlJNkx9JSavXXW#)aDDZ}wSb)qaO<+WyOnQ1bxOFAIr@+A$k_m1_r-ydW z&3gkAEAp-ps2#fiGpFWxEyxE-PT9gBb-ag#tX^mqhFeM0l%VZ`BHVD@&DlcRUUz?6 zOUUVZ(uw$U-*S6MZLko(+~#u;)VK!LHOdH`xEzS18mTU{cv1O zfHNlXhz~FIk6tM>^aq|O;y;mT;yV*ucCuGOa9t>Zbd9cNjv!@b7>W?xGD!P=@`eaE zpw6EVTLbR|plC!7MVjAi=osFUDR_TE(!&k0U1+rR(7(03%Gzg&MmVCx>TbEf#Y
O;gIFpLlQIR3!l|DTL8_|d<{*l##DG&SbXXY2VYNR}kP$w_}scmaQ? zfdk{B6 zYv9-&U~yOc=54P<;5vZq0GFvv+38ZzT;z#6+|?<3?5S+zNDR2L7xc7<-30!I5C7qB zbwM~@NxvYz)bDt}mc*&>?u379{0lP~jW&F{+#*NFI*(M0{R8>|(egm@o5v>{^oM(* z7e*ksnN91NaqwigkvCq}+M4m|MtaSK7UE&ZI{~?532RQ5_(( zwqAl$mFO;$IL%j?SYUt=pZLZA|KOCc9~;Iq_9kTSjyyv6^H6SZ`}cn_1rB_0niE|j zX>tOu;lNX#!Ph)+R*S+6eAdHvL@&c#LZe#bqDcyFY-PizTj-#Rj0G62z-~wB2`*#s zp%Q(QCN+WEz;9W(WRX`ps8= z`r`Nd>0ci`o-do8_-TK>cyvE=_}AR+{gK<@+wS-|{!K#_+Abeo-xo*a7iSf4u`>1> zs5bytNj@ulxgNohkX&e@vC?8+4T%-_qw(G}Y;y3vAr=h`X)YiQ0@o3%4z?B~hFO@A zpX+}AjC|Wa{Dx@y!$*ZY*%T(ZLla=Ms-!b6hh{o(>N?6d#r%JP=ZY})uL8&o5ZjLB z5_$iKiGxam#QnfwE?yTH{Fob%UkEN#8$${6#MHvGxb`pt*jWWYdqFe_xWd>YE>1LD z;itnIZO5s8COPxKO%P5F`0&wwYMCw_WwRi-CkKjH~4?QIQ|6Qu33(F{12a_v`)YwJ_{4n2e@UyIkEp9H`o(tobZT! z@j7}(4!WKxa($W1Lktvi?!XguA>4@nM%{h$-hlg^@KTWd#ozGW{DxChZ0M4FVm%)W z47AR1?gzI>&i|DU=xfgW%3XyRkAj#ru$tGCW+Q(u--b*ja%%kalb1U$NuA;duGVjYmCx@lSnmg)?!ZPzdJjWr8>kIT3ucZk1(h#ZIK{Z^rX?1AR3xP&Iuu+QI66!2yPe-2U{2EWc< z`q#h3r}MA$Gt6h?^I-D~{4<~5{i*-{m6LzzW}r`AZ`iw$oL#~v2A@VTe#$k`{D)mS zH}I|iex@e&)X3$!W?Fk5Jtf5Y|j*KzT~yZ2c)KDd9U z4}T*3k-(Y_ef!_wG8S{fN%+CTBm{$H5Bi_B7(?gye`QTU2!&u!5fg*Y2z+jVbR%=^ z{~E92w+<)t0%3Z5cuhY3jUU`9Vvph%2gZ-K_n*&y#aqAZ%z?9y=08{>;)@;Z&I{_| z7QbhAqD-i0^QY;GAFwZe}!f0k;}B-9O*M2X|fi z;E3P_2mH#ZzUou>(1G9h=oca98GBADMr@S5ZbTN#72%H~b}iwOf`TpPCF9ArCP&ABUw`~z?*P9J}DeMgRV zs%pwNmUFi%KKOTNP!G8wd;=`@@7PZ70Dn9${)VAUfwhKx^o{F0b?hIok9!-N1WCc!EB$4U)6?voHAOVfyl$fP?jqIWd32e)92FGS8!#u*7>1UVhA_8J7_p zt0>(3jZgMue|)zyC|tqa`m}#V`hc-{1nwHtOSeSdUNhK^?$N84&=Y$~8_k~8v zse>lskfTlh3t3GhA=uA9;$_AS!5@OZQ)n0c{Dc1z$ea{^k>3qFc7%WJ3LmsOgFHGR zU@e;+O-0pq_MBXZdFx`uC?ur?oc&06+xHgtGS(aWcp?4_xb-0sASz3?DESav&FL-S zA0luPPm`a~xaoTQ4a;av8>~t z&{)q3e!W(XA?8LUP|kI}<26OO$2pT5kwQ<;X>P4lPARhGhk$a>73cu!jDQeO6)^?YPc8MQO*DQ3AeXeuH)YG={yLqlR=?iFO z5X#VPauzlQbTd!=+C?rvx9_#gc!s^O1>^H7rLD6|@TX&BA2nvxP9|r>tY!t%ozuix z+tc&*Y+BZGCJKKgW9^k$3%r$8)$_SfpRK-F?9WYNE$b!UztxSpTuz`Vx(UP8te;&s zPlJ+$)p0XlQY(-1Z6r30Jdv??knQ%AqkZvt23~J_{6>ndbp>@4WRKl3=!_&!^cg2Y zdSJ1uoXnBkA}WB10VQB!J2P`V^T0)kL}$CmPOb`REbo83aNW1MFRHzJR5gpfx9&U9 zPhPXy!Z4_@=n=_ox0poRbY*4YqT9|^(6Hf-hVyK}5#U=iD6l)6Ykb_~eIMaJ!U?NS?8pTF@P0`3=c4T1>Gn zo8UzPFJFHPJi=kgMu<^@zmc3*SqA4>?32YScWN<(Zp$m#h_G41CC7-)U>%;~yo%=! zHFRScn zxIbtuHZ77=aEz=bJdczI{b}NH204%CqkMnTr+v`>gz$6!4od;y*@Hs+&|aVRCgl@` z{yFpqlt(Z)v_D>o_=Vu+Cm+fCY2=tEoHVRQ#sQuWGMhnj81+B@^f&c@eFWAG)FF9FVo@koM+3=YN0RcX<>R^C!j^+HXe2?}p5$>m`oSAD{zav+;io?}soH zj_DTX6Qm@NW0vB&0_}kmcFb2pePA5J!$M+p!jpo&hICc7FD9pEHMpEVY*Byu2b?uV z?=j_(d`b2jfE8+FPyoECY3zdxl6A@oWG^&a|FuwjeXW%N6*9@fUw?4oK~gh~n2eHr#eF|@EPEv(L?sx3Pp=bn$^N31tZ+t5N@`GBz`63 zUF^2t%)~OYAent>UEFng=H%5EZvvzA1+`Yx%XIB&Y`-3X3WDK|4iP zfGi>Uvr3MLPN$UcSLYkVO_jpP9an`e9i4k%e*{xQzR%UUa#erwEY~dMCg#^%yDGJv zcf8&xFY(L+m8BkxvT=iz8XM)Ji&C|sFLVAhR;l*B>F%#755h7a)~S(BwWkq9EnZZ! 
z8I7qqMO|Mbmzwre_F7G-enG8Y(*?C>R*%gjr6OuMfAts2+AXJ4pHr5wWT^f)RSsze z!RYDwk|rZ=JY9by)8|R?%=C52pCgr-?oR1*b!Xnfi8Gfx^I9Jlt3}B)`mr(R4D&b~ zpUJ#u?#%IF&hO0qa=fkP6?5Mm%Vh2{H{p0*%}wTdIcBRF!(4X9IGLT9k~s#ena`A) zqq~}^Oujhk$y8y|Q?g8$!-Vb+&gv;ndg>sV_hNE;MaqAw2q(@d7$;GsY_P6eW5EebkTpkAK%Uw8kyk!L@>_X&3z2`N2WHJQs}W|EJ03)@T!=cE2B=SP zW}A?GK0AR_pdM}f{{4MFl(!vzCwzX_lHZYfzTUw8#A=?DrQ~}6v&sFm5W1eV5RrSd zCal*`Dx<(@S~d?Lh+v*;mU9ENC)y6b#eLbgAk$5|E{ESA-H8mK0f+0}Lb!krK~|$6 za58_i9jA@U2z8AliL4~$MtujQE(3Q05PF+BKn6m2G>gzzICxC)oV7x0Ezg_0+=;_I zLLS%V?Wayz#Rbq1)oqTFW}SkOeq}xqRuN0pIJ!*?yhDtK~b)0{y=#a z$Te{TJTr0vcmm~#tX~I zX*2eK{y^$Cl^1w7zaZ^u2eKcbPD^EiF!Wa$1hmUPyJyXV2nb~v?(`Vl0;sCt+>U?Hu3^W+`Ny5}oG|l&{g&r7{D*5yUg>ac{?>gd ze{(WKeOd*Z%XuI$!}$W7uRwGtw30e#H!`LK(ykR4{bioPG>WRlW`Iyx7ury- z5@}b|MeTqHc){!1yUt0QNI8G0FYkf&+@XDu+g#&07$>&kYiI}A#rgMeevk9w=X{Ox z-f(`6`bG{CIiKMC`g4AY^Xcb&*S+B{C+8oS`+v1R)|F6vLZHV;bsX=|jtUvKaF8pQ z>qPl$Tyjwgj}dar*apY%VU9t$fgL*&*50rpb!xS0#i74o9_Q!Su`7QEFt0{yzn{i;XDNJa9&0ILi>lDzu>(5IZx_09?om1 zUjRwS`8&?5pYu~}JUYA=i_c4ctdo2z10*%9O#z2KP1au2q0Tm#FP~6}g~h&n;6E~d zy@&Y=E95`zXS|tNsC%-OL%%&mU0U|UB4aYgXN={%1(Grv+G+yUf^B*j+DA}+>w}_& zxg7N<2DKpG(I+^`^Fu?z+F6oop{=B|8_ePV;`I;#72rW2?=1{{{p)&v32VdOzXzG~ z{@Hte&a=VyX3G(#XG@3BgX7&5^dmC|$dl*R|NQ+ihDB0exV~!}sc%uf6lKMFh`##I z)(MnHAZ-QI%M@)h8|F|hEES-|9E@)=b}=WZSnIKl#$>(@i7iFeQJm|rY2^?LAniIQ z>xuM0?VLdlFJg@on-F1k+qrpeyvQzvxA5{SCF~q@A??( z!^I};W5tJ`I^`je^+i|?_sVd^_xaK-mSx5QZLY9 z#~AoSdB|L};W1VbHI`r}kw{I=BJY-;eYK~tB>iRs8~EoM*mPv>Mg$M_e;mq&@r6Ex zc@urL=HfHf6agN8HLO#gwn7;(S5DDaVQb<&e?N!&NjS!u^?hxKy75YvTocGMxC`Je zK~_We)whCwJkE8rG3Fi(bHgytZn{5~+r~Wn)%H-nK*U)_9sf-o{_2C!xF>Ubf$MT# zuDL`&(D~q%B@zbSk^Vpigkxj=hfknzDuWPTNmx)!dFzg){biEa6(Yi(RxKXNE_Z$9IN)XjhTKBTW*TiDO(Xc z+!2sKi}B!p3n6qbVRK*%i-+M@BKG6oj~~1X>NFbG7xY(*SrPUr#tUpH4P~}?Df;z( zu+wdJu66S_CNYoHmrHCo)H(JKq%UGGfxg%0pYx>b-5eQ>9dOpgx%{$orxSvRq z{s_AmdkiropJ5Z@vp0Np@R@Ktkk9V$`SbnubA3!cd!Orv&;IB7;dAi0e)tT#j6ml& zwC$t~_zrP!>@hmLKem!2o1PU7avkh5lD_eET}Q4%!wlEqy@+4i8?J+$hU?&(C)e$A zTt{So9)|0dHR_0Uhz$p^08(F{&!{LYubV}u`CRw-ygw2MaNXha{@mv}`t$zY za9#6x|F7$aT_&`T-{y^$*l!!YX>Ib`HQvd8W|!K6L!X>LePS4+!*6O)A=mfB9{zPb z-p>|)u1`K=8u{Hn*H4D~yY;WRV?Ffk6@CNVyh(c;`ZbN~MRGl{1#ms?pOb6x&bIjL zT2lV)&$Y{;EI-%cy%ClFeaxJQ9WMZzArf1XqlSCuxSq6=g0>{rFF2lD3k3q)`}Q{WO~>*6H9&9OE%OyY|)`kuw;;X|6Ggyinze{ zwK*yC_UBscm&uAm_MNZ|vZf=s7!LOOSa-9)FL{i8B|2bTl(510Wjj7u;;S+;_&m^O z!WnFdpEa*V+1B>MfO{V?xCgu*v^Iu+{xacX(Y}w7_mAA<&oPe~25E1^k&au`uu(BD zV>Tb;?r{&kyMuQT{ZT|P8dERkSg8ADVPnJpj{Ruurd$ql71%X4m-4S`oe29yXw_#< zpkLtzX94%-l@R-y;dj`q;r*h(uf}UTB|7_-1|SnP#ayp|R2n&Pb?bxU6XbD!|Drn& zAq|9LNoYeO;8A|y$a2L|yN zu*{NQLQh$@*^bma^4n&+fN+V_XmY;5 z^{6K_8MXUoKDfzslPM%iwOtyKGY|8S4g|#{sKS(Eyu9y80A4b zpnXtoJon0m5aJ=%;r&sr7`hLHr~Uu?e(*QtdXRcVdBBMD*Xwcqs~=#zp+51R8u=aS z9myDEP{J$msh^?!64H+NJ-(Me?TG78F2ql9UmEu%<-*Rw1}PSQeh2i|iRdNb81;$x zc{)TFK~+HBqriR1^}qV%XFLJ-Lhg%l;CHAe;6PZCKLkDsLI#H44dV>=8_M-5&zKxX z!}vtRjr1p!e<%;y6FOtYP|g;!_Ry|>d9Lx7`+oXIFuZ@t^+UfV*OT!w^uwY5YNY=I zB=V2ThwJek6H@BLq!yvq@Ji2CA`P zXd=>0c#5&0Vu}UI1^i2dSB>P759`xV#r0Par-X+Gk#+dlh^`$E91GFM0j?FD>&AYC zu1S6m+UNB(elU)}GXc~Q@D;#mU_4)nF59|@Le#4$5wClhJqv#IFpTxMw za*bULAXA`!uOsz5=&jN`;{Hu@R3)g!i3$X=NuklHkQE%0Lc_Ogka&AWIsq`TGHce*!4?wMELiA`5dJ+lY{A0} z-5F$DfeUep6mR5JfSU`r=q>Co@bE(e8ys?g`gAOR?BZw2Gh{M(B5e#&GWOm=$Jzm> zE;NOai^hSc^fVwmq2I~61{N=|1g9IPwn!>*3l7?Z4>I}aw%8y?5S@T<7J=KK#o*q0 z9C8j34@c6n&4=t3*uWN*1U`WcpjEKl052uf-k`Vz{R5&+kRm}u+yjkjODeGdZ;_-E z(9nf{;EB8`w{0<~w3zd3@bUTvz*)SH2pqtJgr_gYpaeCVf8~N!idmlqo(?!KOg5n{ z<%BOTVx^J|z&+neU?eQKeG2au%%p=YHEgg%Mw|48w~LFhqes3em!uNI0+e?|o-Vtq zF=w{;e*b+>TAY3Er66NL`8@Lsba^|R?~nmO|2hwU 
zuHdipX8Lpf*Y8D9J_Kcx?40uZJ4cAWej~PkE~pVyYW9pXj=z7u6?>jZ%6;H>G2?(K z-|Trwt-z}GNY;MG1yUXDxGN7;+qhaW2idO@Hp!4}^GX=M<*U#4aaCUSAjS=MP%vVW z0{=DNzX_`qy+@^d|0a^B-({NpyvMeG_^Ny5ZEe$^^VX-HI>Qqu-{HGfH_UHLC-+scUGRP0@qW(B#&rl)+@&)7bHfBznI^H8f`+WW3+#yNlAJCVQV9g_~f=N+8?uEPX+a^L4crSyHi z_-czn?2%S5oCi2W{+tH|%=y#T9jqT8!;6?uYvg9LoB#2@ovx36YXAK2|Lyr`j?eCP z{BQr`|M@>p*UR(&IPK%(|NZr;%FFIQwEgzD`@f8Re)*rn*Y&e2kN=TB_hlY`zG;t; zgMR+Uf~Nmx{fPggd6fS>*+2LHK~7RozBx_)=V4zT=hOc^p;t2YiyyqBi1qKo_XL(_ zax4yh$A9(@0tsw?dInKjT0sy3-0j!3ep2KkI7%lZM=XO5Zi)~q@k!^9Gf9CnNVm%2 zSf&1Smfj9&d`G-`6hb>D1rp|b2MbBLSE<)1gdCYX$tL@g^Y)P=?f`e8FxB~RmzwjQ zyDdUB8!gz>J@v)*cA8GtS85$}%>4yqgS&X$OP?9XQ!}Q24$S;wwmk%u&vu_+vDve2I!6kEhB99CHnc#o)LA%OW)RBh+mm?%@#(lvsN6MqGoqvC^rCdH5 z^Z?uZzDT{tv7H2a>L_zAa~_M&BfY4S&Ik|Ysh>d^cl%gg=C^XxuN2kqa=mymvp2`X zEy`Sf+$RIYWQtRAQ0#7t6yo}<59G_{G%5D%1W9?RF9^|jrV4o#7|J=(;v&zI@p?VJ z&-TeQSd2;lW4u$~AqQE2zU2U93i`SU+$Y9ni!sb%S%31b*Y@@!6=CiZz>AORGIasyxJhP(^$>(sf z>*0&Fd}05f1Z}F<7$ZuAMd=*s;Rc6Pd;ofQW zJ!4+Gi)ouTe;Zy(@!|HFNmb8rp~~ieE@dQc)32y|BFzmd+nOUGI*pC~F%u1Alm>A& z5xwJX$wQxWHF`ytaNP;-i?w$dD7~#;qeB;Ng?^Km7{&XU0MRZuj}x$^@Uxzyt!HpO zvgYQQFXPT#%&hEYA*@41tNS6`R?>2pR&_gCl-w?-=ZA~zZj4=3Kxc2U>B?Du;_Jm_ ze_iyNyZ6T1nA=XmELNXw-r21^R@R_u}1_d4qN(G@tv+Wo~$WyflRXKtes7=*pg3u4S&*{M94Bgd52h8Q!VwStE8X zByZO>004QMMzCy~Q_N(O^D~)$(Thnj0@a2QTQ~8xL!EdN3np>N#@iFX_`WlR3>&+< zHI4|gP?L{d-@l)C$GNPpG4|?(51f+2b&tjbgfvs7<%iV*RM~ zLY-~B?)bh7j#I5?=9O(r4(&0PVi@FAl z?)YLn8Kx4C)19V+)sA!Yo6KN!A7Vtjx-bc zH=s($nHH>=O`b3Qaa=+($D8JvNbxskMU7tPcwKL&>iC|U$(%mh);h4Rqh*s@VobX- zn~V|&oVrV>Hl%#qmF&}h-BeKe%&!;YJfUaaU4vZzLERg|O1s@&o8>-#wa2&hoSvb# zS_sujT+OH;+8ap(aiR&t7p)l;uhcg`wpaHMA`#DQ#rt7+z|P5c_)-4WMy9E*M)UqT zuC4cYyFMf$O_(aeS*i)3GDI-KtuwMt-iLa=V?-Z&vY(WQs=-n^ObPg zm{af*zvhR$ZLT2PxfkW4r-foOnOyyQ=S{{(WCrdN(Y|&YFFq!U!e@~E>-MQP+t@ev zavh(y*pnUTeUATZfEmhtbb$b?-7o;hvxLpVJK`pi@( zOS<#i>-2fGHRsMPvsS)N4@h#jm`!*N&WG)K99^yzCS-RJ{qTr4{e#zAR@{C!<^wy903(Smt@%;z)Bn3|Z!YS3={+^b>r3ULns=0TWbP;RU>55*>THtA?!m(r6YMy*o}GLEs1AC~Sozl?Ez|z-g$z7w&F} zk-wXNcqdIiOpH^|R8rlPPJLsm!+v&t*0IJMu=A?7_wkFb-~>J;%({A6WuNx<%bc>d zQzyAZFG!Vlw^^>;saF~mn*|gdP03A)vGl0to@Hp9zLZy*)xK%)?*)*WP3HjWN)1tD zGoEcTohODkyWhu9H;BLnzc(W%w#W`%MdV8b}(5iEHc^R z+`iVi(Dpg9Uu|I|PPfRX$)fY^fo9qsu7179xgzajp|{*)8?n3KVZcL^q)#p|D^4RRvd1YJ~sX7SH z>2AaAjdoq{I@I44MD)f+btW5Vw(X&R4!l`4wy-iaGrCJQ_$$JyRin=SG?*^sSpW{;~qE+)X|fz7_4BtI&5a;2Tqa>$&(bJ#a{VvnNbu7O* z^WD*4oP{UleKJH^veLC`La8_Buy|)cV8pLp>euJRW;Yl7$N8-%yE88>M|;M+D%8c! zuaa?lbg$9vF3!W7xSN1l@;wrNlUfs{q6HPmX!%}^J>xzP-S7uk*dpzyvEUq9kvlxOA4eAY+3~72TnnfRSZnFf?+I zCbdpi`Ro2V(2H%F3p5lr-4MH##zofk-Fqt<1>j&}7W%TaQ)c>pU;Dg&ncUnJXB|c@ zV^QbrIQOWeeO@ayRs&;(w67^t%CC00J&g8yusMW}axWg~TWZ(VF&7nK4~dh03f1Fo zcU2T>7tx5TV5;JGkF^Pj2*(Y|QR=NMx?Rm)d3+Kevbl#ihCdsRl$aB{^}?j)U3xr5 zPwgT*P{kEb#miH`2WpIe6$54LHcNiLX|niXJ9KTnU;CnrkiO==+g^GK$V;BfwtIwB zz@^kIdz_UK`<@<{N4ruF_Q~r_{jyzc&i->9&!SYmYxazhcDLK;64Z`TFZWL>3UjH_ zZZsD+1s8z%K-IEyZq1?paqqRc8R=V=6206gZn1L?9l#FQjhsAxEKaqb>39(y!)oLi z+r6R7?*61)kl2>ZJ72TCMWgSXgL1l@r{XB3?yDOoQx{5MXW(F2)+6VYU7nNjeYu1UcOdOwo1ei?!KC99&*yJE8-ysbD;R_k(BkH z-_lnx<|yd!NV^q(hA%a5qfKZu+iP?(&*{02ZF=&OtnEXePpFA|=h~Zd2H)GH4j%KQ z3Ffoe9aIQzwR)cTNF8xew_8IH<0Q#PE1Q;Lz)*4UFkib~WUf`LJol}UpHIizB^(!w zub(G1ouS)pER%=)(0QAL69M!Ke#G!QxX`>R<(#9_yzTgZ!pOG;GxuxJ=`R}m1{-7b zE<(^2Yh86WkJ+Iq#G;2HOrz7L9`@05ai;r`aEK^Po!<5N{QkC1P5QXc-pkjFi3%F@ zF_=W!+&tgZ@g9x2-ylh0wGkU9bzp<7A9b&m`_)SvKS9vnYKP;R9o?*RSM_68eRiS# zEN^0T+^zk81AW+6ZpZQ|Ya)i&bcR6k}E zQ1wtzO5pkkU)%Rnwr?tSfHeH+Q+Tk8=Q7qQX4ap7q4P4YSJ^Y@S`9>&wmW`}poSo? 
z!7C~))B9u;1&e<6*5^T4jjywDcLdMFI(pQ1nK!#+evqW?I+17{4yH-n${BOC%}E@$ z?~!9#GdU2Y@;u&WcWoVwXUH2VXMyVqfQW5u^MwQ)W3Z)Dk37rYDiH`EJjZcZuZ{?Y> zK)kbAtd9GuezIlnVLo5G#bNF<;W@pl%CawvJLtTua#E-B(&*mt*2CtDS*hwwtN}wCnKjZaAI5Ry`OjU#m`3Q|bnuPO z%WkXB4yP72|PZ3CEC#y34_6GLgI>*xij+PT_x&(r;snFp|5&L=PS;;s;X zdf5A-8()+6DcMbrSX7f1gl)MygP+5pg)LHkLB$Q?oN=+>pA*aqi{&Pyj#CGeg#nv` z6cC8P>+Pr*`b=GK5J0)=uSS?CO-QTuY^nt1U6+H)BKEEv#`Uwg1dZ1&yJ#Gk)IPs1 z+}nsxrwo5ctj%bDsi$0jr{DGQax3P4Q;^Rp@~K$qV^gu4RISzehKQhaxn5m_&0t7G zuMW1(>Bg7WclA!&dD}Dn(z%}W#Rf^(H$OPpT5b1>DvdYJyJg%}uotdWLZ4q`>RBF& zS0aQj+I4yh$%+qO^B2p*NZn&?QQOJUUbp+lp{%d1v9Xs&fM*Z;i5@ZIu5m|yj}Uk- zsnFu5(71)Hhv)93JM$=Q`DbGAz1qzw#Y#r8zokz{Rn6;aKTfw}bT!LcpF8S3vv0+% zyY2Nwsq|vabo$LaWC>udTi<+W^xWXA?ZiG%RlPe1de*#N=62~%>BxBSMSD8#ZQgR& zB|Yl3n^TTAoAvq4U*S0ADIv^%pkY5{H?QS-hgLR z>B*~KUhCNdp}@;`a|<<7hT@Tl|PNscM!iE3j|a=ckEugb=x9$r~xxX z0P?xg#-<_8vbA(e=HO9OM+-#Wp1X5(nCwfgoMy~g4v#{2xH;Hm#s-yty1edbQlU~g zcMM`DpQZ7X-cw<@waW23NqBA_K7=hiRv@Vo_TjUOF4p0`6uaB&^t?^3cLw}Q(pJzK zIb0e@f^6>CrdCwnZEw9$35Pwjj!!~x^_JsfU251)@%wjixL4J_00GTxDM+9=HYwd2kJ|3Uz%`u>|=U(pfrCu#1D^k_D6Sw`{zl)uvGp|;c;Zb|P zVrQJK=f{>(jbqgBW3>^&W!ch2T2_3XGK+?d*xM!xz>raVoiz2lQYmBB>wCc(qim%$xF(%{19 z)hOr(%gh~VeXOcOx$XVO#EX>ERH9N`}_Lcj#p|j)m60 z$%d3q>TB~ldqwj08NiSZkMZ0o|^2ZJ_6F9LS-$e6?Bo`6#< zf+DoUi4R$QM7yy|>sj}DuCj+~rsZ?9J9CeuJgdzvDWr~q^nwWPB-38%|BMt$$*aN4U+ut=8xU?) zbfO;Hag)*a+sd?EJJ*corW((bc(S7C?Y%Nb$ixf$%m8E(=>xkdb?QQ>XBwN3K zk8fJ;O&?8e%n>yjW53Wcr{}Y1<%myi@4}{cis`NO<@9mCRR>1q>wC|ggmz8ao6Ynd z9%jX*xZDAb7tZ@;%;ozE9%-0T-GhD2<9xZkkEC}w>I`ahi>{yn6kk)Zi~37lBJTH^ zZ5Bmyfy=axil7c={DLo`mp2dYU3BAr<#=RA*YpvKu9ErzQ_u+jVrY4mU|W zFE-ol4oZNx2tLb`5xL!XeBUk7P8)muz1`ls#bbUAM)E>RPRY@4DHjsMP9hZ}Xmy#p zNE6GT+HLoj8Eh;%7_Iwzy|HSZU;1-@F6gK_Bs?6%KVG-7 zSsxC{`V^?AM2k#DzZXXb{GoQ7cgBUQh?#TG#rpoBZk53vrq;6%nv*fI#)suBTCA4mu_dTjK^r)N zrNz~{ooHq1iEq&E?WfCsds4&=-uG}X9(KQMpZ&AJ9!5hWwoREJJ9TV}7N zHy@wYoOp}&-0tK(uzif8>VDgw*NbXFXZ=C%D&8M4X>3=;B4|$G_{mFJB7(DTwd03bgD-83Y#qLtx0}1U8sjN^fcobp{w72A9i-TI=y6}oDdheEh+vm}Un&^=~7PJw}&eMugV5r^Cj`?D^^?N?&!AOb4LYtM^b~T#4 zs>l1N-Jb!#RTFhgRgdQ=z0Z?z!0x?EE!p$;RGq!Ed*o7@3w?=w{ZQvG%{oSl`0@md zurUpfS+Jpv!;yf8g^T4}l%FPu$UBHW?j}KECqe^;E@f6eNjS zs}~P*GAe1s3EtkwIy$3na+EGvAeo#OM&oj77zT`l!=um>@hu^ck5xH-jnmFg)N)RN zO=#3L6QR_9CSzf7r2tGjuhL?6WZ|y`mMygQlV&eJ#?G~$Gkf+pe{D`yE#GMHqC7_r ze^;I^zN4RSR=V3%v(4+62G|0_H^-W{1@ucvO0d#1d`gwPs zsSWIZWGH8Z$HjYo2m2LMOHh4TGjQO_qe=QWK>GQ#+^e3K?uwH*-N^?SFRxlV`m_dr z55w&B`FVX=nAyfHL3XsidD?}oz(D$Bz%F_}LVUtdva>GRX0|&jy_YlD0&=^5qsbfA9$UVb0pFAls|o_$0P^IbC`;wA zQ_8AH;^%9^XaIDs-T8)Izqj$}&6`Hs200KJpd%NpBADglX6Ik5Q94cXch4CLbcdl% ztp(ggsjOvjAFmXl$OLNxoK<^Gk)hgzH$^>+D@l?c+IeSHqI=i)S_4a{|7?QeOFVRc z>s;-P8E7MJ2#G&AcG2EX$C;*rGi&3An_4moID^$UT~FA>@|i{3#njr~_LQgF6Kb}2 zJVS)K#m$S#y}Dgbt-Ix%m=|}F?^Lr6?@3-A0=S7decea2mUG%phcwa5D6)H98Y8_L zsm1f&$_s44uKCjl18?8W>#C1j`MsEb%_K?O9?Ni-xCd;_*Vu}u*WC#`cg(yg>X&e} zUOv35T3%B}q!<>=WqMtAR1x4*a36}f8waQ2(rlKD@5DY=V4Iru9wLjL z^WF>j`Fn6nyrW7D;LddsZu1B30 zM#4gRO~!c=J|6wDmK0z=@Y);-j2~KWvm>IBfd8jKcZS6hX^k#ZpJp-yq>4}PR|_8KO8k1a3U6eweaeU`GT+a z+!oYtw>XbZ@$z-by67CAyC!^iBkby*k#nkTW-A#RU)XAyIC_@LrYL+008aOCh^=fg zLhof>B4pjDZK^wkdx2kbFuV1g15jdB0oa}siy?EW! 
zy9&xZIsl(sJgQd>kGT$i5J+hGP>HJ;R682k{>*|_T#cHQ)j*@uEHdo`|a{zO0(t7 z*({hS9ktogJcP!c2{jcP+R&MtLm>h2c@zkUxAM!j2XfWMwd36vCDgUGJn zURLzrT^jHc6&hZD$2JVB<#X}W54Y2W#i z_QuTXoN|xMq05n07htnRe$m`t9qM`DTQ>mJf=Qy6{dIJvVwh6J^|IaQf;3*u%h}C4 zZXVe^-8I~e?l0^6ax*z6_h~|14)617A8YH*d9Az2lL4;os6=N!JjYEeF{iETY1esw zX2sTX=3bZ$^;F6POegwv{GJ(Bx=p5A7fdiCAsgA{TbSKv7<#9hx0JO^)5X~+<2TSv zXqFd#eSJ)vg}uqC>HJFhTjUNs(rhVgd+{;Pa8KqqUt8u1;N#-%W#b5r>CJAFG=N-y!0M&aFSnk+!t0MsP=?v~UcWZ+W6iBw zGt)bVdCSqkAA#`G!!**brfsb$4{&oLjvvs^xpQlJ(U>u@aj5iT>40;cu{L#ovB*Z- z)De_fT%70Ea?0JVHiWxtV{yA)MZl6XyUxu^A4EH@iL9`AzL6+f!NXmr9x# zBhZ@G>q>21(_HV`S;Hj9#jT%zkH^)4q8w%!t=E+^mRt`^EZcY0F#GGK@o6t?vNLa7 z=>3?tHt>mUv&FRx5_z$>-hmaSsgwZSnCwk$)!;d)usSAP5g&K%IYzfCb|jdwTAvGr!}r~O zZj>(G760I06QSN4zB60fdwsmQz3064Zdcd4QmvAd31NHJ(xI{RIva1G!ahIh@_YGI zg1KZoq{&0C=i~X#5|h!APi8CSaze}ic%krsIoHO6twQjnZ1rC(@k!hEjP#J>xKHFhk z1~_W(_fy8pFWe(!?&)O8G@&X11!sN2PDrNE%v}Xq1{n`3-enut!B0TKBSlF zTK6vD=q=A(bvk=XN6)#>m4~@g**Bzuj}hCiFZ@#s`8;gQ?(x1CdUMpqG z(Ec?$j_Pvn(6E_*AGb{ox1CIp8mf87r zGj}DiPmxo=HGx#tukbS4Y#UEy{Ykr=`2IArmG-ty_|kjKo&bW@JNHRPb06y2*cvgT zQ3Lte3FJq4XWvG->&fLcxG0R~9v|=%yo!_DHKlw6q@xCZLU-ivmRTe2MMt>qfG&N~ zmbW-RZ--@|%`Y%vx_$@T$IZ=kZPH!^@EB2`@?9bVq9Rpod4BG!+snDmMj9eV(s8d*F$>TEan$& zy07(9#*zMIP_OP9vmS8cE zp)U%U?ADb8J)efrWQ1%7;G7{Tqk=K1()00uy-8Xavr~Bq__){1JYjUTs_C7d?R{j; zzU#?)evkbuW#&$L0ezr*`E8^!5pgj{0t~U>qhj^eb|p; zBv(~~Ppwn@sHdKPd3~kU z)H@uJS+`jvFYfV3QVSSIOGk8P(n)LVRCeRs#$@m4<$IeK`u09jRo6YT@8mt_UMjoI zdw;Z->syY!lR6G4sGNAh_2wYlzH}#^1{MNJ*l_owygtmmK6;4jQ0`;5qYVmN=*RtQ za%3KYG;)gj-kf)GyB?p8{B9Y4J^6B1wNxO@kIuWkUg9QN+=NT?|MGQSyN+X9mi?E0 zYp?-&_fQX#Xe1huq9@--iWI%~U*GFQn`)qdPM~hxyqOsh8Rv-Hd#yRgfNyn9Ud9S! zU3XT!esMEI6RA_&SN~|+loI*39rcDT?h?xIDw5PW<<*T>&a8^OLX_iwJ^sGD?#{gY zl%jCroq2yx)2sx8hx+Wua{*@?zxbhC^@v1p%$UJ6iHHcEk=~G+($`bKU#U0dCrtwY=**1FoLWd%OxvINr3uZGi}lL$kZ%2*$nTgrl`(foc^+ z$AcpqU%P;Z>F@?$c-m;0&}>omg^oJh0^;a<)r2z9S!dT@21o`woZ$1Pq;u7m@3DJt zYA^t?w(35B2G8*%>bape`k_fVlp>h`Yoq7j{f<$G;B*Urz&=V84D?y2E&G~{?pDCg z>xcBW{?Q8;GM~b}P5f31Bn(I(kAU>N&{b6d&iZ)Q@XYoZZqac3t$vFLT*&?Y>V^Pi zfyZ$V=7L7+w>>olA;A^j``AMcd+c`!;B^z^P?-UmlmtNmcKC+W6ZTwhrt^A`;B!@4 z8v?g2yy-@NvY>u z6~H6L8xwz$Ur*l4AzQi&L!ook1m=_ETu7xLCW8PI7jXs1_JM9K{NZ|HNg{T1AgX%9 zo7J1K0Pxlg1B$}t#ww!|MsP8ZSgRY4UUB$r>@%i+ZE!$a=Y0aGzOezvTld`T7M{h4 zk~gUfP=j+ewG;q>F=d7{TKy&>C=4ZISjM3mK@cX&T`-w(AqJp8WmAF%%zURAn%C+P zHlNMS`a)}u`?$e8m&@Zk3lELnz&Q(wi8Peg2r;t+p4h#;)Y;qE=7-Rs-i25YchY8! 
zSj;ScHS}|>zuroCgK=taWHq3G)s9d@ye;o!m;ZbQ=#1=>Vl5QQ;*-r{=LQh~SDuu(I=$R^)k#4P|kXb%Ny^3sh@prZnNH2GP;`eZ$1~w)cGyw>1rN(0&-NZp>RR9_6 zG(JFGN!>=YjK`iw_XMk+n|?C^dx7lwTLg>;bREmSp`Vh8ek(HDNcE9Vb)IOgy z{ap-_%1a8#EVe|(;a=t!evZ00K0Jw6393aIfwr)^se@ck^b5JqV++5znMw*rFh?GL zL_cuid$34fUEbk6tr2|xXSb9mWWOTxMt~q>ay4gTGwg=Ge(7{(NFQKjnYl2XLE$=6 zJ~FciULr+(jk6Zp`)h+U+NX{MIYy@w0BAs$zv9c0y{lwT`Db)b<;yf(Wa{3i@m7ur zO?*rfp8n+xO~>TiosD-0eZmBf9Val}&QEd}f7l41uRr@~X(&|X(4 z3JoDtHV0r~$M8zzD2r9F{$a@XP=|LtL>muIG23Orn(qfJCUxGoY>;D%Bb?P2LJDVs zLYn0zg(mvql^tF?9sc;mclJ}1XVOWbI}uMudT|1`a#6<(T*li8?7fVM?VhdAC9$=F ze>BCU-1zgJ_twW}Lb2}}5>5dvY~2GarxfQw`VD~|LK{^*a<$~^!Rir207m4T1U5RD z9Hu*7*9L1!p`<8;jnc4!bjr))(Iv|~lgjJs$uM`k#R)SbMjDbw_wDu>1b$souu>e< zw-cw~p=G1=Xo9R<>q#bjO#^0egZoTlf5r=jQMxeDJd!N&`wh;Q8-($lzU>*P73Q@2 zoW64(Wo|{jIS^XhF=PVy?1K?U4j3_!IUPHo2az8Y4EE`fmQJ;tV_VYv+l zhYiZnAS`<=$gO$*_0nY0OX#mt>zF^k8SIwWW%q&z>G)Xi{*6qLFa@nkjVNq4f9VRX z3tJsjy4@kn^v%Fi-;1Uoe!nB8Ra0slL|@8q8<^{?il)K!;>BNHJpy$D=+A+AoPYaC z2&IzyMje|QE7&-v*($w0I7-BTXUtASf_avq>%$vj4bQEB9`gsryHLCKC6D^;UOJE} zDnBNg&K|1z97!S+AZc}54Y_VVf75oSGict;#p@3d>mVBB=b%AHSC`rpzBb!AenyjW9I9d0ofg z^!VA9c5vJ#eOb;HhX|?Z2m7XUMy$5;c}?6pP)Bfi)|>Yg?nxlH2Hj{ZeEn%C)3^p*e`y;Yb+^hey9A+ ze7MGp8;*5Q<(w;6S&X{&^)io=Xgvm88h}>p&4&yQi}Tr76ICP4GXDzko@|yhv-h_`1&z}0Nj|NgH+g=tz)yer5A)>qb`r=paUp3l_-TIzUg-JA zjT%Q9`LrFB{3-*ae@C7_1qE<)pg>l7PMtg3nc|AtYW_2vunZwQUho=Q4L6>C@Ql+v zBn#uv-hxk$Rl3GhF|z_6h@RKwG_#A{d>3qYdkvpLR`j1iJcYrA3R!H#sQ~?ncbd zBXeQR*ZZG?hRM`|S~ao(kJKZd-ouyU;aWgqL}g55hYWgme>!@dzjN|rVO$Trg^}0k zt#Lb;0oDRU+2GCd#`}G5$yNEm9PW$_8)dcI1jzxUy)WAv?PK_-9>O=%;l!dEEfLy` zAA@Ljoh3lC-4}pcp_{>%cYz67Pq^=;sb1g48Fa?$;K5hDO~;QIbf#nodEI`{!k-Tp z9_RK@&)R-WfA|yUFmxoInxNW#<|&91ymytWVI$bYad)DEg1WI6x9JTX znVF#l0QNZfB6 zFlD&TS%2#TL2&Ms>Ck@d9~-uk%>D&HQ!n^;+#n~V1A_6JWgf(GLe5l{u|&xHsdoq%fe@})d+yD_mOl)5%gaPK(d|_*4bqj{l*YNhO zIv6O#c(JDRxJFo}qciLVpV+kZJF-XJ*Nc^$D098syZa`6GijE$?m6|J9yI5G26`gB zom2ett8llmh_3)vtMv`my}##FR4pBb4TW&gn{^4QHL3~q1oANS%U_eSh9x{>f0+$u z0)$UiH0X-2sGjIQIG;ih1835oZAyH5^mg(jzLB+y9jsj(5-y?JsCUvRW$Qtn5eNy* z?O7ubK~Glt^(rycTSXuQ85!7;cl1@Pxw)!Q@;e0tDh)1hWz@JT2~JhfWoEBqCt!;u z7_0C-`wJTGW*%10v*vwV=?QB$f9}I`BguTN$m_gb&`E$Hjd{$_lY+Ws)Y`4Vb2Mj)cD%Qq6knNrwRN8Y4+^Mqh^i!SYn*zg|PQRLx}T%khj(OKbZ z2pRtRD{)z|fKU$3yY~`Y)o{F&2Yy#Wb8E^<<*-N9Z3PXtj8*5PDNuR(fAiPmKj9pX z%!zxiJA-&hdhb8_?<+|8BQf*Wu27#0MZ3*pVXtEaZz|9o6=@=Uo172YsO4yr)8BVK z4($gh5b8|L+XP3_6W8a^?QE>Y4EZ2m*_?VRjfIm{=h7c}V( zh4W}~zMo-Q?LYT{fd!NPe~#x;7!_0nukp7*YhkmG1UHd8WjbV$`TMa;<^+dQaC6I$ zmXx=ed=UxhQMdAGPR}_G;g~layplhI&=wf(O+b?q4N%2p{$w4wcAa4i1`I;=%Y=P@ zj5S>15(6HWFhbJ8m)0$BRJ|mAEedryf8lj7F}eO7EKJ(#y`D>mf0bW%cJ&1h5*gYV z)ds2*=#hyVQyFYJwk8B`KzTUGPQ_DSWV<_NYyYUz%w~!i;GjL8iB%c#M$OvuR1Ox> zQ%4`4&`3eZit_`9RyAVWQRfQj`dRae_Y6AXQl=kO;oAm0>mu1nmU*iq7xQoAd+EQz zJha5rW*QP0bN68Le<=0+EbJzviW=aO03(2!B*soPHVGl0f%^plAIN^Exapc{BT3_p zgm=^D{^10lwa!l=z0$mLGyl=mzLC`42R!***|a7QXd-Cp@M7sII34*An$V&DPEMP6 zHw+$t@{+oFFhzd74YB&wDP>Lg&(LQrvVKo7e#LJ==07mCe-Kn3h7`sx>#+M~bM5<) zVwcnRU*V_A8q1Es?F%h_BuC#~ukg!TL104_SA<_fTU!qG8xKEM0moM0L!#?lv4E3- zZv85KfWAs+B7vFN8n}-`HNIae6i4Cgs86coUT48sQC(j9XgJvwRKBI)RvDNjTm{5M zV$G|csiS?ve`Dw_eiUCqVeR{ZfdK~$;uO^3=$@%RIDr98AMQ|t4MQC$uO{8!;ouG> zUjl+k28VbxVK8h(eElqyetYvt$`ScNW9V#%U_FvnO!q2e*Wee%$dEpiz+~?obhaW0 z?0ukLu9WwJ_yAGP!q+hSN)UZBCE_y*hqJY}^w}eHe}ot!tJ=ol+nK$v=2%)Wg1m^h zHsP^#UasVP3vBU!zv#pOS45L*jz!!j>EtQXNJu^bFt(;A^@D4?R=rh7Aeq*hlD>C2-H$L;8d#&ug$#8~`_i{DYg> za<@C*e|*RVbM$Rr*w&Dcf{Yu#<4M#FkNz$P#Q%<~A)uCrO3-G=I7mCNH%u`wzAk_p zM5)Y>fM#7AlM=QhpV4DwC>JBz+!8ulFhhi7ur`wt^h$@X9dx*_O2YOu!1~%C2+_Zz zw6DXg@kwY-N6n#3XaFdHz@qzBAQqXvhZQH1e<}e_;6@>Q1Y|fDZzZbA{H%ySTLRW4 
z^BCiymrL+rQtb%9IuGOkSN|7*RFC-<;97pH!$bp4*TH_Q#N~`Sn&dkYKRS7iEldTN z6xsN^lQ{XIu%HoaVC0@_iUR$DkbK&G?aial5t-fq3jxg?9mZKH+HCX@ ze?f!bN56t&s)3l}JI^I@H#sY0s2A4Iusu>vc%H8V8Z|O=(T+F4H9x}N{NA(Lb~qtW zZG&zX)m(BNbo3qkqWT$Ge<()Ve!vzrav+&Rt`h&G^=_Lt#bW+|PiNlXq9qqEJ0HKv zOLcH^YnHD`-PiT*+($@Iin%*}iujzje_BI5Vhsa0p0$t{`LSf|-Paed5qvCBsOz`| zLovW+eewRaIK9C0^yiK^rrD3`lWr>l-$Q#Ggx^O9Wg=#z zI@5Irfd4G;jEUt(HAFD6k2P3wf8kq$e9%yD!IQV!Q|MzUFs0&H1s<>a(zs*Mf1rdF z4^7;+j2CV-Y|$Wp9c6&8aIR$Z>yos|PxBI6wT>DQ0?D*asDD=rT=HrqTSbCt3#`wU zNQK8g1H|Q!5;A2G`JYNXd7zYu`Fd$92n*lE9zcT>e?T0a0f-9(erP%Kdk#9T9zDZ5 z8$v)C;%8^4A#b564*9NWy$u@828wTGTGYBJAkirpf4t=@U0ZrAq8MY9HY-*p$_Fgm^y-d*r|I-ttWaBX^Rzn)lo z(&pF$Ron`7|73I?0uPQP$K63ZcZRcjS%F*3$O$*1YbpNTISJsd_Fnt~f4v!>XAS2( zC1ic~cC~!TdZPv2e@t$I1{fe}Fd|kuM)$JVe8|KKCJ>seWaQ!ZZSThhHWt3?Fzn~0 zI9M7bEM9xJ^Kq+CENBi$wd;!wPp!HG26#@yH1d@+B6@yxC`2Db&I><~SEVW?^FGs} zQYg}qZ6)05R3nB#bn}U_e}LpVwzY`prRU-YI-`WE6GQ4fSD!ZE_s653oXxY2$_>8} z@JV(-bnub(GEBF`r>=VIjb;y_Oo432M=)KFStrkhT#2KYby4N{71ym}kqQ|eoky0X z!WLJR7_kC57PjiQJ0w&JgFZ8Q`<=1C7|evL%{H|ed!Yn-?p`(Jf7vVJq5i~_0UmqL z<8B(8`U5aV97(IoY`A~oz-mQCwwkWVgjk)6HZBqZ@}$WV%Vj8imP^S`QfljTA_tZ% z5G7*#sWoZ*0v3%0yJ}W)t@jzjyg>!5F*W50g0o3%NMDwo5%4vGfbE&%a|(5Ha&VM2 z>)!XQq%O`89$4Kje+EZCzsLByr~3YD6jIE+P$efDKLO<~nLM3JKgM{QK-M$rkIVwMMu#j@U z$wm8d?}4=C74vMl<`C|z=#H@ivO=vkKZ5biyt4tG5cjvbf3%`!;j_eB_3&FFR!->U zJKppGCKT4YhzZWfwZ5G9?-tC&XZ^4~ki9%g8aegPBm_nwm@{omJElfTg9pOLIW3Dt`Q*$6JlQKA%ow-R=fUmkkZqL_`q4~Cnr{>a`>WsXn-NUIsc1J03_w$=P#eoREDnb zENJ+_1T%VRi^ete>@?}K;eLfKTJ+s|*=6rSBxZR6s%_^gQB%%8lk>}sHEDh|?-^i@ zLMNwze*wK{NG1v2$e<%92xC>?Fh&DMLa)hhnzZA}D*yW9ZT2VN_W!JgcBv3h~} z5|~d!^ODk81t>^2%^$nZll$pO@`k4A`;9N0-qk1kVatJOzJ^|SgcL?mYxz{#SHl9x+f2Wi?;^U-{WbI$n{$? zSW8mKC_%x8{FWq~KcSLoq+W;izshvGS4QyxJP*TPhCVA^*-V-TT4qJ93>W`2TSr)S z&|BYT^9JtbMDJ??dj?f_zYAUa${|f((gJ{Zg{{avxmvJrtl;~3qqj%@PSs1P9?cEW zf2y`YapV(p*QfhYcN|q!3^s0_W7;9{(onqHXnxV2gB6p52fWgUS0%UoKmOxC`M&?~ zyZ`OK|BwF)<^6w|yw?9^@^0Yv|KChr0QLU=-Q{=FJvs0Z-@(c8r_cIxIoyBNA}fsi z2Hx}Ej|V~p_yhlQJ~#&&m2ej#Bkb?*f3)BKesJRNKm4D1$==YW@)tCkcMu=Ie@=fQ zFW{P&ET7B3cqjk;`u?GeK?w1GL=vBfIDlLP|ILLTPuLsyC!6$VK4FU1N8rMSzk2mr}!rlle)jZa<3!be~e)O zOS}vCbH4ibGynZ~qWpzj$I#*hpmN~nz(EK1&xP~%i@fVU_Lj5~Pzd4Q_w)DT!BZLj zp-L9roWBGA{0%?<9X!T=;?Td}D*64lQIekbGy3=Y_XG3L4FnZ`{&c0;|5%yt2_9O0 z3g6Y9f?plA8e`S~5DGZ7!Pl>oe<=V9+$+?it(Il@G(rB$jY?Qt!(*a~6T&~h++pgA zZt5I5%N;au$2)?6&nnvDB8CRsrIF^30S7*YFbAXeJ^`vA2=7>}zy$qf)aDF?| zbxGKE#OtStCpdh;5H-ayx8xojhS3rhfekQE#?ZCZ{{4Z6EGyC<8jv(&f0MOH8*V;B z!qWA@+AJYZokh%tKJXdXw+6g;GobxE0nItzfaeC1psPOu2$P&HuMYrdg(0pEtBK7D zo#Ih;9E6{|ss^vz;U_^*NxW7>^-!0g!_fbHcx+|Ck!60t- zERyF`o)9t5H&3W_+LP)Wf8NvtPS`~w*QU(G7u&FD_#wl=xMFQs7uJ~>{g2=1zHIk` z8naKK%uHU(0wh4&d1}b1nk8N%u*`3*^n{uha&Or7er^l^8{X}-%|$1zB$=_e0W{x3 zeFC+9`7M-~I>(+1LQ(#n#`gR&F@fOBa9kXLu*HJ{!e+m)&2b8he*=YG6e*~0Dj;+- z%+HwoRO1cO!7AEKpG@oG`G#xlbQ~1V2N;AZrQ+Kv}|;?%jh8 zfc~I=NO4TjibW?1e-3V2M=cS0Hz@%z7u85!NLs;*4|CFqr+0dm-@tMrEcbo#<}c+I zo*ch0Ob4gBwpy(MZ6*kC=3JOY5&2;ahV;6-Jk#6-gx2Pc9dYhK#RT|FvXVaUL>60s z51OJ*BdcGW8uDu_kK;iTRSZ`=FugaQIBv7?Z>a=b&%f_8tDb=6ma_fwJw=>krKA0Is7ea-xn5%nCzQt8G4ITLBYpo zwJZ<6yr!`{e^G9fm4gw)`HEYE|2PNVrUAkemYWl#{zZ}X^MF)Tufg#CI`ch;_rFS( zE-X7AYy~QLmeiP8(w@@l@~OT153Ykp+-~M?ckWO;jUoH-utPZ zj*buBe^DPz5r7X4!{%RjeI56Q5Z`)ADX?2O%Jr&i7jxTLwU5t%C!p;FvLmDp)mr+j z%Mx(6f*t@b89?jnQ-Xe!dlu*8Pp?|{Tdemc_7z%HF{ z02D~fOcYaGcxpw^FHc^+5%pe6rxxFzz-A&yf7IKEq|E?I6eS%}b0ESQr`WnyRIUw8s+da$$g_IDk2&6x1$vU`NTz8%FT)w|HeHNQhngX< z1>$Bk`(cpEvA;W`mclP&L=&_zq3ynHwt>k9Q1SNb_WmvQH8F&j6&j;MEubPvWM9!7 ze+i$LUOa?z&SCAi*eWv0!LNT?0%jM*A>36zT zsdST<1V}$O7@kTx)0qI!`x(jSXMQLINy{q*e35dPf3Wn{>a)py>f!@qQkbX*`ZPUg 
zAP10&P2`H14avP_dK`Ak-tg+1{lw1;e6$Fj8Er*^Ipe%?>5ZoRU+|Vg)!`VGj7_)G5hiibnS#r8!--$kTL7V?2@> zNDQnQy?KJogRv^gEL3Z;Ine-J)U zec(z+ane;cjZl}jFDZyB-hvPb5G$pCsib;5M0a=qkXgKbRa zb}5I0#D=?h^Fu#Umjh1jOQVdhRy-kThaQC{5r`xEnyn&y1>sFqjKl!|Cb#;5!EJw& zd#R>xy7l3YJ70C>3g_{!RvVCCe^VYg1R94PFQRZXAmv*XioPg~?BHi2(GzqaMdKbO!ML*`JgDehwHInih}=ujPACD>e|bQ zdc5@4=v!LeTcXWKN&JEB)0ZT*jfQgT39xSu{9JRN9|<;o-g&8TP?BY5e`7108VN>x z{9TaE1&p##p6yPSg1y1`N%#S1{ox!i2nEhlfJ8!sQ14uMZTZM~z!N+ap^|;SA^@N3dH|3^ z3?>%?`q3O@43)cly;#~8@;`{F$Tn$i?o9AGn#29lErYBNG2 zbM$0_cU>0lg|akjte&~P=uKb_ea@ZXCIIOzZ^1Ku-8$hk#$Tob`wMvd`?ZZ)LPoOY53Az1baIzA-n5ua|8-b6JZ#TO@ zXWHfg=2Xyb{797ynVKH5ka&K-0Dvc~^&r6ZQ2w$&f2g_hwiJR-1TFd($4>h6grWMG z&=hcGcZ-Acpo_M}@s{F6QL`Rm%04A^8Gj}pzKL>(oOX2&@h@c4)-G58QuXNW>S}Hi zqt=$xtBNYaj^}S@rH5p^GLlrrT&oMMU$;uevttL8Il_7m+ zv0MhZe@bIdNd;G)fc|hwy-(pDg$%_lE3?xF*y{BqkKgowCJI~|y&x4@IIrCs!QYmm z$U(0Z0kW>r9wHofcLjq$181@CUA96{9QN(Isvq*a0AOU`9g)$Lv ztbh*NF2B;TfDP*bx_+cT4qb;%uiZ$DFi+S3e-#EGGdmX`;c*MhAi%mqMNwZlP(Hm) zTEw0(P{aAveVl8?OZ(#TguPxl*wu^^zT7(tJrMfkV-2FQCw!F&dU>(pl?+Ur@KD|3_< ze>?W6$KLpoI^IBov<^r7jtCHPULWfiu6Ye9s6(#^XosbvV6rMLX(h6cWw*x?`ncD{ zhlp>DllXw9$b99;Pb{Hy^XUx>-pu$pz^>ClyLp)!;>F2F4?-b)pHnul+tU@p9-KQM*4Ahm;(#Omi21UR>T zD$|20j{C3x)Z7iwO>tTMDFF>MPMOkIU8RCa{Q-Nx(#^YQ^7W-tYxSM_F2b%jQ7KB? zXPqX0D{db^IV6p$rOe*h^gXd`c>ow2%c3Tw@Unq1X6ViQ7B?s?>|GkuZSWIge|$%Z zq5jY7atIJ(Ij?r%N$LK)(=m!>#MD^Ttt|P#QBd{d#B#bhl zk7~Q+nQ!?7sP!3JI*5=Ar~;fYe{=JF=w4eK6a}dMv8tVpK7`u;)yi3@$NG86-Bf#R z5kVg1z`yCWSWh^^d%&oe&7?24mlSA~kyp@djgVQ31Dwhes8f}X8(E}WEOMyYPQX3e zavpakA>IOfm9!mdw1Rmx0m&%T$%QqYXyPS>1S~AMAURe6QrE43gky2Ve?YOlACeW7 z6ovmacJ5%`IA8FQn|8XdKdByNz1^!ca^t2uNc^7x%imM3nC}Z=q^!qpLqI?P+^(My z)pO5)v%WdLrAXzM_7rwd`~WVB0FYFFL5Si`6*s-lZ$POOiy4EtCDsH+xQ9>zLQXB> z@umuJ-Y$dsNpuTx2k#plf9Vbe@QhAW{2S5w^5YgoUSWtFXC33pYnDxNP{1lpd3;}R z@&}fdF??d_jaHzL`NHtI(TeUUVyLA~28}@zv4otUa(pT^~iJU;YOs5j#&JTVt^2-vObe;*nLeQmSvfTBiF zvtU{kdfjqycONJMqnx{)K7JfHAz;TPgE8qtK4yx~0hmN2lL{su*F@K{QCV=$1?G=k zp?gS9SYU>*v_BmAa0p^i1k^T)lkgRI46mUXK5L!tj|X2M0$k*OVzrlga;rIlW1$hs)0V@9 z65qr&Ub6a{J%;ioF@+h?7)SPkZWrCy&KzUv3PUpUx!us@e+TPSm=1$Y$^ZaiZeT5K z_uexh*%xqdM%lZoAz9!%xzRlYgi32ZxI zb7t^h=!`VPe`idOU#u2V))jy~OJcb$Ka03PQv1d|LQA9dV=d4TQj*Hu?8}F^7G0RH zgm}?WrI>(_A#u}n5{&DnJFI=+i44eK_6u!X9)p-b89>q9<6CRzJC0q569ef~i=?Rk zI35pRZmrm2@4FqUP?x(yE7;(y%O}^R``ilcYGTRJf0P?<(4Gq(e9{0eIIhxW=L?aM3HUk-_1umf#hyqj)M=+8Y&dmq*g zhgetm@iE?bvr{-J1CaQi2HBC9DFfB8_d`=GO6gg~?3iXK7wKq6z1h}o%?&>2Tzzg( zCSx4ye055O5@t$pnVTE}{H>wQQ2@S;lm;OlSE)QC89Y{QEwatyG5s4#_i?C)kH=2%3gN9jjKE&!z_Jjx0}egseNd?->QJMc%u=%iRS2 ze-27U1>k4Yar-hwI|vU5?J$<_s4$Uxzt-tVmv-R=k9MRUSO|qTTTlDT3N6bkaA!dn z*QqJjDqy)UuBxIktZrUw>`iYQBwFW7~6NyB)gu&;hL*xC{H1!{iPALq#2Cm#6wZO0Io4&&aSYdLx6acXEiBAeDl6Kjf~zKZ0n% zYq3mD&*`(@V<0M>`Mvot{0RzNBPUJ-&^>$nlrND7uWY9%`)4LR>4QPU3c}I^dUf$$ z;S-B-a8n*Ka@i*^h?%(aNv!1yf6&17Y}4)g)rGfJ!KLUKKhnfvz9h1U4KThYg9kiF zoZo?xpELr}Y9>A2f24DsJlhp4N=Y_sSMg%qZ=!LSuDNTa5|+Ks^00Y>56zgd?`Hwv zo9!W!_>odFP?n($TiW}}LN@{9ZZ_zAhFx#erb9juHjzA&Fn+*SOavkae{(A|2Z{7( z0}^WgJ7$RX3HbsNCw_tU*!h*XLWdsIyP~KS z7wp^M^lSFS14&OMM+Q4&0kX@Idjz$yA>e_VkixblcXum!mH-%o(f-2^8CZ4xfER>k z01^~G_xfSe#0`L5JzV^w!^O&{j&3gJUoi^24*<2*e*fzx{-6=ke`=}ps}ZRUxWeBJ zh$Db}O;PK5)HhDsPz7x@0+H~9sM>6*(VHN_(!b65b_1C2fnqTiticeovj1?U(U{&U<+*uQ|l`eA$1JNy(VZ-}JW z_n+hJJ{mYm6;gXORi&~lW#fP$1>tb*I7l;@(swQ)w)Xlx-i%#Kx&7P?LXd}#h$W+I z$*M>OrL<)?!(U}w$!+SXDzy_R46?)oby_jRHEu_u3ZMDQf7HMQJYjz0-2A6@?wV_} zf#8zN;)>lpw$)9~;V}2W<=nH9%3kP7K2FaXm`{fww6=-4!nvRyW#XOXZyMm(uvaf| z`tKq`fIp0FKtTDPWg6RFgQ@xsG?=g&wgQs5h6{7D-r#$PaKB|C-Ei`fNx^pqye@!m zRaBWlvL{t2e~i*~^_^C~Ada%ZLJC7aIz2EoL95{nGz3c1DiH_E(hvGLP&4aZ*KREkPJz%LMS4d=9C$Df 
z=>+Uu*Co>Bd;+rk>8SbJ=r;VAP}_LNNd_*f7W%4xHUp51cSM`7<#_v%>kPY$L+1Mh zxKnNBzfd@E^Q(~PRPdwqaIWwc2rnjDbg&R@e?D1({>n+LI2_^H%ms7d^iv^&A*ZPs zDXk0q2j+!R4qX323wlf$3SNQjsZXRA32*o{h?TkAq0b7vYp#TK19vK?o9B)7JU-5Mz ze|6$1e}^{*f% zrg;fo{>aV6&d64v6qGt{zxxY5dXPe+e|nmPTsI_gt0!@kT|zd0jk1=Z(;^%RaP_Fi zGSTD`2^d*+hgb>~S&M?$MexD5wbgXKmx_JFIESS}T<(C#=5Gu4d;Be(TINb9sC>lZ zqmEXP`RXWr3PT;MR(JJZDAf8NMpS+Et*aBkWr`T9;l(WF7XBm|9C1~pQBo@G1uRzNoHjoOg z!t1N@_RZ6x^Z_?&I3`4yQ#P*^f0zT738_$!3nD#lP)4>Q|CMYBxf3^idlS7_@ zdCS^9K}i&#xM;>d*%y_H(mC+V!F{yTSxbHgJOuOk=}Hkd&=Cn{iqMg|ko31ck3T15 zF*0+^p>@=?_;r{VoN(5-(_q}#d1Mtplw*ftBLoCV=Eugp~EocQG z-o(>iwbfBae=%WPZ45~VzPaTRK47kNp9C-M8~z4QB2#F>*Y1r>a2Vf~JyOQp!ToH? zZ&~vb@WdQ26WCY4$&l9oh()=e>tQCdrQtD%IK!C)dbJ=Yp|*Twe=v#nP%fNF3%Gi` zm$g!b8T_*7*;{1+kxKXB9~1awPx#^+SK>cBV-zoG-=y~QGK_yo+t-&uzzaDd8rgXTmt)8*YYDlD!hy@|^D$n5Bhnl^+A zYG;wbGHPc4gs*y_f5LzRw1US2-^s}_cE>Z$@AtcSAIhhn-Ooz_Q*f=l^b+QkL(lU) z;05{RvS6g}UCR25kdYu`iuE6!3nnR>(@-96d{Sw{A9*mLK*)yOCzkJ(vqVUS|!};COBm=)aM+aE>$8Z!ze_Z$rcuMu_db!!no=;{AZgtlW z#62|$tKk>C!X*lHh*1Z$l z;~xae&}CqFKsr3MRHzh-48u>8Z0-V9#Ekk>!eN9Bj&H2@yqPxAC4!r=P+ zMtUo3e|hzf#4?@}DiNiO{#bC}NFcDd>{V5L_@qs~_;?}%BWdyvfwO-Lf679Qh(9g$ zx1CWmUEpe`5W98tt$KR{jHmNArOZa6Qq=TRO(10p^Aq|jVtGe(R(JvRX9R{mXBP{4 zEI{J|?UP&$uY2We85c46fdfCY3Lyb<>m%5le?REX10ehKlWwSoJo)C%!3I$4AymY% z7u!>Ot-Vb`r(R*RYLb!yzAM*eoDt!Q2Kfx{`jAa!3z~|V=C6E6NGOcOhfx73$Ad)Y z7hgAk)_=g{jj;<3ouc&H!&bmHnP7zeuC?9C5(J*lH%_R%+uQ2s5Ar_2b3^((I+Kc5 zf7HI;b_&^*f?7b}yb|}9(8u=qyJy$S9EZy8;h~7Z>|p-jw=c18xOqldt=x#03}u9( zm4>#w!K{FIYF&rP>T|Pp>61L;6h_9J{TY<82Oh`a1}s@AeDrqzey>Wf6)af zHhK`OhjH|~vE;wyhwVXL1$<7Z>^JhVfq<^k6NrnWu=7Lv;_Yt04tb6&yKrxS8Vhy@ zSBW>|dannlhuh9%V}kc6kY?vioYX{H;x5D&mC9798eWT-j++&D1->1O0;lUS)k5Z^ zpA0+JJ#>%B2B7g%fY!n*`T+k5e+<S8m! zfzdy=A=~~@tB~J=8PbCUSRf#Za(TglERyBS7utw;6e`XA?H!uR`aujee{W*&4KOck zphV%+F!P}|q?U}?be;cZxec%oZY6XdfJT?GiuFHO?$2!qBt{4Sg3@1rk`r(WKPmbg zIzmOgJL*|3dCMfZ%fM<(UdEgPgObHJb2X1MHz%S5VhnRzR+#-Pu$rZ54s<3mUn|8t zTrV7(pke?dR|D{BQu6x2f2hF&b+lHJ^5y@E97(SfgvMiHL-xi5*h@Hp+pGC0SMd1I zTLlD%r@-v~GQFsrO}BUao(#q$=!#!suhTqC?{;|KV892*^}p8gmzE=>!(=$HXIuoFSd?0_@&zsc1&PydGqjh58gkrvetZ68!|+7GP4b=FY8BW&oklc^|q|fPLsi z&9Xb7rs5dMTL2!+xu^s1#f+Z5)Hkofps)3@*_|sp<5e3)0<+gLB1u6%exAYZzuI?z zb&vpJ*~bx?f4b+q;65!@!~#>OFv{)ZrKww7KoTcoAzeBGUC>j3Dwe^F^(pZ37RK6^ zU8WD0mbGbJ>!BC^G`+Qmy1tW_$S>H>;;;=W9hmUOV0nW^2KRAF;~7EcwXfHGY3qfl zA;YtrqzJHvEe=v+XNm|u;b~e}SJ>LhSCqLEdkeY+;;4fX?bB zk)N^73B=e=*P2RzJHDd^3@Xl-=AXXq3;Dv{{xD12q3)_H!?4-{`SnU#9?&G=bbO<=4{8orkuEO?lgG=Z%n@yui7KAF&73zI zx^z8~fBDUU3kNX#KOaD-rO}dp;ni}%%fkdp+#Q(rSTCqeS}|`3xEk9X*)2W>sz|T$ zUQ-As?Q|(9eb^vyWx+MJ7T?qlt^#@cyrYn^BfQaJC?%AzYW5X43;)0BtG!s_N-t|e}op}orU~w6lpQIiE*K`IqU8jVyzW` zv8R8Hr+XoSf>;l+74|!tGS@)B0QPn{nAw)72845IJ(_m68nCMSB750R@=G%asK>JS z=H>!wGKt>5`|#Ze^aKNVF_9l=Bfzo%Oj@A62g@I88Q13lS8MlsuHQ}bkeRW>=z3KL ze{^M!tqrRNs@UnG+>9WAs)47e5m*}H07S+|HM6lzSK*wwp7&J=Th@z3 z?0oM*^$cTJNNJRwz744lHK!Te*wIvvGo^X6P(z?Mgv0#PZ2^1`R9cxni_YylzH{i4 zO_%+S4*CqTQv35k^7q|?<%4nBq70V}fBAaSM$K51aKAl3Nw=Oy4_uW(&TJXt2PAdQ z*qnV37$|uY(4<~>>$-78t$_lK6@mUuNYPC`>$fT2!~(eOa*i0Ynww?Pu70bo-c;QC z;P}$1p!Ipcj6w#qC^Tf}U03I85=yU^;ayE2#(1&-6DX+@1m>J{yz+Y4I_wlZ)t<(`bgq^9YW2oaAF0B(w9nE&XifQuw)N+=K1Irs z_Xi+sLMXBh(Jw`UzA$msg2U4J{*9_!Ov;Pg{438U7rQ4`*-!Mk@dF+vJoC$O0cD(F zRKS25OqYM&Vw)YwljFMuw?DvCe}H#_&boP=f`$ zA%Me15S~I{4op`m`)y%;wUjU5>b_p^s8hoCrH$;RsMcMypZAJzcl1q)7f4=eV}U-5 z@uW309YI}AF#`G&WsU`1CJ!0foB)iXGBl-`fqp|e0OuiVQ<&T`9F`-fe?bJo(~{tN z=XXm>!<>oEQhp{!jBWj*2m(L<6g`3h0=dYd252d3UV$D=z|I@|!zFh; ztxb|bD3cQMTChNv6Jm7SY4-k0es0f*%!$xJ*e$?xJYn4mmnuN2d!FI%pGMkVFxDTr 
z_4FG*>G+xGD)HT}bg1nhf6H#*Hm^h4iN)!>|9xHn=|6J@Kmv53-mYNzMLYAbRG=0VeIr{Uco8iJdv0U0JH-nLN;?oz4Qf8xNq8k7t1tCJMmp0qOs z9=cMGo(KFWz4(y@FTt2-79ojW3>2&j*Zp3^KJwlKHr{k{xNEXn<+_z%z+6Yz9S}e8 zfiSqK5mQV62ep@gt3nqc3JqLJ_oBrJPR@GO-c`&4pAY29jz+9w3&;+N6?bs!TA^#( z^6X<*Bmaz>e-$DIU|l*FCws2we2X)lF~4#>zD8=rcGxOI+E-)0wX4fUiIvpcu|-ss9WC_ zcksVBw&TMDv}NCbWoJd08sWOEYSt8$F+SP{TYAsof9sfASAh)?2Aq@#)D@0G?)E^R z$t||Huyg6HbbGoy0Z=1-b;UPl95bvMlG66yp8^g!?Ln3)_FUF5Nmr%klfIp$e+c(s=@5XAH1TX^S$HUyri!A?ST z@^p8|Fm6&3;D7N9tL2{h&9}j8_Akh@Sc?TjEyfOq!P@U3K~m|`6hIaQ4;b2%nbDg5 z7w!y6=v;3gv@fvwIp0{bA4Bf{9>CjPs*eoG01b9TbT(o^#?@uBJd>aqVG>HfltWb` z&q+ENrQ6##u0S&n{1Cf=#Hst}OZ5EgK$m|-j(!mD(SLioJTS-cOt_pR08Ha3EADcQ+_dw@e03&{^#Q9+bL%EZ~JBKZiQ-&yJu;hr|rSHX%e`6GExa7LL!~ z$0MR_M!=@*Dc9mMeo|v9l3p)(s7|Shlf!Q&H5;%wLG~{Wv4)5OdErhc4i4({9MSjvcj<5f;v`rz*czCtvi?r;3&`n z#8lU2Uz?F+Sx(2M$M!?qgW7|-2Rl3^{|}4DJF3Eb?6(AUi}ITTpRqFSKI#@Tf(i}a ztQ8>VUFd}pG={m|y^4&73}nU~(8QV#uilf|`hU}PlX|ID(E(oll|>YjFJU3*nmL6J zMEt-Y03R94RkAk#d5sEwFXz^tUc;eA#(nKH{vvoVDLYg#wjf4)ahp@sGKkSGY#x0U zVhBLSxf)l@Z)2K#hVI#bw1>a4Pm5t4@*X6_EjO$(^|Uf4$vG9aEcClbHhI9EFZS^f z2!B)8^E(gtJvghb{GYL3IKOkap7;-O-*|&MP(f|q5Q?cLFv=q`@Lfxb*Lu`+=yI%? zCaO#Y01tEYYhmv+wdl~e8p@P!!5vnBu_sW=eNC>Pz8Ar^$&jAL)f4ljS}knD zBOCOJVu{P%rvV->7{OZEw;!)Yj9fUI*niz@1Y+^dq4^7zV%LBJ__bQ$$pBy@gtqsK z^I@$rept#Ss{RR^u=H+k#z#r|Ta4HwrVr=KyJf`WO>qcKj|#juA3}T{qVXnR|K;iX ztpeV@Wb5qI7;F4H@sg}zsD*a@3QqO}_TTYW{)RP(TVK`Oj{WyBF8yRB-l@L?$$z>v zz|62x7W6q4v@ikD9?Z$s1LPCNEHmr5Nc3%02F}7XK!)FDLxBAPE7#7R%zL+&j~P)j zgqKVx)kAB)fF}*{$RV=oAJHRxrcipTfpZv=)vR88j7y_e-2fCpX#K*=)xGtgj2Oo@ z)oK_v!E8H$)h}42e2K`vFWzx(CHm!(hM)?DB&*)cRT@NG-cw9YY!F@m_B|Z}?uwPo6%* zJ2}CP7vpH7dJ|vR&vi1TmfK_M+=i>^JMsltfvmmQtEATq_WX|nQu2lhb$>i??Z;2x zhEhU=@lW6ssqg&-VQS+?VK(LGg4tJ}Jh35A4`Dh??x4#Pn4md>PSW7wG{KKC8CP*( zw=j&aSSC!7X72Z>fYrwrnkwG9yE|_w5mdDvwMROQwSu|Cun8*!V*ML~+3?^M9QZZg zpU%ASR)ex6=z0~c2>KRpUw`t@7KD(P(PF@{FMY?-8sG=wDTNAbrd_}1Y#ZN{+wCvy z+q61J?|(piK99nIkB-i){`*~TCxFqFLH}8*k9()BKx*_$=a-^@MzU(WnSI_YnSgc8 zV{o9?kY~R$%U)`!ls77O;9unnFt;bD|Dg>l@%P}hk}rD#sKWwHoqzd&$K&D((DfIa zXoDLhz(`iDo)cX0y5qawify3@c%prwgh@vhert^;1>ERG!BfK92OuwT$_0}Yp2tZ@RVM8T0N??5X>Zws z_Slj@_K;1Ms~V)!g@3yXWgv_6aPS7Z7_t60Lb30VEpXoxUW#eQ@Ag zKm!Qw^85w)`>f{Q#`kgL{5Q2wX<=KJ(MWdCK4tjjpjS7?&b^5VD}M??8osJJFo|Ji073SHf}Z2^3pB59i|2m|U&;ZR%02FScL*nzpJJyYPzgS2HR%Kuzxej}2!0e<^04L)BYQPc70efb+ zg9;2DsAb7Ku%-MuJPTcOVcy}qS!Jh%dpOu9G=GfTK>6h0FRF)W=--VRH5a5HFb zaEFI<4#7j{8KKk1L$WuILiG#f+RpNFx9D?F_NdgWLt#Zg6Ef7drX^4*1392=@&hhB zPYLiuyF+uefOnY0aR4babZ}4d;~Uv;ADq1P92u)_MwcB$okd`~T!2Q=$A7nxUy#b% z0e{v?2N*~62+XRBNG0Y`5yQ5&E9xVikPL{Rw=Y^fVY67q5BqJw<96Z5kucsk#y(+f zg_ynZ&Z8%V>vI9ccJ{oa_u-50%dfx`?S2HN*s+zKz(;%^M^G|nGmOI>w65RC2#x?l z1?+Q1qo7qoPRD?nzlHo5Mfpu|$|Za(0)M-%dW0n@0zCU{;UVO}!=9;XkZo%TM?gRl zAj2FGHQPra&iDY^<`KPV`@HxWo|Jc7W8sUTiO^&Pj+wmOVAU)RLwcr_8<%mnf%=C2 zgv?MtOPl>A@sGe+2dwd5Gs!y{5b59Q8elOvAqf^-fK%`~roaXulxmF7M_%aSV7OWuzUKTV2K3(|yh>!qD?qSPC#*1M)nu$_3UhTKC<< zhXbF_3UVmZQ=mbW)ox%--}5ByH?|6$;L_ zk;nO_)Uhm~Ddv|43d>OpkoocR{=!Fa{U18t_3t0M>63##BWP^C zAV?>7*a3?pQx^^9fU#3Ix-2vd=lDTL6Z}S>33=<7NG$+-261kR1mp%^Ie+Ctcp>HU zE|W7~m0NhHoQjAb$8fWpBC>^i1X^|(U{9_8Y&T!OL(Xm^h8>2cHz5kFC;Gc@yQ0eW z031fVgsmz#+o?`$pwxgl9d_eo$j@aoOnu{+2{e$aG;m!19wi_=t1qTT>v;CdVjtd0L>h*59PtbDaxs6r3}QJM8pi7iEcz_Ir|j@61Ahj!k|KA;0%wSX zkv}nMdMkn^SU?kV2;JHz$1CT9Y{we&)CuC3SZqDSm*t6f6y*_5i=Qq^S@XmDV4&-S z!&}#O&@|D|uc$-m1qt^bXWS;{5m=qi%VVz*odBxRu!Q*s=yN%dUJ%>Y3@Dp)@V)iB zCniZ&HxrWUK&J8`HGlXKi(q`6K;j3X@pG&Gc*f6nH+qPPmgMIC&M`svCcFDpzG&nO zAKZ?62YC1OU%w|LlU)3p2|6dnJDm0E?%G{(r>gOQ$~v?_+S?UcQk3 z^-W4*CPP7C&V;_c8~V@~HNAl`dF@6Oz>lyCAvtxrNkMClK{^L}L)~C_i`dD8^ 
zqAA~75>(#~0M9Lu9dzK^WE`+^&F7;40BiL-OCBUeV2oF%cY&pw!*$Vmj6*ZjD4N0= z*ngcjpl>;X0fNcx zb%V)TMz|X8RHz_4e+nF?6L(qlK?LVFB$@RdXo3EILf?ZP8_j?tfAi69wI8RG5^$QZ zc(PTBZz!-E>WI}ZYlry$@!ERvX4TwT-R+Z~gAHAoUjq|SMiUey#zHi+z7kXnNG#|r z6zdL&XMZtsuD;guy*&!Zqpw!shUcjQD_&02t>qw5na?i}PJ9=7;~(qR5aNUR0djPT z0jIpqKQJ5ltq6+%0Vac-FPC!1erO1=QVDRMbzVRzKWz3%mW%`JV*KQ;54SB_3F_j; z45ND>yz$KI5x)tyfS}+D9sr>uJ`~r{hUD{5HGf6N|4e+{;_x}rs6WqwV1dw3>MgzK z$wGk1)la{hRK4aJIHj*>upsv#9zOnQ9+v9nFY2PMO$)VO3K)(p6b)b{fjGOW_?Ij} zXW$7#^7v&Vw$Qr5^N=p#m^k6@avkYR>be&cc?|#)PyviL*$6ceF@ixVr9!I{-oi<( zn}5k$Bngem12cX*{LsVOE+17O6$4P0u1Lrrive6121X^{aE@NS#Dhr|<_$k$U_ikm z@g!8`iB0!}DU*S~4S+wsmJu6Muh~ z-b!Nz)bx7{6{w%bL-#=|?Qg0d8o@16_6%?^<}g3;mU(g1s?rT`I>ftEco>MwI>6CNGGmh10)|_4>tuem4%P|=jt+7B z&xwJSkbgB~d?ALP!h$cwNh7YW-hZw|`Z-@4WP5=~!tdP;S#)wc)RJO+x_>xt^456d z`ltwmu!**|0p-?+j|paQ*QGrk^>V?l2{5gG33>r@($V|z*}kNfX|d14s^+^p-FD_M zvY(hO&Dpy*jK{a)D==#;|9Epm3=rVmoSXrj0KMHv+JAhV@B4~cLJ$FL z#E?PE9C&zOEnovX*!$fOi0h5XAoN$j&b^m~GUJu)o@IB;@e(860|Fu#$u7zm|G7%kqq;nG=FP%AtsNs44vTPieo}ZLxu7cl!l@7?<}h z0LwY{o_!=)v}1=OejJ?JG@uz$tkUQ)^S&H0=MY=s1} z0T4==Wxlqr3$s=V_%g@IpYyRi>yt-rgaIgp4|K#@A3|P~i3T?Vwv!lGg3&Bx-ytw* zCkoiLX|`2pmBXOjtj9P=bY`|p}QiB zxu3gDO7`y6M~Fzf7?gLokKM7*yO)xzz9OM$a5>>(6Mvj7^6n-Hs1L!0nC{SKd&XGi z9gs9Q<{Sc#&zCIzHD>Eq{3_rpD!lf-Dj{T`h*<5|771C7ntud01!QbY`Rx_t)te=l zU4nTapvu%suS zgMw(L-hYPY`tP?MxRd|B9s+W1m=^3D$A50-4f)5j{4ak%m*m;G=@KFt1u+SzY@ zn2i9Rg|u+jU8cY_VV~_$Jxc> zAMR&F^d^?9E@U~ zL4d>O?xp+v{_iiBt8I5bJGb;Gg%U_ISFUvpI#i>7pa1#}Yy9{3UCa7CFU|*^p}$+QEOheIXu#y%Ds_pxw_V@t=0b0oUlp|6T|8-)sJ#*KncWH~)EE{$Bs< z|Nm+K`n?YSY5&jbf7<`^`k(gyygq*YfOG#n#~?}p=LP-!yQUBaK|f%9{&i z(EyMq$n0#IH5w1>G9+TI}_1IPZ4wl5f)@8^HE{q@=YciZRR zagYDzI=*0>zOU$iT$ke4?+3(qf7j(M{?q@7T=Q-!&DB(?*HRGXaWD~K!5Kgm|;NE>6h?)LBVE(?=QdE{GasO z=c>pPq~c&anhFrvg97GQfb;St)Fsqw__V-R4=O8e2_TC5%LBojzqXLqnXTjoM!CWI z`4NAKfSvOyD)(RCyTU&9m&5U5Ck1T{=-XukgCMkkxxR{_l=!sHn+IANK-~HNGo1&; zcPOs@{{H1*U+ypSxuM|4RQ$;uL!Np56^F6w$bhZ})I$>+MGU|${CzGHfZK&Sp7{Kw zG$E4veg9woYN!PL*iSzXq+ip&yzzg$hcJKm4SDr{zlZc-0IAb|zXv!#50W?keh=jC z9{iX7{T?XjP#*u!&mkZD|Ms~C)g`FU{QJEQMd=6ilLHmlD=;mntN6M`po8!$E~mdc z2BnSQ3#Ags`>KGZ_^cn^Q| z2lxw*{O6}%@b~@>KAHcAe)rG!`@i{JFm-?&-LHM{29&;cbuG#zd zIp~f3-or(Je7N>|55EJryVo`T-e!otf5(wvB>#>B?_pSeA)gNYf_{P`YVd1!{LU+c z{p;`dm0y40_rRkX&IclhHzZ-<{Jei3J{v?-;XR!91vV70i~Swv3i)R^@AL}!atKA? 
z_pATY-vfWl@D2SD)!+N=*B(aD{=1&)`wBY=cw(>6Z@`bh@$lbyLI3;TdHtOad@f&a z#P52-dB9D8@BOYbgh0>^>UF>I`Okffe#iNzJ>0MQ_dCb$e#7;retxb0abJJnJ(T$V z_oc=fc)&Iuf{A&-_^{=1c{{Z9l4cGfOUf(bt z{|Ehw{;uQmcOBt-|9yMt_rHI}iShZ3TNolA_#Rw$xF1j=fhG9!&-MA|{{7v@`tQ1Z z{<)8T;~p&0;JC~0xc`jr|DEys_xwN?0UrtHG5n4T?f-M!{(kTKJ8tm%edq_UjDEil z{}10ke}7?o!FBuV4@{STfB*WE{@%lL@z>9P#uJ?PUwZvXD9Ftz^Si(qJj`_kW| z2>1!e{6PHvH+25iv%jG-|LYY0<_%zhsbEHUh3g0Ws_(CqS6_eS4?ss@{3`F?^U3~e zzJ+=B3(pbYQhp`A-~0>f095hjD~QlQJrGhYzvl?%RrqmOhoIj4{dJ;-5+#gd=nSNd z^xyS^P64n2c2=+xf_n6CT=>6Edhc8GSGT<0Qn+6~IWd3*saNs?D;0oJNPFKXfyoB=RjrTO@d!y@A;bka1@19WK9iiqOqUnBZUH^7Q-oAguAMG6p^}`Xh;a|r*-}Vph z^jDt-y%83|>;LV4`(OSqc|Wz$@<0Di%d}?8F=YSK|MLI*zvX?m{9p4Z$^IYzdexTA z@n7V~&iwzo8kOCD{rm5EIhyRhluOi<$v^)CVe+R~{>uf!{_8v?|7D$;|2d78=)e3v zM8AEW6aRlTj^>QM{I4m-f#%k~pgK=LrNC10SCIY{vmwU#|5Z-~!~)1@0U7$!0zmiq zy7%Hc1m)56`Xt~+K8ouXY&YQtfcsa9Ad#>q>c@V?d)x3gLIHtdaOWmSOMdoVL^zHV zT9}85C<7F2^=+QL9y4$wd(QE&9?`au_TSu{=1hNfrU`}p{LD@!M7{Wplcqqo|L!R< zD`53{xr?}mrzf)t@r}v`*dTeh$((#j4@b1~pzj&f)id4Ou>*e+ezLglIa_JyoYUOw-=ud_t2&^^wH%{jP+Z$_QP-0n{*r1I& zdgN;-Z%upxjAjRWaAR=BBl#lh)_FC~gy4Tku`4%_PPtQ1|MbH3BImmP@rAX^&kyQU z1HP>7*SjWriL=I|RNwdt=t9(KVhZNm=Kw|sC|HWWL`?zo+eaMSE$27K3!O0KGP~+H zG|#~L+i5Q`t}ve)0cPyELE*!6IeCx;P(QWPOgGB+j|pY-*b97?x_Qr4E;`8vJ5hhs zCbtOmhEwXj^jc>6*J}64S5#S2gF~gV>Bxe-Wf)g^1nUiwxJ*LKz9SV}QfzK^QW%5I zLYIJhc;6T1>}Rce}gmJXwGGHd=z5 zi>Bg_J?D5o=V&7%MS`2r6q`ROGpNZn@qB|zU9DmUJ=JXq-oZ4j#*c@EBA{eUz(Hnm zz%pifDgh3I)@K>=LE)tB<%kC4@oHPHLB1rERz&RC2wRWR49ayJLm?#7#Uy+1lNEYj zJhAX|LWuVO^F!(&%+X!tyKaB$dDkKu7$ZFzFw#blloVN#3ZLKM{=j5>5zs2%$V!#L zAo?yNfPo*W?9K4sCf`s%$*%^tB%lyCqR$s~G2dm89tFs@40j~}yhw``-y4^JuSi7D zpZWo+nARa(lZ!l)N0Suaulbp`Z#L)6APc!_zgf@ z@@jp^r~78S;;LKKr^bKvt5m#EXKnaLz0iwsL3}XS0F6#|r(J&O3#qNP>z2 z&p@p?!TDslR_^@?F0M6X(B!YJMH$ z{+SHWW_=+U@lO0vv99}}bYrko>3wm_@Gj$DJ7-aiK=<2ak=U{Z*V*#k(4`YyW4xxL zmT#}itEQqs(;4&0B+oji__YGNo9#U$?jWkJxtD}l`|?cJ(dFedTP93TIwriUc&jtl zh~AZVw(#=n5mqbBp4iYa|#({?+6of^m-ZM^_&AH)?T%Sg(oLdJAG&BvZ?0d_9c8u2-U|Osyyah-o>Xw zV*YIq6{0(Abs{}p0b8o~B!u77^<^D&&Y}@lk8zNw*C&7cw3O6b?oX>(KUvi&0N4(F z5h97lhJm7lIGWn}SRvzXV>c)9mT%RGfK`m2@VF?Wxf86CskD=bMF)H&>h%JKY9rk1 zy13U(1AlG!>%-qKhi1hXa)fL#zUMkrP?0K_HH{V0eZd)%IGhyeAAYqB=64CC$~>oUw9jVs$*A&dl)Zns&O(nqOD6Zc#*~!wY5>`&cd>DV zVy#iJHkp6w$XkMCFam_UTAnP_9`U=|wWq`=k$--g-6$&djCAgnwlw#-paDynAOZVC zL6(x5?)Wqt=%H0wGAQ-R=Oj4V{fgXWjn6N39ZFESxl^{a_D)iZ&n|g+_FKkDehpE8bVq-8 z$AY@025YugT>|g4t4SOcSxOlv6ZJ=gUyf?hkDh+Ix0mF<@V&+n40{{~IZpJOZVZdS zzqt+A7XXmyNGH5T4Ru6N5~}C^BVOd>2&0k&t~y2(Y};?k=OfF&5lYSFdU6f-(m@YS zb_?)6q8L;x@4@S{oq(VhYVeTs2~mFl!PQJ}J}&qvVYrG(>;6mu5-Oy?)PXI@;kDQJ zp|Bdjk_<2qh9LE6KOVisz$AAl`rJgpi8& z#&cPP_VVk7dF$O~9(1OY49hWP{zaoY1xJl@H{rt7TlLKniMl<*jcrzE0;yif3WW^J zKcHF!$xZ#?17%?bizev~Pmv5;g6FG!ld#K3Nri;+v*&`}B$Zh=cM1-Ne+qa`TDXO? 
zV3KS5YnSbNmTj+cq5V(CzvX|e+8N|#Hj>eE?-eZGe14LQiA%!1?P1xHPFeFgh+8pT z#EO;xsnFN(3C~gO@mqfz7zNPnCb`jm za{2vHl#?YNWro*%Uj}5g-Ot<1)SB!yVd*?XAAmRSBUQ%j9aS#1&FChHQ(DzFK2{qT zH;?lGZXDBVdc3)V=alC2IIV|LC>oXTVfBb%O&(j%ddojEGvSsN=Ji?b!7S?!J7goE zZ);odruf&`gmkwsEKz^KF&5&%Tqoxa#^4A9Tax}n+d}Gdqjw*<^}fXWZolJC&9q?n z-2}DIxYL!| z7xt@Ib+(%F0rzVW11tKk2Q;1=5f%o*R}HO?8&JI~+%;MUwL^bbo-f0CJ6938F!sX& zvpObc`uaMQ(Gv?|<&d7X5qBZDJzdBCyJoODTS?*F#S>4J0}I9=EUm7us+P2h`-yaM zE;Wy+y>i6@c9W$&U##%$+P9lmwR{e|RaJU|f?|^16q46u(E6LoquFIxFK8K5*v(RK zxkio(D zpq&Jc8BR`i)%_L?)BBD=U7iLz#1hgV{!(?5 z>sIiR0E*Gki1=2-i_gP6r%GQ`5@Ae%vvVKAtru z7LN(k*JLxI#Y?M+iy8|rDb9?FCJ=(BtF(Q877D7Mh;Vd%7l{p`2d**sQExo11-h&a z>6Hi%jEsL$nrjCm&b%fJ7~G=uNHOnKGOP?j8O{pZiqBDNDTy2A_K@T25rGw=t!4EM zcOc;kAz%*W9l*1hmG!Y&%mle>y23#q?0CNMXKO96JsO&wy4m*oj3XO%o(yW}&!u?I zc|8Si38}Jr;u3bO$BMrm+gLX}^5814)daZ{+x>q&2M^4C8H`J1b$W1P&@#bTON7H= z>oL|QX$IzQdHpw@3V+1A5VDa%*5)_C3_$)lT**x8!9iEMTh4`gRSQK?q7DI~EqDSo zAK#q9*Y8dQ&-N0+*llO7I*BZh>-ytK708v+y!kEW79mD%4o4AdP0hp3^w#SQ5X-(S z-cWx~$67$tFTG16_(Osm-dJiXY<%7y1axMEIiO?dmN(CZx=NCx5~j3_o0nAU_{G}S z^3?s)`Kmj;-j*o`slr_dcG$_>JAvGS@j%Q1xqs{UIZDFWa?BTOl1DygAZVFiVL+G* z!VcWad8Eqqao4r@?Khg%561Uxx)E4jJ!gO9!hzNb5H`{J`OMNNx|947&-N(v!rwbpb8Qs5#9ndg@jS8^n{u*&(T5fK=gk&gFys{ zN}nt6JVL#NSSGYBsEC6kyx{_boZAZa1-yEQprZ2ufdFxbVQ-^J_R0XGN|vVw`x_Dn zfn#-3y+H^dhoYWWGTNJ^OI(mFrFyx_GeGHONo@zNT1E7#)C0vUa>|Ju`yqC>bL?S@ zsTBK~@$Rf(ghZlFSBfOhCXIg@JlFY2)Y;^ynN}O0rIZ4&=J!?s9-e3vlzq6xe4eNJ zB0Z$n>ZIg~1>$7eugmk1Fa@1n#->&8ryDtaPl&YTiZsNt^F~K-BGlBVqg77hVEdtt z^AYGJAz%%Un*8_hA?#U-g=YrB61*Q!O>X&T3s?n zf}jE|euOHFNQ^uK$amwlLHPMh z%4uyD=q=E1QRXTB49oLXS5!p%_ZM(9#+n}lomzpB<2(uDRVeF#9E5tGDBNLoUX^Fh z6!D^cKGlLKFd&k?Fj!(9OO38cOO|OIM;L9Hp}xgU8gyDDrex?54hQr!?%(Wm=tOD@4i7V51x-8 z`J4>0PYO~Pzd931Dyld3Wp3&XvKNDJQ@)3H0^#?yB-p?LpMQRucIq_Gnp9!48I5e> zcJ_d@px-fxAu@t4LuJ}D{@U;W6R>kYf=jPZ7f|D`j6r{ZCs#{YEI_5N$>{*3Ly3LT zRs8`Op!F@BAk;w;l`16IL(it}au|9)VbN>EVrpVo51BRwbC$v6_0fM>^S--0lifjcqr#_b$zjJ(fiVrl9hftP=dB=%-9vGt zQg|e@nUjCIfV&rMcUFgO`_A2lDB)2MvgOf;mIqK7eAyTZOMXJ6GlRKB$Y7T!*xo>! 
z{PBl-Hm#1Z;N&d>4SOYg-ZR;EX~EDQc@tT$PsEYqmlX36gqSz&%_qDuU*W`rm-waJ z={<>b114e#Kq5N^P{O-)mq}Nsj6KH$aaJtG+_-;-9G&SSSqIa9ZNn{HmTFZ+%@|NJ zT(V5#S~)t>mkCx#7I=ZSV4x(%m_wP5?;Jt$z}x1!15l&Tb?N1EL_%pCdFZKMN$@tA zK|+EzRt4wwz{EVs72>BmYt{$7zr(HTzUDrN2{vI*we|)b$oCFQtURcZHCXw!&Sigx zwdAqhmqpwUK`o;ks+-H~}?D+Cl=FwjNi{A>qFq^!0wNx<$6 zRRmRgKBMLJPVc_SWRQFOt`FEx8jqX8bzWg<84$Ho&2iPEh1#(eu9bml9+zNC6}C|S z0psQ}T-Fc!5(y!ZnzWW0I33A%qF7f-m4cU5|CwvrE+o(<99e2E>+nEgp8?8PLGbuBr4w8BBOctrO!#usF$E^VRtUjA!PD*j5nbbPM3{uKi zr_(n8PKL8m93+GVop1!7pSIJMe9A0@ip3HW=arGf@qOImdO79HJ;eN4Avb@BYDmuj zVTYAzpopsyd9;a}x?L`t$0yvQN}<#bg4s(;gF6|QosUh+Y3wl9OzfV8$P9(h8w#X( z&{p@ATjx%jO$YcbB|2d)uARW8(z71FNp3*B$KY(f-eIqwOkHvIUHhWdHx&fAnsN*d z=Ce?z^1MV_+*NfAHI*2e7O8*e!N{JK^!_u4GacAm1R+JYVDd++Il4zz(YaAL)!YR7 z-Crc+*)3GoLOo-uS1sA^0ew9O{zdC$!57=+2kdHIrw6Q4moGj$@0&qA_>r&>63ic) zc~bUoYH@003Vz7@z56Xo9&RECOb{DHi5*208%C}rR2ej7?iinU45NSbg)X2mp}M?4 zW?gn<5)k7qZ25(88U-}Vh8q$aOD^g_?!3vT0L3{1ljrTh;b#T8XU_8B3 zSp5qK@OGpx#>F%5IX6sJ!m2UkCYTBLB`Q%;c$&bFYHP{3Pg}-0_8t;5!E-2FGu$>3 z7EQGb_tuhOihQ?pOv`_pYHndSz;3BsmYQ_DQE)6Je&yXVmnfhTEEAOwdfND$tRdH| zDcY@I?qUoN*-*QvY}1c;|MFp$k#7=>Cqj~WP2CPIafrDf-}ileP)c>LQlc5awg{_( zReGnd09fiaScdM>{k*8?2oL01iCWrA%xz=OsjvXG9KCe^Bp|eTW|yv!HN7kalEZvJk*p&NGA^^EwcM!xesP!6hCF zCXUng;m9e~JXsalf+`54QKPfWlo|>*K~sq4&P5K5NsXO`IQvl%noLNF=RWm(48)7d)qm54&lPTLQu+S2397iK96wFqw71TR5B>elx?qtck$4$TBdc>JT-TL86pGgzJ zCif=r+%v=6K}A^ad4RY98OZz8IYyA|I$3M)d5T{=jv5ySl^dut}{cIgXEHXv5BkWHJixa_0%M@_xKc%@`KGPJ>?+{Ka+ULiVpXS z-uhxTz`AbW5;frGI@G6)gn9nQU^(SQOP}}@_mmwgT0mKKdcHOxFz?Z3@6K@2=S{hVMWq1i;@r>@ zpRiZQW-bM{okjo3Po{1o3K1D%R3qA{0@$#1p=RnY)s{P<=>)VMANM{yr3yebn)^vW{ zfwq4*kaf<(;se4iUz&Iyz&jH>!uhg12L*-&JXHEgYu+AejCT7p2_YY=&QM~J6< zufZCM0}H40zU5v$EMtsf%6uXBwsfBFD|8z*S{PU-a9d zmn;N@oJr+kCuxXata|T)lWs8#zz#yXkxPGF9IxE9oiHyKIUoIg&h^nrR+b)5?m!Yy zhOKijA-c$hV$ug-o2?5r2{=lslncU^2s-S@q$lRd4)S`hHeF?mC(FH{9xh7OBfrsd zaCQ}^z9uh!J|fDh4CJkS$aar|mVlVH$@>pB(yQ34{Qj&?fOih_q`I(;#jjGPKSO^I z^+niMKPxYq3fx$>u=7396B(nn$KZ&*&BvP4%JdjJqC-P_R_B-o)o%vAl2*9(q23IM z_11`AvoIr=o6%ciqabk#4WhYY|8Xen)05UFvg!$Zvaa z8IR%@@WHFsAbCut^16(?`_w=bEf?fS+@R);P3-<&X%OMDyD#cNuu}DA3K@S01f7Oz z@$@0aJLXlz1`zrnx0`9CI-7XH2I)qE8?$7@=Y{miK_)%fB`e%Kr`5PWd$%|%&P>#@ z&7F6{K&haTmjj`>Kma`TOx1F>WW`RlDxG9dBE2E^vvSMBMs5F#b*|+-zd9sS&3jJ; z_y`rZC_4=LQj4YaR*pEV&)a|7zOf<5>)fFv0F2%-9$R*OM`Ipv9NJVa+;owSE7VlC zJJGEIVIqjZoVTO8*j^O3?2RyYKO`&y!#Uj(tDO)wlznt`zLc-0^**+7 zA)qoMevy&y{DuNcIjjFj^fw1jEGZw#JuJzFAF5iH2e?0pJhKE^b9Qe<*qNC-q_hHo zO9)Vr7NBA~NF0)?o=0?fvoy1p`*$tvf$(T+5 zw1PDlDW~!Mwq`foB`!R)Ln} zoWVkkwYuSn5D9$Zg_`_hSSyK_2lA-ki1}BN9}I6L(OghZSn|ImifmQIKMXfQsTm@4*6> z_`5*v^HB6A1zb@f_CSu| z=@l*iCMy6gfbgj(5UDG>t3hFmA)mO%4jRcPH!Ze=;gbu=X750O1YB`I^RIMu0HN@ou{90n;}CPe8E0nQ@H#K~TsmEQ_>Wd*$4df*~!ozrQ zd|9eK*#OCg-1d-|X7&Rs`XHj<)|9E*bGauno~WSxn*uL*Yg{tur5vYphcr+Euj+iZ zCzgo52<*@K3zXCERg7}rA+!@>%K@xF2v}MQhX?6@6%jq)h_X<5tLHcwNwze_8*>01 z3lHAP9bF*q*{#-ff#u?lSa%6Sv5g}dX6lb-o_?bTPs;x^h!t}3wO+4C!!((((ka;K zKQa%bjRO8lcXB#H`WAP-l(neYbc;cTVS3y;67=a@c?3BQ#mR2oh_^@Gi18ck8DxG^ zyDEQwE`XMKi|#YJksEClA9ND&MYdmSuCfemgHY!wG<0H8PsawE)`z(fX#<*tzECHe zHZ?c}E-=Cqb#gVRkIlrfsc*p`=yg=%jT9NiBp_S(8eKK4%?xK3k$rY7xh%@J^{E+& zFC29UYZDMi;IXG>c-+N*wGF!D3TH$<#*UDGz+f(}j{>6hK#>qbTa7wga=J?j(ZIx6 zlQ=ll?`{8tiiqM12B(*F;w*h=1d&-f)&>zsLOWUGxB;P5Ezw*{i+Pf6>F==Hj@syP z67)@q>nZdfuLOtYTS%*_=3NSZDrqtz?B;njcJSL~e;3AJ6&=XSq;d;Z1a zOR%(sUHIMn`=Rk?y!9c5rhPn%esgxPU`fe-*#*;A+EcAFgBp2k3Z_=hix`7>5NW8WKIH&2mId!CejHUhan*}Ka zdVF9Hj?@fC%WdQj$=9@fELYm^(sD)yTG^9Bj=)hAbG>$5GYvPlifR*ak^&HO+7~z! 
z?M*zx;Z;Q?+R`m@B!JnLiPS>y?zugiV3Aq*Zfn*>dEhZ)sH&{6-thp_8bt?jBXpsb zJr_iqOz`_nj>Cfmzd9&?|5F31l~tDArZLt?X+Fy@!ipd1?u)Z4RaO6(6C%>+YT@x8 z_k(|+kaFFkh})+YgAKx`vx--*hvky8$EpIzq5_ZN=bXO4g4y)UOQ+e!DJk}Lm1tYR zvi*FF=NG*&c(_fgXH_N131BU;dn=_`jPiEx^i>&Ffg!)sRT64{D0$>>Uf)n$aKz#! zV^Fz4nl@c^$f@~yeps!ouWAofDs5b_R(x>}`@Ym_9embc2Q_F#h6gLsrO|DsYA3@k zhU~lfa-;QpbklCXc{KKmShZgF*~E*{BFn!v5hcgtw1E{yML2x;dAp3d9mvN5ls#UlrX(}e}O z0K-f*S{#o0LqUEWN5I{95g=;%V3TEZb>ZFyy6eVq`hpv?aHDT2eND5FIy&UrwR_4?(sTF`{IY%(1|VpQ#TN}f-s zV;}|Kq?;*!pOS7dT@J&1Z?B;83^gbHyUc2Y?1G+^=~k1SLB(7bk!6g411_-m-_Bj~ zAGoVuJiQe0i9ak32)FH?VmK0y-}`YJwK87g>wmZ8(PQ zR1Ax( z_tm~imjv^kgG1@>%XJX^Qsy8}3rPYZ1L5bU2@?K-)@5-#A%Bmz#)CQ!x$f!mqe5H+ zDWfN3WExW0gpMBR1n70JqTK`|I7A8KflhRPlVh48ReQYt)^(<~P1T1vE7%X+2VkBSXw(a}dXzHSt7;g~xEro5zo zgFVN)xIX-t<&{A$_OAnG>D;r@6c2iQyizUGKoVv6F4hO{vw-rGs5A4eY zz*BjvY=Oz;ET)hS)`C9iACG*D%9FXTCy#b!O!p5Oufq)=%hoL_sZTTj*3K2=jquoA zI&A3dHN|otGj9GHI66<>?d>SE?YiWD8IWZ-F*$d79R(d5`cyPmad>-bON#_y!G!oC zEurVAA%vYrmui9DNHTjJwL&4$PNLYRK!ISQoX7;|)dw^l?2&!I4*xc{_`UP^R0A_k zNos2*3oiM7v4<9Fxzmd8ac4Kkhir<6$c+|AxJ^AX62)<&@xHZA27mF*85JCVZYt@A z#tdUTWhLe=2(dZ7AiWIkgbYr7GaO*EJkHe8ABY76v2bMG%{H?n3XWCM`9d1Vy>VJF zVB8k^5z^bSRScl<_xjvi795FZ@w6tyCOFE&$>JNztpg1q>f{wBBc_X%GJ0w+9qACGM*|sVjPf-sx@< zUXUm?Fj9YuFHkN`6O$E#2I|OO3*Kq24B%bzUVDJSx8B44I2+56jWJ4pnr{pq{pw}3 zgv_7KBL&aY+nroYMly*zpk_k%}Ly-HpnHRxZX2(b8D0?hDp0KwU0`0&M-4ebm`3N(=!o(RUMR8GE z?NMRGv-u9MmY2Y)Sss&`4a|=%6+<4Vs6gh4un^?NBvzh#7BeJ&x<+oAL|@0sf%)*1 zv8HWi(|EyrPA2Yy(S38waTV>X4S?h!|=?mbQ(M1X?H3_U1- zQw*q?2z_>s@BL+1}#1`U06${>}r11k8xbnu$b+QxLC z7vmf9YZ9M4K!3mf?Z_+RJW+yYRQ6^gz*ePu6(w;F1;(9!DQNY;KFH^p&TnzNyN|v1 zUjl;zrUzITV43{!P3tVhlj;wmiMF#0oH`Y8sg)s9hOsOx)WXz%=;|q}{f4V%hcP{P z!EF#1W9H`9*z(mwoVcMM#=@EVCk$#L$_38{vWSu2jCln_7#AwYTGNkEbn{>@%Mm<; z7V@E43*7X7X6H!kQ(WN#_Xa{Wkg^S^+_GkQ*@mjxMnI1!Do8-i{UcC~`yq+LSzh|e z9FAr+Z6<^tN}2nBrH_N8)}Ld*h)tSjF>X8T9u4*0!a}) zEr7F-SI1PQ)a+d8jhjslqL0u`02PDF9j~~i*hK4pmjlx?%B{b5qf0?HaxJNHBx%t4 zyn_JFsTkQsp_}$2UlKy0}4n+;(3>{31PJ z@98LiKJegnJ&#oFg`=;AoSe(?h{X&Du;^imhIgT29`N6=t4h$b9wM@mrhMToRr{C) zKWN;V-#ic+Yq4X1^ymJLd&R~Gjg8#C1e+3JHNVY9#QKb??M;gF>M=NP*8f6<>yV@N zDXh;19)BQizClVh-9SewJpxQe<{Qa?pSlQt^Ol|)DBxhEl@UJhhXU*HR^{3|^X-w3 zv*jCDa(l3Eb}XO%DflTakycB zhBVUb)l493$gyxF5E;-Kt3n(EzmAq~=)A1gq(MCZIy*SP8KqwBA;&E!A#9c+WV~zU zDz-j6jz=g1)Um^!dpMZo$A?}N&&xVTkW23SwfP{~0maqN4zy?`!~ox|rRwBYNTx@( z1c5|o@mc`1XWu4A`S|Xe8nZD;uslV75m$Sku;+dYg+oM%;ES*x@pl{H5UM6wvISmI zlq$If$?tmr;kk)M@3POJ!WDTMW_GaQjPxZ_vb(Cdz;VWY_Tlp;zn~QGfdmYovA&9W zXMx^AEfT}~)O9$#?w*&*X=lhO7VkcTZvON@(sR@eTe#YK+=KP zeVhQtCag{XP7Iz~*qN_Sm`1*T(@qpG>`f@;n6(u^vM=v! 
z+DW7U5Y`}ebT77O;1Ka6CKq+97{Guh!1m2MV=%@UnGt^+W}Jx_^%W;T8zp11Mk+{b zmykwczSwkfQHS&?m7tG_A`)CHhP&)(_$~J*?VFi6{a@ zz5yL~0mD>9IZVZTMk{x} zYWUp@GN9B)#KYsD#OZrm7A%RvV zH*Wi)!)`|G zWkvE7Yv7aZBo8Hj&u}Edk#q=HHc3VE&?sYlC{-XfB}k98tY-X0>WEJlsV~RF>&sv# zWPhjE*6>Se@;f^-bK&lD_Zb+r1TYE*EXBJ;IJSX?oz54Cn&!_a=HHf_tD@xqFDuoA z6H7;utlvcT@^O--SLyc5G&5w|+>+lBAgH~mVr5Hjz+Je1$*HXNK+^!=h4&2jWW;!| zE3H^%%0E(Y74yO z@)9a~81XWHxN*BBIpZydWf?_eKMS8*k1Y`Ri&AoLSV)|aXAKt3fv7AKaiEU_%@+E3 zd-=~(z}{ZyUM|{Mt=UQd`|G~7cDk*=kAImijsR+s1uVJNc{bEla&kSo0Y6+ARC}rR zjA7Gt;!hkl=QjrU^jHIB0x!DMsxfQ6@e%YAA32GC&;7W)KVwZ7%7oD^jZaf zATSoox~4iMA>y(EfNz7QG7$S;H-nExO^xZ9QlIL9loX_A_296sQ3OP-s?k<9$y8YF zd(8fSs^_QNqD$WfGClcMx{_lW0;Ohc2^8`^R%e!Da>i^tGq*HDfK~uV?&NI_GghN2 z`c32B<>>qPDQ$Zc>Up&6oUf49?JyvZK|Tmhkh15jC4?m}2(`sX%3#6z%E=&5%$aai z9uKS;K@0b&V^RQt0y1eAGv~lC>PX-A1q29xA$_ow0M!W)#z;Uv`fjIYAXra2fULKd zT>9yjMrxi8mJ{NU0um;phXqB7Hw&aQdVhCa(9^0tMtZPkk*r47Qx`1^C??WrgJ**B zV}UMTay*c6TU5Xx-m3!aFYlSB0}m{v0Et}t41AP^WH2Z&G9Y{V zjUtQO_~&J0$N>GG1QluWBuJ}IGzTFMY^x;>oyOq5qA(-1f0-|reDGYaD!^j)^HDF# z+HOp}o`63Qc2#Tbagq?jn#2vW zz`QeQuvZ-C3GfE7<#1Z-Xq!>#v)#jvv9I*dRhZe%c-Y%C(tR8{9eCz;aa$V9Sf4yX zvMMN`8K5gkbTb7N;{lO~F&*H4#~#c3u~^fF&W~&d?3@fLHIfQ^&V;Us)mw}Hc z0$H02N3`F|ijis|p^tEc>Bs`HhOhF7_S!}kYb{0->t$mp+n7c+nS5q{FW8I#y*tm7 zTj~Orkq)<6qr1`Fip#N=2m$fKavEDuTxH=~cwZ+cYAG^Y87jgJ^SKw~ZZ1?;xv9h( zx11si>9{c+y}Y-m9#BWpQf{OeMQflu;G5=fMu6uYCNLUe=mJ>|&A5bf?}~bL@1HK& z-Q2xEe9355&sl|n-;e-*uwt(udVuW``mlxI?(~#+qW2ddQ`E}9H-;m^s)2p!*wIbE zJ}%S&To&vO6tGeuKuy!1>CUP`RVS~r^}G`?$ACsw2=M4&yQkjg6o6Q)@>P0CFBmvZ zXI66F0k6tp8jlo&5=BI>RWiWVon0VWsAg8Wfu**CdzqMkIY(!I@V-yLRBp2XaY*aB zS5Uy-QXdcY)pugoq^h0J^y2JKOMmh?t8)5_6iP(oK3ryAF!A%aKJpW}RTA|`RV9JB zd;6GRHM9-sM|Q#AP>36F3cyrioOUcj43acXd-|Tdt$1jTULmDi_Z~qNix5>GAuZ?U zQQL0W_J;{oHuufXOpHeIq00zE!L(f9J0X&R~s0k?J%sHFAZaGAy`-2W&#zsOd zz1z1wzR~^EAw`BH6fjp9KV+U-U_WAyl!lWDmTT&b$b9*#DP>yV@7Yj*#ecA4R536iS8a6u?NPNFXu-QLe zSL!KzK)Wfm+(TVnY2tMma3EuppTUq|MuE$A(XW1``X90q0g6;kY;$hKE`_`dPUO)u z0L;cfJ^?S?E_3ya?&tT#YHd2!-{G#dLqDn_V9Kg*{5JTLw!5 z@Dq!8LvuBMfNzG7wcxE=_DsmPPvw^thKp&NwlFLWnf1D>d2+B6Sgj~ie6TJP< zpO*9cJ8hTS4FaC`30pb$f+XJK6H84NydYLiTQW3%>`M;bPx@v&xXZWU0%CBO`C$%a z$J9e#W4&1{FpK`GpA+a~A+xAJ^)Pi^qsYCv)ImIbz6tp~WH|y0Hl}*POfK}+Bbyya zYhsj7I8R=wbP&b6+jb#qnT!n>x+~@Ki2~PHhjgj;3_9rqY4g%tV!oG1?B1!&A7;jFSpa#p5QGs zpZw+f&c@hZ_@{r-F%>&Wonz2XT2af&ouIvlbr8IM*L*^f!R7}%;RQc3P%cF({~B4$ zcp-=)0evnxXQg;d`n*@%$6OdUn9vg8><6KLyC|KQfKOkpDNp5l0rsK%>6*i-x9FQR zG4F+_`;r;3R1!w+!LAs_Pe^tZsO|OO4&95#DFMXL`>ua-i>E_a=e0@nYl-A!L#y|- z8d45@xE$o5#?P9s8rM9dQUy4F`Sixis4z;(a0`{%(#sdXU$Lj59{s*JU%WJVoOcO- zG;gD=2RuLkQV0G-c=_B>C+CNN zbZks)@QE|rfLn4IM#`zx8wHifOL0Jd?xjM-@uL%@k=6XAL%`#b+c6Ju%9y;E+Y+x1 zQv@wd887|Rd}}+xvV3)0`svctH#7pi(h0nEn@`&J)ylju(58M45%8}uuU>HIN(5lNeRSin$!s}|QXhr3@Qw~rrzRLAX{W#BWA(3|(6s;SO~7 zxgQeIX}{V_f3C?3jmMMmB<>D45=s&M6qy5Qutp_MqP7t~-)R{X>R~$wZ}3|)C{N6h z?@O~yqVX|+@?}l&3B`;ZsIsG8HjzfX53rQ^aGFTLhYIlWOyIuE(T}u$-N<3^67Aji zz@mQ}y2WV)+8SP9`)UKG`}q_VDkx`j-pjjlccdqj$G@f*oCP0f6=KNxyZUUOdX`=V zZ2|AtvDW(qWQp3QF2KU^0Qrd+91hE3coiVz(93o;__w`;p42wj65EFo4(J`^ zu;auYVI)?7Po>)JWJCysMfwB~6A*77`t#W8?4^FjJ0$bV7z(6e@c2?XmM5SmX?NZj zxmN*1LIGtwB>nj_uI)Dn`l6d5F>D}50Nw1Kv>t=y{VraA*pF;!8drS{$W~F#c{<%u zK4?R_yn+I1W-Pra$;#08;kerBz+(WJ*pk;Rqm;aMR>WKD_z*abeXee4UO;V~)C4-? 
zL9VZ8>6gFWsthE1-+ZfXpF}Q9^G#Gw2`gF4(%E5AZNN%KPvmt2{T`5&foM+J#NPds zIZES@*1g4lfq(30z>;z4E_w7ocXg<_3nUkm@oK?lg|P38-)6?}+A0fxWh*aqyxH^W z{VR^;D3ARov{oO(*388-fssI6V&zrh9P{FJ&%U#rO8uRv?%F#w;KBpc&%O`VJP*Fy z%7PO;XtIh9=}SGnwgmg=@M;cOJgRc?1tpsGwxV`_UDKgp;WXBI(%gz;@SGkmqAJ+J zeYXP(I!d4?cSi~iREdY6*S2Ix1k417<^&maZafW8i~uwx#O_}A&6ZalHJsXDef9CT zhrSo<{pw~@tI)qlH#2bS)r09GaMea>SITt>chKMfwV%?R>Ea`Rb2+)z?yLFgU7kGx znWa8|k~~L|7Mr3o?Ge4~7{H=$rl@c-dt_!jQ9$OV%;V>VA1lle-4{-NDLnVT0f)}NKxB3W8KZdNM;xR$8=1L7v#2$?Etie8x zc`m48>sph#?d^wq5{*Qb7VR2~|X1sXpHJu&}X`NFGK;3=mkl=K;A4%(z!z zFTbM~>V)K(Z^;I<6pM^LaWVHNEkw)F?* z>w@QeQNC5Smu7MKfS%r$2ku6X6a~0{)hpf|1a#5wrlUykpe1o5^*~PV-zjOk{Wu;C zlp)s|U3eOuT0xAT?1a|N~7ntmY2Tf`C_upt9|49=6PyfsR`u{0Z z{NL&noB!4+KL1mv7_4jlhkm~QsZeY<2mAA10W;*E)yvQC<@Pb7QZ5Az_{YD0;|X92 zd<7rppMQ#3bOd-HlOgs~;76lz%Lm3q6Q_yy@J0x~0{5$^7wE+WbVE{=B1uJi1AA(QlPa3@YbBGB3x)KoKo$vt0 z>Vf|96HEhsZoz-Pyl=4rAK^%Uz%kIvzZ%@{VkaB9NM6Li=?MY5sos_oL5O zl|}&2#ET3>!NG?4?|6So*?SLA+;^w@mrXq%*9_2Bj~VvGGkdD1QF4@xx8WMbxegqAv-EI;@Liygg%O~Y>-b#bMHxY; z(Lf=mb;u_OO+950CW8bFeGgyn(s?tubHNF^p=lN;AsOELInnXmE>vCpt%F;4$=Hs3 zzMI}ZfNl+##wUJ1(1iVezP$}U2Yup}_)A89sw_*Ewl2PdIKwe#i~l` zuR%A~k-kF)8AdcXU@K2f#bX06L}>y8Xf|uP-O$l4++LEN>D>jiLcRyvmN4j+nd}zYKXN#?)76FZF<^Ceoqr_*S9Hrx@$&>5_Oqzlz8sQt{gx%(k3IX z4R%&okkSBT17*p79%Ck26s{r2t*E+)=pz_$66+pa5g^ZFHc2yZm;yF(hSki>I%UkCvORo}LYd+-=L!Gv%FX=h_xExt*ylUA$z z`vr<4!%EsCNx;&V=~Rj7G~UAEBrG{vzj2VRPw?%w2`5{B>f!?+3YsC%kRSkK#5*b# z0myy>5EztORV5uOW;wW}%5ZCcTVUvQ10*%?51`w|Ji#ko(kLN#kldzi3t%t>W9qVp zJsGBSNkL!%Xc!CGKZ-~80=SBmTJ{C$@oxk$q__YCL;Y#lE+EtSyvzeplP?FjWI}jG z$mjK)ZiVuH`hYGu=#2R_p~Y$6)~B~ruNTd(V;S#|LeYnCjOvCbv&+Z3{Q{rSr5owS z*fx}$gG*wrEhH$Az)ppIyHD<-#E&#WB>BER*LQq!!{zEq_B#QyZm=V5=YE20^P?g6 zXAHzt>XT-v07B4(uy}S$jI^bC_W`Pk7xdtu&KEL&E7~N)OwKYM;YK_~d2WLBu;Q_+ z;?UqL1lrOKaJ)PUXDBVIUJa*aV1HYLlRg#Q|v#XcWjtqLI2kvC3EJIvd4b|O`jN9+jTj4L z7nIrm)c#u^T188WV3HiN*=w$c`ZMILpaNW)&MPx$%3SJ&K_8oMhXXVsQgo8C4{9e?Y(#}Sbk3Dg6aau1bVxAf}M%)KrBfB={YV>A zgajco1KGk6XgQz0d&!`<0vFRDrBNAw&H*Q`6Eirc7{bN0n`7wSPfwc-&`5rofN^0z zh<_&mnr^+ow|jm5oSNgs0_+tk4p&mrQ?>(a&)}m76gZ#vma6J^tKC%qiGe&Ls6~v+ zll`qRoI5fu(Voi7`}U|+f2jiy!SPI%Gt<3ZwuB}=l-cB`%3xc2g-p)40z1Edx%Ry} z>tF~^Gt~!}=qXMxk?ldW9G1Z&>ne0Efi8GaNo>{Yo;TbR+~&u3?_%rV``f&rVwPCW z{u;lk+FCDH3Hr%pqmH>m#Qm0t>hg(?sz{MLU9SqS``f1+*A1gAiX_RLK7q^5nujm@6`JjX2UPOUCfSd@1!i+`$Jv%XykYdl>SK;BC=d!@ zXD7&p^J1RrzC4Q}e2_bTduK@e6OddNHf)PDw~m^FmOr;42Q%&k7c7dn)ms=AF{71D zS~~@zghuR>&JE)h3sns+4D(zEUaD*!oSzCISsVxVLvfa)apynMtwO&@uHHBq*|8!( z_cQN?(&h?ZRfTYT28<~~jKDM4S6@c+%z^wN?a`pe=4%0hSB!3d@^D|nE(NmH!^wK% zO(B-tW^^)vmMRW%`0?^IFJ&k521q*qy#zql$w2cQmuo+d8aEE+0`TgZ18j7#B$#=4 za#6%?engvUod}wl#?3m?V6a?H@sfR9tFU-E(xHd znCiMhT&xeVp+J0p?N^Y5M_eHR!CqKv2}78m7LL0a?Cb_=Zws?c)e8T-AIdVuEtD`Y z^`W^=T@Cz+N0$_AD4vXN-wN1sL6W{_E(<4I0D7T)TY>r{ElAfa0FiI5-0YS(HsnNz zN(v~@O3dppE-TqF3e(kXjAp>Rz;`b6*K@%?-PMW#;bQceiGGqJYzH-|H9} z%qWc*s2o}kD}y~XNVcRyQ@VrFccIx&0l~*Ij!}eP%x=rE!aGqN??L;Wpap>8zdaM% zebF9B$sfwMK&5j&;{3*ESI0Z8a_e&>P)mQ8&2-SW1ww!7xCoPJ04M zYA8J(-?dtQFi$2<01XGWpdrI5M;GRl#n=Q-?fPm4kIxbSFpve=E&nZ7ElgR<_>CqI z%%2JJOdyNh+HC(ADG~wP?ZL9{#n1Sa&YD6sJy3TWZ`42`5b;S@Ud6! 
z=}(?U2h#*HAGRlE_lBQSsUm-UhYf-8GIdhQqr%9|UK-D=KHo@y(2>i5}};Hj8gsLCrV^)8qb z-2lrwf9C$N05Z9-S07ke1E96~isq5VK2e!}HreF^I;eV(!5EBPW(25t-qBs^4)X$Myq@ef3^Cpc1&oih|R2ijvoaH@y4m*EQM zbWSJL9iz1!kCzai2S){`EPp_HdhH{Baxi!N^3}+HGE3rxaMFM*6We?j7`A81u(?-% zAFc zu?M!PkS>TOjiqfJ6xSYk-Dkv_FJOXD-qUG~(*S@!PrM2S`@#&7({(ks3(?YV$yRUGA_c zx$&+;KaFCT@3J{@V1+n3Ia~W?@C51S+nvYo(IDb8fn1=*xF0lc9>NE_CmwZwoNaC$ zxR0i~M%5{5o@WzzwgEkgwc=-t>yRMzeP7fj#u8lw^pzg>E(zu{BaJbyi0QqO47S^& zJXAU|p1(0@!V4baeEz)%SVct)^B=N;J7+t5BqK6vl_Xlo`=GJvPntzSDLKa{_djjP)!^B)GFaJcErvV>AXR{wO|S<^{s z`Bli*beljJXH*iny*;ip0h)nhvqH^{1i;T`UJ_)mA82!tFAu>L_?}%(;Q0C*KFo~< zKvCFcGoXYFZm12uPRZsT9c-%R)gY1miv7u%%yBo=Uda2D$J<~B>4tk2YSs|BsqHVw0{ z37QvjN!s)Tq#h2-MAk6Lm)c{%-0wS6p!KEmd0$aea)T>3aGvFshtE(BPwnNoNdRfq zGma>t+D%>mrSc7bTm#aN&OhokMM?+!J z(r3Jn@xnQ%YwDRNRFW@@BbmnG2=E9*(8X3URmkhX`;Ow9 z(N1klA`_~h;F!FBW_Rmo(XI}z5HK&gOG=7(OmI=DAcEdrnmL2owAl;y%e}jhprjN~ z1Er`Je_NCF2c*K?!h5SD)!w~rdAq&+^!;>qmASc&w@|W}ZE)mHz0*&Mn?dBlc`Y87 zt$vHT76QMTrkFQ~^W-1~xs*-v5O77gLoq?uhA4`CeZLlen=>L`)ZMz{>pXt2BfI{b zpln9%VK3D0(DQzowhG{qK!z^h@La1^9P6nmi^DD8(W8Gn;%I8O?vwQEPpj%)ThO3d z9=Hfy1DvTWqiwBS7BJh`x*#(>*cAK``{;McX(R2u6V#5?Znd27zVom}*YacXk`ixX zbvGVc5dGSJH`$!QEngkgY_l>JW{9e%__!`4HKGNs^WW>+ZFHR1^F`6XJx#(4sztE0 zVSN$7&B!lbfj>}c-{lW7*RYCKp`d|jg94fJIIYrGy^=RK20f)ll*L{ihcVQC&4hi)!%2fiI@<9z%5ifq6?KhQNq+VhJ zLs|)+^RaSUoWrTte`r;N{eE{cjyfnT$pg=mC#1voHmgF82rAJxR@ewc?L#2~Ms*Kh zq)_;auT);@s^VQ&YQ*Ofx!YyMg|DRmvYX_x*zA>yG|OF9H9D>3%ebyeB+uoigvH4A z*xu!Twt$3cS%<4nCV0?7ZL?8^q5=~2oxX*;#=obcb@bwV#f{#@z_~Z-M7(bw z&0gjrwcZ`CzB-e7*RMMQo!6R`jN98vJVla>Aov6fb^b8sX0i-zS1qf>351LLRncVV zFyQ?5!HgxAY~7H=@}-%@wAi3;1=b*sl77a2opiNe+aB6CYnD1uPaU6dJjP2-wJvyY zS(abS+al2DV3Co6W1BtHZ{guNks9*f?&Uxi&(OIbrx%xIS$cP9aJ$waAc>rmDtU8J z(^an2t*j7~DO|uzQ<)q2&XjGzp95)WjRDQx4&ipWU%(q7(0Fhr7Y%%zgvX8$1b@+g zxc2Grpl+YnJ~WPp&GA{lt?Xp3d`Kt3iaRb7wSv@q|D=<*%%On0)IQN;d24qhL>KCf zH%wFst&!y#gdxM!u}W2p>!V-w;wR0(L>lS9X8XG?(db9K!Ue;*KX%4bjEy__T6HJK zjFQ%Z!ochJxz`(HODhWTe3_ch@T{zVqtz0UQuk$>pd0tI>RRBA>lW0vOR*dfV3G@2 zd_N~+8SWm`YrFQ3(ro$nD)l4C>!Wj87}46X zULM#e5TBvebH1AeB%%e&S#qv_x5h=LWmpQ2XJc?E9Qt|th~??{5IA$+6*v;$weq1% zA4YmGRS+n1DdyrKmromLU!Bk0KmFIZ#ea_gXWbNrdkC^Kc-6uBF0FkfO5*IZGvgR%D&TlJPN3c$sqcb-C$<5WUN} zz#Q!I&?p)bsL)13*6jo}G+DnyRHdNr_0lHHCO<`S(}is@6a`J29Cf>?Vgej_l{|fI z&LL{`b4PPmJkk9r+25>xQSP3iH$kzPddUxEtlve=pNPJ& zJmk*M3}M43>>^y5)iYGHElAmdc3HVA$a7G|?IrCOqr`J?HY6uE&p71J7V`#^IR&eF zbEQ2)&dhCK_uYDbEu&lDDKs}<%3OVB&d|!C%Ni1OhT{1o~a%!}tmwi8GIMG9Rf}I6iN{<`s`J}IpXK{T$F%)@!o5AKzrh=QE21P*m3oBkV zy|Dy?E@Ygls?~N;nN_uo`m}ksMskBv`Eyeo<4gx=6`K8xgo(y#P2{7~C2%TZ?&mJe z(|~m@mczjin~xWMxT^$Htlp;NFD^2w;x?tXQE4;w=;W_pbm+DI(l<~z+ZymWZESB5 z6s&;wepQ-(QnO|sHQ+tuFt?M`a4srUw#7p(PrK@S;#1mO0KL!U=0$EsT7tFebb#$s z-Y}ILtv5U090h*YKjm8c zUWtkKnR!8cZt00Nak_rWUU`{&Xt`w#t=q2XU7ul5ZP@ULWjH|2Jl;Q%)u8>SS=@Kx zP`z0TjCv|Qy~cG!fNWNHd6Ef z*IO{@dRr!5-A&lHCf_WR0_q5bePk=@J%H=ET`WH*>RO>~(}xV$E^EwwgbeZt=jS%@ z4F~w-1F<`J-K5M-`r3FsWl=%X`X$|LTkHl;fLT$=K|srLVJq7zcUWNdkT0P3P&5pG zO1Q{0MP8khCey^qU+3>fpRAPE0zhHV?~Q8;iModiJy5B?o2pZI3$2Wp+6iD<6zRFk zK)F1=$Gf$_f)1XSV@f6Qs47zc2FfiCO{{R~`M!3hEIO}ks}rksrRcK5kmWUzg= zP@uZK^!)%i>u^BV+vT{gw+P|7C&Mp{NBeo#%g^%OEkh7n_tWmV3is#TZBk3`W4xdK zZv5CZ)yTTlav=WVF6rs#@9y&%olQDu?e%q)Z; zCgi;!Em>#@y520Sm0?B4-WG0uwO>B6Qw%u|N`n+mvNBgDCoiw-*Mhen+vM$@DOx=t zkS{jQdTrVK%Ln)7bVB*AZMDL@Yw{UJPs)ls-;s<$PW^sLOXzmp(J%-kR6-I_cG@m^ zAG@=*J`2e`S~>0cxt<(>MhKfvg+LI}>&&I?7OGCEZHdw?dBoDC9IWks%HHJ)ot&pe zcxu_WSsJ0WN9w7a!>dH^LpbO7yLY^(ufBi|mI=DQ*Q>D~X(dl`QBqg3xL=`AxL6)q zg{N*;vN&V{RB%|~KwnksbGhzWaPnygGN7iji?6WCf;7Bud?r17hpNukc{iXHIVTP) 
zb^i>Ni<04z&~6|g?Mj1xr&mLEH9m}S7Emrf?CudfYw6S#jje5`=DlP|i|4gQ9G{bLYlp z*bl2SpjTWcCpoP?JK|Z5+ULXM?dl@yaFqD8@NLea|5p%%^SLMcjeaffQn0!nVPCb5 zNfAa@Dcl}I2rKm)_1c@->fOkP&$B;25Q*+GFj1(_adV}lc06;!df;g%eK;BjuBb0w zVEqDvQ1obN=MGDMKSllxr#A5(FeI;-Q+X$itzgLmhvSa7H=s<(QUrhP*gfC&WEiLH zVbfdyQb4W0#O!6BEnPwXo!KFjajU@?pd57*89J=t5tB1MU87(Bpox*HkVjvtPy4#C z7-%P?`=N!t`$BmNw6 zK#0f$&n5M6J)MJZK%a9> z%g-OT^ENeq1d(LeiIjGMwZ>r;S~XgC5Ax-`-#tiai#s!2RUCg(Z%(Z(PpA(Y z%SByC;*FP}z70R+fBn-wLPyzLTS*JGt4^7oq83P*um+)YMZ4PY3cIzM!gTh-$U+0I zC8KAbs^6=wz^s>YQrjq>zwc7j0-=Q%JZ}psOTpdiOlHLD)6WUiD(MsnrTWJ~2tT@Y zxjYzMvA~<|&b!RIcsoY34u@?(^^zOJXGqKEJD$o1@k&?He+U%!Msfl`c|(ka-!3_LJt7b_FHM|7YCO^aW)i6#K|-+|R0WvA9i5$^dY(7WrrE?WCW58>$#ULjF;VUE^B!pBOlW2G zXjkv`f1QYeYk0U~sc!cr-{Sln4XMb$Fmg5MG`DmP{kALw-v9o*HxY<0r@P&ijt;S) zz>`|PFPRn!!;su$Z8#*f>F2_fvaw|ymU967Tn{&5xZU~}Yy;a?-0eyQhBC_i;4!$a zDRu=yN`B!^Jf~3?#s=yN8Z?`bhs$SY3EMST4Yd9ufF<;{KY__S)l$L&f5l4= z<<9O77aMc2f0b(xMu-&cb^9|&J9TZnr@3`HEd80q>2GSN$GgB(pKKxQ)_As||EEnarV|*|NY`|rNj!#PQ$8xQ*lfMju z{^XHHdJ7^wF^U2q4}IO@IRg0L0(HUcfwq5HkjLbzpUpwF&P*-9mGyn`SIgaQ(RVSS zf0kq5_NM0=$IabaJVez765;+-G~fw)0JPdL14AM%yZMz}Qv zjq*k~MYo$)Oq7!pP+5w>ppI;#zuc`@syv-eukuSd7KA&f2YhFS95jY zVF)6+`3iX%stn$S+DV%i2Sv+pduh`+z6?84BfEqQa?`S^=>j3Aovhq0S7Jw_Fft@A zyj!aCT52g4eMpDrx=F%Y#y@sET7V_u(aGYqV)G%rZ7Fz=TxjzO!0zR@w>q4zwtwAP z{LNoQft~31U61QJiPPvve~B$EU6bfYEw{JPC^)KpE(tX$o{;QJY@J#i3YfgFZqWE6 zHD?QG6h&oflXaxG?9NLxguE(!i~ig!JgSHb7u=eLm5&uw(B|5dW^JCHd`2DJzSa4K z`l2!MaOfCmYh#l;c18NEe(tmKv8N*v27o!(qV+f+7jj?v@l~D1e^q*vwW$WKVcs8I zz2+CgMck-6^mT6&>ZI34b?>|U5+4s76^7VZhb8=Z%3y1*pkUzpD|H9%u{(k2?&&a< zG173SsL}KhR{0T8JJ?OM^!(I2zB}k)??(n9o)G9DtGKa>DI2#JVV~?Phv4yzG;#BM zFu{^qKhersFg!KIf8n$=iPdqW#eTQUd%fs4O<0bHx2X=R>!Q!4!1nIT{UaKm?KO}6 z<~5Nnr>#Edvx8=$4GEC@{Xu`;i=I4wprGDPkFdDE$deYa zytl*V0U_b>TJ?|9K;B*YuB<=j@EHdMkG#u1>gRjzApX8ce;WBgcU_|>tMCEKC}=4} z+hC!y@avXT(U)U#{p__R^h{%?93fPRFMbDi4<%K`3 zK{wf*Qq(6L=WRl_%WUg#2ajWz(Qj!ucxt5aFgMlZ4sna%vu*nASQ6ZnGkJxGBStQ! 
z)nK@X9Hr)He;gxMhi0+4+EtJ<+~op9B`4XOR-U&zlrx{@i!CAu&d2vkCd>4)PVT!g z-D)VACQX)=ymY&T&~6}mUzP)IM-a)DK%;-Vf2j(k#3T`E#2N+xi`<=@eP;%e+rXc` zcpeEwY2fSyouvvAK|=ysk#OqHLQGvd-az~PqtQ->e+!X|4+ah(_w|v`UC=NE~bk{<>J*VM)zGYiwW$SMdR&vi6C2@G|^anfWpa7Y}@0Ed>eKsAY z_Y+h!Lph(53%4%6Llw z&Kx?m4xM4#bjDD4Bx{V8dF<84&JP3o24bUoe`O!HdXGyF^}>qoH29O;gmv84R?UnL z39{qh^98=!gg!&yX$1|6#6*Xw(5xWfxa)q)poGC=JQwie2Bx20)ca`%34^>Q?WsMp?z3FgyED%@TrQ}Ue>ss} zUvYX_j0a~EFzw^l?Na;A5^ z8jGN?RjqRjP*jO~?6U^}+kS9bE&`OIXe!wSW@O&?FoT07y7C=)&juiG)(Y4!0Dre;UGc6+T^xC2S&2 zhkJI>pCdZzmhE`FoIJ1CY#{wKmr}r=JiagYYt@t+YiB~fPCaekJXz4bWGxU_LIxw+ zHJ6wvHeeA}@grl`;yXejA6)Fa=4hPM#LaUVdX7kN_a$GF4`Zb)l5>&h>~w=!`qLcA zLrJ;Wsy(@2f3}!RepW2#DlHZ$vxSA5tS8}A!bVG9zl7yg`9t?rLlsCEZJQ z@`k4$$q&Ritzn7l@yAnm;kxzx;h1eW=&MDzJ=f1&4NLLTJ^=DpG+)@76{)L;C06H; zDl|tcH8!Tbsacq*fqk8M6O2ziH3`rt0MnBwd@5wsf2h{jvJvD7U%=e9n+MPmSRJga zN%qcl^mOu-vZlb!6w8J6O4;lP5y_XPFY3BL`}v}Np2x+$fJeWs_qTBBrDS#o@N&(E*6YT(oWnrBLI%R8Hgjfk2O>dksvoTNyFs@SoN3^nQre6dH0v_ zW6*NPv1P5@Pedm-+HyE)OO^PT1013EmlrpQ{Jq((E~Ewqi~#v&nYRx(h0pzpJ*xih zpk2z{Ekgc4kj!mH=bbA~m#XG|Y$A{Tb6>c7u zuy2lps#vi`TaSZGHFn50leE>uOTWv-Tcn2%@9k78kOkJB2x4LuWG!tESBZe&*?vte ze<0Z*^0P(T2ER$(;uRlS$hK{Sz8p7cdT6SV(Tabp#08p*c;!PAnx~vlp5~UN z5uUv6vq+NZQZsBQ z5g(fMm@2#Am522%tK|AHnX}Sa$3LzkY;?3o5*&tL0zoK$Xrv;nYw*Pd#vLg`e{`DZ zdU-Zv4u#4~_tK7PZ3s2M0SP_8=Y_I0%L}s~58A8QoJDQATUR}@6r})9G1{Z=32gfN z=rZGtc_`Fo1LFw@k}tc+^cwTSHH#bbNZKsE{f1j?rT^3sNKp16!s!Z}qp)-Xylxe7612ey;CX8vX*5SD_d zKTuF9*>aYcERSgap`SZsuxbK%nJVueTCjE6A%dT7?1b*SE#L5|1R3ETe`r)2!4MuR za^t%v(>I61bSsi*y_HwQ<+GB?jdp59Rf@EO&UP?`PpXNU34dT=7Mgv zlByl(_(5W0;>;OqEt_-4Z6H~^JbJh18O9|J=b+zn{kV-J&GI)Ut@!DoU$)i)PHg%U zf{u1}@-hh*3+Dh~SIdPVfA$maw})<{cy0SeVG}!mjI+nE3fdEOHi~uO4KP=7>J(KL z5tba*x;49Ig?UcbyAMC+tIk?wH#Kn5^Hm!`-LOd+Npl)e{j(BkH$h)4$wO$ z55mTv->4#5blt7B@X+t=?duCd%G)$c3H`xo}j^p)rW=XN^c&7=Nv{N40t_rHMZMx|@$K;I05VX|M6nl#-4bGr*G=zEMw2=Ps?JBzxT;K{*pDu$ z+O-SAUDRoc#~aGlJo;-4r^!g#tUo0n0Y^X=Py_|I7a8f`V|T!iU8zyN;00lX55199q9QC37#&EAgaSG- zRxj0Fs^>I4I8=6*X-hi7B==2vH9+ji4zIfw4CKHZKyX~VZBN!SwT=G zli%)8tl!IPe^pJQDhs?OE47k}w=psxOBN)|tA7b^3mwWt>EDnUa?FR*E)bGxH)aQ( zU%k%v@r&NU4C!6OJy7xZa;sqeI(Wr1YX$ML8$HfsC`qG^@ISqr%e9UElNAZ}1@VU8 zp)nHt-3p`6iz%DhIpW_CPk@5Po%9<>#pSCWf-=??f8YfOPa!B|65lPmHy|;0nV^Ew zZm8jEn>9Gg2c~t&qx|{glaflhvmx~H>M0MJEu`J_da3xRr{-M}I~m3r9;hmp{t1Gh z1={$v4c^1SpU5GRp$XqCZZU!lnB}T9+60f&bkcJXR+Bq=D>z4S*IyT}<5hxL357n0 z&qQ23e~UHtpQE%`^dFIbJ{C2!rtD-g2)J{v=)F!(pPH5yD~G$1%5+;3PbZTe<>vKy zDI7c;TfTu0;d<>GI5trSeT*ZT_}V3!BZk_SrVA5*Um`qNH&N?O2%Z8Cs%e$`$OX263ZX%x~$VrNV~X(KE-Rndw2QCKWqn*tIeZ9 ze@V`|6EdpaoK%=3E-qmo7>>kvb+sVrXw?J{5MXx=(QdThtf8DFaLk9U~lG!vY4ih?z@wCf)zw6jQT3=t> zvf8emm)wC@C-LGSJeKC0?c0P}w(Q=6W@j8xg!PimfleMN^Dmx9qFRS?F z^nzD+Wv|4my!78UlxN9C7w>V^c-QKv2;Lq=+}kuJK3!Pn&+Y{`25dJN= zkY5n@btkR4Fi~FX%-zKsH~4p{J9yUF-Y^|(|nejhU95f#pKP^6kq_fd0%n` zN_C>CK0o}!L{RMx?@x=nU2*CHDt8+htzv`xguD@iFt$SiCHj}GZ{d>Md4b#rCv3&X zy$YWYC`2t`w}Hh1IzfbcAk{F~%Wu298hDu5ClPEYV)?ueF3F-Zokqk;e?DiOMEjk8 ztK8S>X4}chsbQ2HA2t{B@o5$<*oo3`4*XGYLGT|5aLI4DP!+UTZC(P;r;?E~9Vp(* z<@#g4_NcWc&>s=cI&&QKK^e-bvm^tzy+>*HG7g^vg1 zdEYIwwg(e+OuZ-u1`0MGf5D2-7m!UK8#;HjeCfs~^S+}mdkqFuE1^hPYW3dk#}S_kKDh6*dcQBT z?u_=>`Z{#v5xs=B7kR0ShKv;z!0JMD-cL`Y?8?pdNM_5hqy6Mae;rPpcE1wqlYG#w z5Ifln&xIskojw z`~C+!zyE7aOXEf62w5`S*HW}#NiVueK-lbFo(WGm9jc(g4f3)|RNfRxM$wdj``UJY< z9=+gaW|BS`*MKl<%-FX^ByZZvq2(^VC3N`FHrfk)oTLJCR8xFBDz5fBT0y1VwFhg@pT1zLd}?0fePVbU$c#^N;y$j{Dq-sz;i z3ZzF~VTe~SOZQz{lJfbgNS}AOPenAN;1EpWL6N{rdmL^?L}c6Q6DN z?(gLCd_j-ZFk#>1L3M2W<$Xu0=?)^l@OLs4pm3XAe*&?eZGd_howJ!LF*fV;s$#}F zs56JHO6-o$^ihPt>kz0kTi%Z}f5b3%Uiyw}&(lD6lT6Koz(D6d-rJY_dF*!Wk{PF) 
zrtJ&wbf&gRi0hew8iN2n?IwC#4Vv2`#U_MLbr&)Mw)`yJ$8Q$O43=z4YePB@42w0p z7IVr$f1O3ldV4hvw@>A6wj`vYj0gQ@AB=|9ev79#KNmx)MdQZj3W0%p3T?*cs6Z3a zGW796nW_#ZvIb7^!oBnIf^j`d%yZqq(ns?UX7d(V_OeP{Tzc*4c<*?X*iF7iu%;uee5noC*<(t$PHNuXM? ze|tNRO1fAj{d&ZXdsu$Z2?@cMOq8WkjA9reoC8$xx?w&yCS?r7VGv2uLt@1TyZ1c=zQ-@TwIDE#uU2Z}5mkC14`?a)moG1$9X=<(R-gf3;6b zl~i!b5k6}tmxo+56~?*zf$Y|p345k=1ZN9#5ME_!Z3C%GoqIzMN~3u=O*tET!znNc z*FRt@=R0#_&d;`VoscTT;e5U*O>(Z`t=X1i@(jjJp(Gf4%Jcm9C+X@CKaGOEYaZ8?GkX-pTuAbtaSJt+9@q zz)X=U2C1;x>Tt3$90d+jeyPYdzboDkbP@?Bw{EPh@Z9K&MQgM0}TMT3dE^^~WHV)O}7NB;1>svJGgW*RX%9;UcN5VUJZv2Lk^k=%h zc+sP&n5*Sj$e9&%LJ zL~}oru~iBCs`qSIXv;Qn8_U~*R!DdgGI%pnaG(}Pg`wEje<56E6~~UB7*KP|f6hD$&Zc5Ea5W&|KRKK zaozTDM4p|ff4`UYdn;UE)Z<~DY;IHUOzCp_CO&&<4<=&lYC9sbo8Du5@s==mIh|EDWKAzTKQWS>@|Y&|M?(-k#GZ8^S`O8Mse`nf_saU4*GpM+e<^LUcds zBixm}ZLVnvJonyDakV7rjz|?thjzQT->jd{*UN1uf8CiBN*~#5Z*v@a^1`?PV*%Z> zTXqYc(0=Q_clUsV_cvQkft+%AF$C_dF))1faMmdQKuemcXORuH->wabiMi?yJRihV zIKvNsd5b%W$2(kX(q{#)hhRg+5Zgg}51*hUlB4f4x&4WP1se&XQ~xc8{v5N)J(;ZQU#xk!a~*R#jOAAm~N zq4d!mv+54ZV!ue2#ABAL{3=qyMCGq&=2rzWLjT|V_mxj)k;$L`AZF?@tb)H;ozW83 z>k-m*0=H+3nn5K2N;@Z3=pmKtiL6!%e|slHXAo?AA@|BoCFI9bn^Ssa>6CZwK&bZ= z4KtmiJL$*802mJd8~nwwAeM6E*O5)KoF`bco4d+cs7r6>nv)3Ct;Y}~;)yu0Ea1tt z6ESRdLHZTFhhmUi2dZz-eJ$Wkz;>~;F%p3tSrk_;fxE8doW2q;v644TDvg|_e^^20 zdD5ulFqp{=S)ou1=Uk}#P~|80AFA6`VIC3clR!1_HfmCd?Orol7PN8RmRd)(G}oQ9 zi9kv44HwVkxzBrAM;APrp>f0r?_#7=oobLH@um?8BReTiqa?Lm^zQlnk`Gj*PXkHd{8C=aEgwqhnq{8JE3)!bEJgJO2}C52skRkVmXy^P}l7@`MxLb~;4x5L>UF>Z&)J z745Ht-Fb8U$g&E;B^BN&$O}e~4<9{NO?rc>LOw!wFMsj7jnI>)O$+Tyd!bv8B^`|K zU7PO+uv_B%=_S_2g`L^?f1QQJX9z3;>EPz1`i(6Vx z2&G`}@{e`$(@p*Wj*o950s~y13ElJ9LfAADlz%tC$d=wEB zZzK7+zEeb&f9|(se_L{7?WLa9pu>vkZa|9X`LE41s-P#RKwK-5*fx4S&^yXI{X#h1 zCnhpr*@$f{WzZo@yZ$4WI@Mpx_c`h`OPnqLd1_G`fKMSf4}(nvcG+l=h>+?P&oup$K2e?*&c7ZGxNr&`PtND(5I7*)(qZ3Cp$C5 zyARq4KA={+JO2``J}WfP?TtI^s)~XCEH?QQ_FkYw@0-^Z{^DWTFqwOf@1CF>{l==( zCWC5qI+c}vC^xlubC!=0M6JUUCuqi-L>cbIchEtRf5)o#J~dxe5qKD8wk3^wMFTiL zfC2hMP&w2u3A$B{77`Yi%07Qqqpn->d_$&ixYIeP5|Bk`ztL^fA+q!e=Xh+C$cEv!;BqsG(Y5ch&+8q zda^b67BqH?F010}_WnFVmsmwdhhFo=DkoZ&2kBbu9Y%!1Cfxkr3InC@gzKTR2F!>Jn5^5SGY{4dxSkBKwUof*^_yrB2PMr0Ss6GWeS+QsW=dO%L% z^O#y~gbC;RdGp8UZn~|T59X>@+415heU#UFJEyQZ=vc9U|C5yyO;Leg8n^1qR=k)ipUe!DjzM+ z?Y1J+_|2L+M<~u_@0^0Pp9CQ30Ji^UdERtLh491kbmm`Z>`!F$9m*#CfZs(ve|*}l z&k&WXme=4?ozU4{g{lndnD(yj*O0E5ad@f64;_tV`N#u;uv2+GL^A1A#V#qR9X-Jz zg|$$lS(Zb^a_%_kbf`A^rViNq-8+Je;8^Xpmoq#Ep8G9l%D%eaxZ%#JX!G6+3o)~V zX?wVhCt8;h1&@I`SV%-H{qXMHf5dX+qute&s>`ea-Z6~`zF4FSh)gqnoR{^aoy+Br ze)c+lvR#mRNeD`HSAXaiNRKoZEX)(m^3BCE?(j6d=M{CY;Hj+C?_!-S)S+1co*$4@ zF2HPx52`qgzEzA>b`w}gRcY=*|=1wyK1<04CjR(V!}NvpoR^CRDPK?dVpn6%ctXj-Ryd%H9$ z1lh;qWB=?~gckKvTEp4<<*>FnMdt@)t=S78%|Yss(DyLEtu;Qq;OCyc7Wr-Z{Uy->*mk zTitT6{^JdaMmyPv>5fOAp{g72Bv z^ZY01&w_5I`|z{jf8GRKxGVCyF^30NQ7(HqcB^|Lte5P_kV{8D?)UkPGUdZ17(cm3 z)^rFvO$yf22r@3+Q0Q%U3$Jv*7t8hgo$vgp@i?Y`UayVtO2-Dq0IvzPMe(`T!(mS! 
z#!|m5#{K|$qfL8Y&{6c>>y=l5du*pEDfkL}@H~-+C|I=p>0n^Sy1!&khRr8(L!=w^ z<*d^7V!o%V&aBV4TKBN(`@3J)aln7cT*_Jir3RF=q9|TqttZ)~PFm-q0wo76%p}`$ zJwm15Q=gVtU7!zfZHIm%Lmp7(n+X*(lOq?=Y~k495c-`i*0WdNC@g)PCvg_`XBYF| z%$r=4RA(I`l|3%(heKxLpIN=VLUm6f?NgK^%QZwo=GpeVO2@|n-|GQ~C#ior@0gGM z3g3vf=s5F*s?>#Jt1OmsU5F!0;RgWt^`Cw?s>Lg0U&AtMwYgiedWyrvQ4ZhBNqTuR z_~U>TvDWSjKgEHPNKLU_v1Drcq1Wu zkGN-gf0uj^9DOB(D}taz9%DkQA}ASZbpyC*sqyBmLj|5YpQUQn0PVGDf__ z;)lX?SK9v`KYo))?mZk|bCS-)Y$^D}pkB=aRNU{zr$3+iyk=>%KUmJ)8^Jt^{o`3A zUTtvQN3m_$-*>rg z0)X1Sa=B51VQq9x(>2-UWY^MjT`L7nZJH^coJX za(PPbV-VY8cj)cvM)>^Of5vic5>?6T`i9YWJbD687_mg2EMp_V`$mdup9 zQodGHdB>@L6%b<8=h^9c_rFw!V^*`u$Z88-neHfYaMMkS&rYz0JH_= zuY!N~@9a$>D?zI#W{zy&Y4~|n?si(;`-^DhLVDburz-wFD4;9CNMj9sXnS8)i>9#7 z$>)hg=ywrLzgxUJI7XBUK*4_xd&KfYCRT8D{fsi@VwN73$_eZZ`9{my;p`mIwYhg+ zHp4eetO`ek)#``Ft%I>;3C_`J<-B7M+#7%A+Z%;?&qncrz&5S6D)xiPq1NEe{WVAo zY5cXbKB(0X#I?EK*eZ1nXc25&v6g9K79XYb^C%gEig&ZLie1hyjj^-9rx06$~n(QziP=Qe5u{^5>25-lr<_9(BI5xcvJm7XZ*?s?-~Z z(#mjMxcTo_;*jU3SMI@oIIfp!-kkDj9e1e;II;RTo;A~&e~-^={zO+bOV7EK6bBye zjqFqbLz=G;l~xSSL#Fx!qJAK$`_6wq?(T?;kRUP)(elENiD3@By)@k4v&}l1{->Yf zM?E}`o5Rmb9WobQ*%97LvM0k$thT$8s!URYU5iJNHy@np!kMZm;C_XzO6PX-`rXmi zUo(ScG9xH64;gjPBreE3HENZ3&;FFr=1wQaF|32j+2SvTf5P@ol#xH&J)D2G-2}{3 z*i*4SogMFOH>w1V0hYbe`#x3sbkgUNnRorG1*JnKcgpUHw?lJpl-@xR!aa?e5ZZEbcu)YZwIcJXBSt%C4aTY`^r_+@$){X~S`zp9qHA9J=ZQKy z#tTi|5AGW1^;u#@|73WhR`P#Qt6CDiPcS~716#hi*>r8#TNAGOnNz4Yyvtf$orhz+ zbK@wyn)MgLm7#X2ms^s)e)(i6f4olLfje=|XHTp!7u3Eo*!Svy0&C;6s~%-~JNa38 zmm9HWBWxg^KXfik>`twa1|FR})cwt(X7`ag#m(4D5BziW^&X3mGRl8!hlL8J&WQAZ z)kTp_lu5jh7HDW=dcq?_kYnqzi5v)Gp7!4j@9egCo@p9A|5`g}I*zy8moyl-)x z8CW&hle~|uG*B%newmH`E~*G|9bG6|bSCM$n(KFk8km=`ldupo)W(@I+!Wm90jzMO zg&KLv`^N?{+3Q|;O%Qxt&N~Ko@kUm(*;6Q zl;Z*s!bi+f$QPAH^c@)i(@%cDEU=YkYVFZxHr1NCpXlv>SLZ_FHf4@>S?IbV!)f?( z8O5l~gUs3tjdwQoO&a@4Zey*q9!S0jDDb9@uB3`39c4s^Eg3skAKp(+egj*$W?s zHzv-7*W0pWAH)da?WC6prn^UeIKE4Wc7}IbQ2aF+skWkcYU5M>Azz*W4Q#VGz9HyC z)g^gb&u1!4G(Ug6>b0Jq&G8I@4zL|c6MPWdX}#SdRFHQ&z;K?qoQLmjC$;?ZV2P9M zGZx%LntMSrPiqu}5^fxY1sVTBCF%P5feXx;@A+=7{* z9x(rd)#vt@_c`p9C1O!QdcR}O@7UXNe$uSHF!SO!52=4e!gDzeK~`OYgZwZoN>g6f zVmz`mbgMuA6WNj`O3&*K?7k~}qSd=P?PV&=8?F@*_i_##W!&`QS}gIgn;#Gs0xFWh zH2j!>mfIHr3_q^u=?8u6!Kq%`TUX=`6H5N!0AkV|s_ES1Nl*+kUAT!`KwU9X1Hd*B zasCEH-;93+Z=ASelyAnJUqPV1wZhJ9?o^1i+$jc^V7ki1Qcl;u`_gitYW*y=($!Mx z^>x({6P0UB$T7fGzgA2!-HTBYBv%)UB-n#6;@r5HtAZa#j6(Rsj=J!6ml5+g{Cr?A zDz8W`@Ym=SK~*ypPss}J_9`D_?x=-#ND~x$_~U;iwo7w3uAiuQs9ODkNl}fKFpW__ zEN{P{3!&v+zMmZ_6_?sQ@RB#w<)rGrVz86>WCP$Deb5w`osdZYxt;9wyW9t7y>WMO zrQbI!xlZ?i`hpn`nTb11+GKo-{{6Al1GN;^V)7q;a{U#Z^2mVsLBC*yq1+!;!5w1F6?f3q0 zW%+aWJquMjV5jvKsYL}XXC25Y&7^W-Z60GwupDv0B z25li&cux$=yW;y(O$PbN!KFQ_@mvv~RQUc~5qBPL%p^CZ4t#y=ulD>pq#UK8YIuJv zAa^Cnc6$294&ch`zx%L6;RW(H?xKPzPyVVw5kKxvD(Etp`YsGK2qznhi+ut4Dv|gn zY2GW^@gU2xtA|HjKZl@EeP4C?DaumzolH9GXZ>A2>esu17;=?eGto@^IE3BGvM$xL zVIVB^EW2+D4o}>b@2dI6vEiO0NF#sSkY=0-KaSye0Ck~r3ByP)id#{gR??eS#6!bC*hHD&XAkI;D$i{+Ja4ApFY|DO-S+=4T zjqdMvQDh$Qy?v+}JoitOWn1E+>!|lLMNw6Ez~Lm@w#FJXNzkbu;%j>G6Qs#`EmpVc%VP)I17tDO=GQ>;Bm6O{So@ z{f!BhlG<%W#E?d)Wf3nXBjf=V{i)`?$ALc`lGju0)E#hK!^+#yGfIBoy2d(Xovy2p z-!6T#G&|-$#JyW{;@Y;Q`&UlXQ&qSk02`mJQq7Da zW?#<>zc)wYWn=WaeU8g*9{FSkAS^X_@K&(f_AZ|lXM1VwT)a~KKb7|e&c z?R_jawa%}v@9}81UoGP!)#vGJ{5anX4=YGbZ}kHm8!a+7aA?!2d-Gp^-UFH1=6Mm; z8!yk}Zp)dk*6BKIIQOeX|Ai^lFLTami#b!ha{KaW0Z>0}-uB1!dYj)i5QO?=(rCZ? 
znm-h^2x-q@(+VeP^R+&;n0vf%632L|hYZtTEI#sRpS{)%Y4~k0?QQQtysYtPSnu3U z-ul1R@o~R-synTZhmUoC4&eHg1oPwko?d%vx9c)!{$+o>=|1Ijk{08;m~2TrtoYG`k`Uo?>@Msji7ul(&@v* z{OT2iv;FpWcbh)^YW4GGm&FZ^FQd;JHysV{cAfp|K5+8A8%&sg64DrXH?6nm1+0(Z z&&O%xzCmyDdX2f$-R8cH{^z6Xd$Z2SY+LQ9c-j9lZs(uI@_hv^S=J8p#yn{VK2X4S zEoBw|>wo=^|6|3p^u_0Y|8K=^wfb-W<9~jAZ2$Xm@wxavSG(6wvWV6H^|aVO|7!lv zQw3n4f&9WqM-Bgf+s`MpXp<||1MuVjX6^nb6YyT{ZkBQrkGh=J2se!9Pt{9(&V+7N zMp7{_7V}p?!HM@(oASlpd?)XX)+VDCPM=X(VW|I_y*|0ym$&k<{VYE^;Pu`2$)Y{Z z=IwAAH00m(IPrh|mu$xfoK9OM48SlC`io|`dzyw#;DzRY4e$Dq@p={3i5oVv#9(0b zo}d4cmzu+uM)vF?30*w!N`!v&&l_K}Nn<Flj#?mri`w3t1QcwI%hz zaB5HuGPXt9ZS(nUSq0T&!BTaQzfuhx^I~Q25&V)F2oV}AOg-Da1P4F$#PrnXVUZqPE2ylh)cE3?#o(`NW_XRSVmBbZx3D>l;7sbZd&>Z?{iIO8u<(zPSyM+AHdegzfvIz_yGOn)AK>ewVKsodBt1 z`B@B{6)@H#_4seg8QFEi2xf--cO*xKN&K}I>iw$sr64~~3RJ}CS)Yv?i_xaFP)F~7 zqtD*){9`AZO`Xzy<&3;=ZeOeOBkS!q{4!=;lZjKZ*6D1+YgoG(>x8rbQ-V2Td|&oN zukpr-81W?Uv1XLoadyjojW}~__Mpvv@jl!U!UdU~;e_!mf|)P7tT{E(`b{qgaHVla zHPUy-Y8K3@HhXC!0#mnC5G)BFR&JJm*zP#Btpkd(BcqGtEYq^lG+578s<0gw73?7J zpriX={$a=ZdFG(VEfp1y{=ut!-`A1*&aukToI=`Q`3*3>{PUgSxU)kF+Q=?>m zj@%_)$7qyMtKT`qx9*{j7Fgnc8&R?f8?*KxT`mWK=O`w#kr{TNtv!-YV-nJyO5J2Z z$6Ei`u!7WYtGtMvFhN?s=z|`|hT)HTk4Cp&6m1{b_4E6Ghrn8;7n^C^>oDb0zpm&S z{`zw^7^Rc`T{%otZn{T$9|C(Ar5?3s_AzLpo3wWfRUlePnJr#FzZ*G!ZAQM)4};jr zjoTm#BKz=|&Ayxd=(g2So8)2K$nOWn>`n#snCHF^Y=71-mC5VY+PxI&y>_!R8)E)(=`8 zFWQA|68g?<(l^bqV;`6g8|2Yq>-CNFanMd}&no5&UrG$M_T1ij_cBoa&WIm@59yA+$TZ#CJt7TW=#FBd6#VpbuNta*tBjm+dOrTehLtS#Uaw)j zx*L_ z8#43EGSpXIo13}Gs8pCBH6x`!i5+Pd3|uc-h8l^#Ob*L4H@Q25MwWtZf9m~~^Ub=8&yL0RZ@j!ooHpC2wE2K`|9@HTpBH13Cg z;NKKxP{Hz;%`|Th!`ZW&%^t(`Jauxf9mkUVL3x%GYOyz8QrNVt7aJ$*m1%z=K1Y8} z&K-BD`nP;$d{u_XcsY9@rZ#r;@oWs+Z*O*?X^>?>Hb!eK9vB||g#osqbs!HA+fDoh z%Pa6B6*Vod*!5NMbkkTp#MVL10H;fT%mZ$M&qMaOIJ)VzUC?LsXN^s#Hid9ZANzx`Jhq=Yiy)+ZA-2l+H`wC_GaTKaS9VN{)Xd7y`bBDkBtla1+e6+$* zlXh*-Rz*B_4|LB*!6Iremr>~r2OIxA3R{l@WjiViPMs% z+i;U4cC48F$NpjOT%)<~t^IL-d*i*ZUYLI0<}PL`X|3DGZJ*DS4TD`cXw+l!RPt44 zIXH%sw&^!}$d`GC;K?}<^C5KO#X&qhK+fe3RsJh(kB{^Rq_jGvIX}$*)*e4Ac{C*e z&Ss?k zTOVec+G_RixVpZ4;0iXM_2Zs&T|}f3@vx<=#7c% zBrbn-MyJ?A_Oh}um7jcn+0VR)e75kJqy6T(rT^4k&(XmRT(e|-Q+x}fse(9kgCKRF zv$WhY?+!-A$jQ+LZ`5l&!yj}G*2D|*DRwpOZi+3+?>YMffHbYHXVC9A5NOV#xjt|4hwFuM+;8vT_vg9<4#PYplaFIynLv zi9I9Gto#Mf4o;xYB)Ju~OUEl>*R-F|ar`BI=Nu>1U9Rlj*gGZ$w}YJHzL(x@$z!RO zZi24d8H3N;HtM#8254a3FF)iiv44UasbptTH^5Bq28OwRTsk8^QH?h1l?>b<@7Q6Q z5u3he%`~O=xQ+de+j4#iWhwLfWWRQ*ld601+vdlz%lONWc9B@}T%B|LCi{97BVTNg zKL6D@uk5s7Ns{O5J8;S!b-a7aoB7vw|e z2dPeP+1NgR46@v`8g{$wr_HWP7X2td?K*xKr1s3%Jl)(lh=Rv{2xjL+MUxTxsXr{? 
zA;+yW+~gl_R7ZUzZKHEblc$oqZ*)?xH5og{$vQYXl=k{*FxVgVoH09h4(g*F#M@44 z4~;eW%jUC>y86P6_L)fnzI|Q zH@4DcJEX{Gr;MPF(E)9K49)(gzj^HUnIJn4c2OtWk1f(*KSEBTOnat^UuQ;4>#%JQ zW5MHpE)D~4=bCotbnKu$U^G%+CD5ZfdyL*xM7XoshT4?~Aie#(Ev>XH^QWYJ(9p zO~zXdYWv;|!-E3#TJY$;Wz}viiz(rMDG>7vs82L;clk?$K=Y#$C(f9m+4I4kj zbtn6$ZJc16I)2l4_H>^Oa+T_5^qrS8pP9|o&`ds6<_Y|@vyRJWN5$B2zb82vF?Q@^ zbz--DF`&&z6~WsuyRH#*@q6|Op}{t1Xo z?rZnD;p%1*cq{JG(2UZyl~w+K9l}sUF+jWsj0IeC!QHoL(`(!vyFDPbme@q>b!D6Q z``MT2HMxp)&v8y=qfY+)j3ZvKEpV^v*xp-Ik5FLi4;Dq5p)tM9;nS9VPMF}G|c z@$GQ0Z}+Rb0pK(9)l;Wt8V=5+e6dvT=1|a>lllzT@z=d#@AplcHL#88$Z$h5IDkQe z8y8Eu#D46VGOiXl^$)ZzLcV_|3$ar%Uec!M`1UBXR71DG1pLCaO<7 zr>IHPk~!xh=t}NLJPvM>xr5`rjXm$%QPY?(TzPQ6m}_vRGYZuRX=|9C-1%O7t9@|(Ey*b#OdBqt-p@jz%1IGb)oZrT__k*#uYrJ#_X;g_gvK^ zer0_$p5m0TUK?%8G-jl2-nq6`-N_1TKAdnYx2os-5$j2eQ{DYL^+pa3kD1D!F1;JaWhFV1Uh9*ia?1M>w$CJ>b3Mlx?f=4(7)m zy=N`RJem1_`u+w!$okdMc1X7JKFm_%IXY*OQY1yd=o_vy|X*CB7%&HcbE%%^k* z#lMLSiN|j*&A3PJ|1KoH;A_PCA_>g*61(=bxw%jM4reHKiIC>g`U^E_snUZBDs!W{ zJHlD~YM+{n#=phZZu$|Ep-Ra+q+X}%d*^&)Ob$``G1h^2x>4WGqx&L#+DvMaHk-^G>#H!o3>@D`9 zd4wtQD+p~cSq2a(`&E74-g!|zGMSbRp2L|B>q}nDSiEQaIcr9o`ikBHd3`t9#q53D zUFV(=wA)#A?hWHm|LTm2xzwao?cqV&A?782Xgrg6Ln%T6xRAiu&LRIg7^^Y~lwC zl{0h74Xd2Kij9)#B~}7c#Mb_HzVsQo+!cNHHmo7$YwdwkbxpjfO2Kf}U=#=$8Vam5 zsE-0ZeJ5-bFCn;T*j|u{jL1=}wtDM-^wautn+KuJCwFyCEu=Hrq$Bs1)`|M3_jGt< zqPfrLZbiNZYA<&uYSN>!e34o`lY#`@asLCm^7U%p_q%Ja>u-V{^{=m=^VEyg`@X`7 z4#byN=Lx^Wz2SUF%wFjGQr*RoKe^Na(&26Y3E$_B8g}2WDWGO76;EzOFwDbshG4s=}EC zhlbBv{V^2++usk=CF$%u;neLF_hD)Cc@1hgiz7xzKJVQHn-%|8l4?Y(U0 z8O0f+x7n|zSp)kRHh9lbb8j#~s#CGKw4dM@eWdzn0CGf8$`5CV}=^}c@2ZySQKXM1Myd>BWeF^^Lp}!%xI>6MKECE)#tR)gd6hv zeQ3*l$Y*jN$gi=t_^;pM4Slxc%v7}{V(gQzJ=K*g&Yj?kV6;XlSom*i6s4UYvW)%K zEO7*avj=`V%#+372=}7!aS+~2ecyfmTP;hye~!mtpJ|6_G`pqK3%HKEAbyi)eY#2T{jvsf77d2gs&ybX z&|rf8qgn)ixwa37rVTDNt9?=nkXmnEc(w224fBS1M!`om;G5L7!ACg9pf>9U_+iLB z6mHVFZt)Wj=(zBV2VjeT>Q+DgzrsPKrUjon>bA4x`%CD9rnM%~t?Jm5#=>DDv{m$K z-DtM!jK+?cj$i#zYhvU7R@_;iIQQ_WI9`N_}3V9N-hX;?k zhVXfG99{Vfa)LCKn7I8=sAYdy)Ujb`9+dO#H1)l~;o2|njES+k8yY(HU^9cik#{=1qcHG&si)Tz`%M1V zP|i#aZTGWLPUAQs&+-Llyx?6YJnNMEopYg6j&Q+o^!KI4agGZmca;40%rylYy6p8i zS3K8yj&PG05r%4>{JT0A+Bc|K)Gfg7g7VLQ^C0={?0t};$RC1hZ9Hsq4&&W!bc-3- zSf0T1=yrG5me8Jd7(H(pS-U@)FN~tW>=!!}w!_`SP_<0U*=_wa9}R-SYi}p(O(O(< zTem+^^`E^a;lxgvw399sHN3H_b{#OEk+j7tL+UTRMC>URgWz%9m^xsd8h8Bhzvp{& z!87C>p1*eCi=FP&bv=1~iM@pjcFs|%b9=E#aNXdF>2q7{%lNdfoZG0%V;48S?85K% zqCTTsobezPjvsA>b{Z1s8u%mTme%2aUhHG?_&(tGrj1I}HY7?9W}R-hqcPW$*NLwR zR{en32K`&CXPbM9ps~H}@@#RerpI3K69cr8XByNwH=N?I^=pNqK$X`HYrlft zLhKZFi@cVa!|!?R6^~VLK<@~jUTPrcoamg_!6B*Y2l@`&c|Ci79WTw`{)=`oCZzB@ zy})=s?1YyZ&bGqITXvl{AqW(oQfFB(n;nK{|Rw_K}4_EPZsboBASa8Q-!{LU(8vA>&tujpE3yk8RZTi9h_4{u!~m1^XekPp`So5r~Z% z^;rtgH_lU|b8}FocS}vSW8FTsh<_YA+L=qx<^^kRm2g3V^Xt8}s?W`1;Crr_tV{;f zJ(o!jEcYFMNk1kPcwMVx{wDc@r}s+NF4~X6;Rj2A+a_lZn=yD`Mt{!=$pI^DVWV!h zuZ*&|5m|X`uU=0RE=97*?|I*W16OEHT?fARM6FrdK=pL~ow9$>qOhNE8vl|v2v(-g zV7KsoUpuiaQmYv0xaqh1R>f-cV*KDJaD7a*%IJK58c~nEU6S+BPZ840;2N@Kk+1q= zM#od{Yj(`wa~(#3Yc>2QHGU-L_6OFXRbeZ^-eE5F=LcUq*_NR~F*)_{(M7&AOfc_t z)9z7MeR;{~g*jm@f$#u@@g;G0Q=a#UJ1zb~IP9ky^Gq2a3+FuY#Dh1%T{l(zJC?c@ zB1M;foMIEgKc%2+sPA9CLVA7@Y;olHGls6(pa6VklgR{u6_-7ne2gFH91UVaINhpt z`z6?dIZE%_^}J(J^Ft(xx(ZyQfV48I3m$D4@f^oNdd)^Hh37qN2`?2M!9w%IxDQi% zL=H#)m@{2;W)u2DZqi+?4c2|)2g5sV2~XR95<67Dch@{^;dziJ(fN9^{fZh0whuKw z4vh)lyY?QKx24}TYNcfqIZ$z6eOLZ>BK3Q;ZNVTlUqG-5Sx?T1E$1P0cF27NJwzOe zSyv4x_9F;M9ULfH`o=hKi=)l+={mpUrQ(N9JqN;FA(nsb9~U=+Vhs&BVN3;Qt*AeL z3D3X^>fMg&JPv}ag}wBw)GK;xv{$ILVV4BQtrQA}H4Yvs6T&FhVNAUU({WySn13gt!%+1K4b0mnXkB&4efi+Jj)C3%CE2| 
z-DjljH@#kGT>s2ZTJ(BZ)3f4cGkAf2|0DIa0PZ#P6RBsIBbBw~GnJn0$9A2Wpk9Z5 zpRf%FE8QJBu|L@od#GLhVjdMjLpk*syTq0Z>)M>)BbogO2F7zN9yh1IMj#}6m;#owpzBk>CADNu}v9jOCm&bP>dh>n!YGc zZ06v3%#>Wm`5qLy?}4@AUPL>8LIL_PsFOLhhgWsnd95HRtRT2G%h@KdcUz&MbB@+L zKcnHfhL$JifA5MtDbzWS{cG)^@>Q(MRlk$w;%a+ys&C5ooMX>u%KmrS_$4=Lt-8$F zw+}mF5qwHIoBP}Sp*O*1!>U*7DFAmC1 zmWSP}^$inYWMI3^+Cxx(+czz^Si})KqhBON@3@?s1_MMk4tU#n@0bF12o~Yy{+s#( zbuLjX^%Qb!4K;7cJTG`WsZ}TDeDBOH zxxz6nd-=m1t?J3>_`lu`a4rk=YAJ~^3Z}viZJ}$LJU7L~q6}`sV zFH$=YKM9Uu#A9c(H?MF{xt~%8qc%Yu%nEcp?CeK>+Nj)PLvG^Nz2v1{mli=&-4Szf zjxDKo*{*v~pMsbE<@<8ZsjWB%a{l*yulBL+CBd_ed#mEj3@(tDc%D&TOa0iBI@-Zr zkh!+4wT{mR@C}mA21U1Wzm4nO9m1@YO_FU*ndMHeZ6x&-0y9SOKFxw1~@zRe4_ zIn{oD;b@1K8tl2of6e>n-w_HA>d6ZIH#_h)91F=?9e8$Ali?YY(&K{d-@zszMu0Cc zfzK`Xxm7TrI;EFo(CpQA@V^P_e0VvT3^Gc_2!gKdZu&um;Nvc3gcDHVsg!D9|DO#QN1XLU!0cM&)16o zscVYx=}+}W*7v8@$a$0RUGN4S3!ZSKOFnvz^Da5ipzyA0=kn~Vg*9ubP28f_rY=2y zvyz+kjalUz;R?dn0DH^(i!_;9hV;6|{V;m$uECg<|IHKp5x`D_d@Qa z(MjYvPiOZO-|0BOsqNu&{g*zUK!yHMJ?+#=<1oK{zA-&r;?JvofpgtK$1H!^9Jsc+ z=6&Rk6E>F z>XD)7cZ{%U`L0cSvpTPJpmu8)hrQ%|8Y=Q{+p3jO&)^xWGi0l-2fa6SBM+o?i~lgbM2AE0p5WSlNIn7n{loYD|9s7pqF~*s zdu1^sihJepOj6@oc_uzU^KE~B`~LO4Gd_y_u9G@_qh0@x){vO1D;2&A^3~gawsyk>iWXt= zDDk!_Vik_;QX1ZSoF_5799h830#;_VbZH|**7Ed z$Q>^na5)DP1WL3m8OHUAna`h9`qm$Y%`{{||J}a1aZ;z3>D?nQ=~A3w9~bR3*{k&a z*SPe9VBDNS3UL)jOj#CffrCxXHZ6kh=7(()xpPaMGW906h>e^Kfde%7LtW-S6Q%#UQve$v- zfT8C5-XIdq}D07ir`j?=w4pQwuw6(U9r&)T~Y;L;6gN z)u*>j@SB9SYxOo1>jD~Dg1l=P_l}Wu+{h%C=SyZfXC4S?Pe5@iW9}&ZBsN8Lqu6)d zBOIkWpBjhc5n$gbDN9Nm$$Uw%Yonm^Dm;7QG5GgqJdyY-I_UykVQ!aPPUCFwWi@|z z(eHXFb_s8Pd+qP3IqaxmHoC_-=F*|}Bxb-*XJ$R5U%6Y)EJnPTvImD|+_k(1zo3K{ z#g?l@i*&sWTtxhl^g9TD>bG;N^@C3Ltg=bb8UGMGkI(ogW^&Sks`l>8-wU1ybHq*= z*s$C+{9k=`$GsV}g5=XiJ|cveldzfbtM=#6O&pekmqI6~B_syM$u zvzz+VpBy4xmy-J^Yf63;r}0Mz;)K-E`TUgVrM5f4Mg&8OKr;iqdM3ws)|$T0qrZZG z0*J?dXGlG2AF*9~Z#rmYz)XarOYh_f^G6aOvPZuF4U7Q%85CLYD@H1P6w;p|{jqg@ zzvOOF->G;Y=i28_eMo8*aKQiI7#CL@#B)uf(!3GP5XkfuFa7HK2I%-gt@C$qANY?A z0tosvQ@f`QtpHsY&Of|P^L~t;5Py&0(y1qZYHD;P?zjid7m5Q0`X3$EkeYA4#twF) zZ80#4HrU>Vo{90@a|$;VLENeil6=(O$=8dvmwlz!iquGoOMMb-wN0!Wto*EevBMo0 z&50Ly($l~P0U5T`RL8@lLRu%|ia)M#krTdkdDd#5PyXeSAL(9|zu^*AHRwwX=(_iR zNB^Au{)y+{gwu}I2AX!X8vi71%dRyh zSpfS%31SFW({;h8hB+zjanIc3x5?Cy8v1_1op^+22+xlE3hf^+2yTY^(CpHooZ{R z-tCLdMuIkh*l=t{~ z)=&OIEQ6osci}(%rvQGQ@F;kYZa_a;x_=YmfwuFh2m<9M{Q+=!$a^<`>6W#F7V`G> zL_N<4=!2GipBw6A8~@g5GA75#=@GD3yb8b+AQl+tzG}smR@rVgG0TDuZ5i#w2zo3lK(9vTnt*6@E6>DeE?(Ce6Nd>8oDPvXQIy9pYkt+A-3$XXcup6BpJL zgwLQJ2EKPy->l+WxyyIhife5jUGt*8O#HSOU{}Yz2n~)BJ;k&cdm&n5Ve&zsv?2c( zz%zKJ1BH5?wjp6RG=LwLap1ssB~$coC0E9E$xB{fG9s&+&&>B=1r6gzNhu^}V0|)nsH1ruW-~{B^t?%5w%IPn)cF(YoAEb2 z{*e5@633mP^_p(YdeK@${>&pkF>c2QjXgIi%@ZH(#s0{SQfAm7&I1?7$o7;#Y|%d; zH=u{g+G3Mhkrg%3E5bZ0A8w$p&){Mkw;#hDdA9$JjS)Rpr*;gCB{X@dHxmJSpW}G| zKkBUKDlz_|x7Y&fN7Qq|ezWn-~R7 zJ+L;rtzD)wn>smX*)nsRv2OHS9c=1-`n6v}Bbuw+Kg7`<`^RY>rytr5a4)8X_pauF z;MhIEEy=s5aM(|H#AVGmySlE#ebD^Mb58a$J=k+}gff~O(jwc_+s;I%13IJBhSf_j z!Dml@_;=fmyqFk+_%oTo`jPm$sj%O(!U2rbd_`(DC%u7#@DVFcW(R*Mx(3v6JoF2? 
zq*OO%;aJ>#%lx+c)`V$zJO@T8n2a%Ll*S&`G6{wvyu*kgHi1FliqQn}M^2Ghew zEekrGsn=6oo_c6Ip#cN;%B=NT@GSa<{x2ftlVy`LBL_|Ab z+JSjaUFY1t-+vcA;4`nfI_)63!}gngSnSYH_zaE>xKW~inVWTe zbV0kQ4@APnEpiWTV#jBb3EFnlQs_0K-|~=qX!31M`Z}one76In>zJrTyjsw~A@To< z^k~`e#E4Z4gBBMbJ^?X|)b@n^L?7!ICGG8lKt(ET6=BRiUG?%>Y3JATWQTrv3tznZ zQjGsYXT)C%SC9S83*se|ZGN49`mWlQPNKwY)qSiug%{0S{BFesx#(bP9Oq(t@W0|e z&1G72jHuIJw72ilH)YD_hEIn+0yeb(i<()Bm+fv8dF0K7y=Z4U3PVf4g>*E@-WHUH zcp+D1{>u08)2H`syntvP@`` zz71Q~&~*%Vj`6Q+?WZ^ZzeLP@%AG46)C;x==j+UqAs!|!F+6zK=orIGyYzd&Ix<2Z zLk;&o5dFy6Qib!O=potn+joa~67eyGH_63g$T_w{_i#*JN_uS4fLjk{;w{|!Un{Na zy?Q_lqW;w%^&vHM(eO=wo_(Z&(aLVO1NJivqoi!@y4kkGYukP!9mB`_G{=X~jJ>K~ z!%m-=1DKD-?KdVbf7Vt_tCqKnflc#})Zoj+ZI3AF3H~YZ*fP2FJ$KN17QFuVTCT>h zs9~J6s(<`HHGoPZh`8ybhkeoEk^A$*LD2sDqHpx03&;A>-$gusP0YMrSd&5;)dlZ$ zkTWLdU_8WNgY=5}1Y?1eFzL8K##+E7ZilfYx?IkI`oxA_H_^Wd5KulQLu(y>5(5b@ z3;k1U+Yj%fgETLAR_g(6yM;SqAVM0aOquZ)LC_#yjhR+DY8QL%9X*v%v1+fkH-q&_ zW1Jq0(8@**_>otCtZwdx57K%rB01=XO>2)q_fC{K=wEpjCM5b{pY!osL=GhSVbSGY zoqB;z+`TdR(-p_(qCq)1abPNLTE$Vj)|ol;nkpN4#x*Le7G2Jtx(Yi94@AdVb$of1 z%ba^TcKa`496uTbE63h5gr&Y8HRzWrMmP84`U)U{W)!x6r4*g;8M7XPMcesQM){Tw z8u4e_+CMJJ6fJ4n=^Ay;1bGebT!1vO$$Mq5| z>#85d8?ugnHZAlUn*R7*v*-6T-AW%U{tOH!?O9o?7*;weOfW4>WJ5Urb?MVaXZ8Kg zEmLA{F&!E^x9e-bR2eEa$2R7~s*Onf% zF^vM5n5{fC7(nF|!3`p;cU?d%d>(if<*Fcl?U@PIen#H`_1rLc<^Ih_Yd`k3Z;Z?K z$j9S<*lp)?L-?JGEnH)0=OoLnYBq%L8E(rJlcMUvH&ssjwELY*=@}FT#aB>~@iB`-N@8_@l`je*nsRy6J z%s=o|sK@+keu}3;(TRN2veZv>^hmzDW6+|1&Ug8|(z|@dV9uPyt6sxP&uFEi2;W=R z53ASx^aJVGM`AI4uF~L;*KkfxIRB4L0Vj?d=nb9w+s^sgZ=6>7yq!Xz_`K(B;>El% ze_+k&7xh<8{)r>reS73m$k36v3r?}m5L2r$#-Cma-xwk<+$oz{zr$swM{wj07CSG0 z+&Vq?K$i{JR(IHR(E7Bkpfg5kj(*p?gcBpZ1;nZNt8dyb#eSzrFK}-2i*KVB(ImFj zc$CCQ=q`ab*fV%IU~2TgbRM2LH{Fq~^-uf=3Dw;*@rT4qaB-lTgUj-HoI3zdK(N2+ zg}qPrv;J-DIB1k-o+foK*PIOHN`2)^^D&PKe@YxW!t{kY-Qf{MY`Jed2U>4>eNT0k zi@iSaXRrG|PufA}83h{07Q0{B=yQzmqZ8&-dwk(xADOfENiHDMSaQ$p)=6t=Gn(=qn;*ysB(u?**$CNo4%t{b! ze;}tK`jWjqQb$C~e4)0YwFxyDG>vlFC@8kEcU-;`pgjZ4zL{nn`b@q_Y!=>fX@jA3m50+&TI{PoBDS*OrG5` znWJlDS#@f`^P=s5OP%rY|G)YF33dp@RufTbt>X>eTm3$*8`Ct&wL}Zv?W~vZdC?$T zC-fguUnu$v8*0Ab8L}a|2WAjYjE!BQo2}oMG7p(slh%#j`U*ypGlhZkrq=3tq4^nwyKZvV!UF{sd@yyAKFFr%GO2>7v&*;1pTc!39ZO{lDk9sVrH9FE@ zf>13vtAbOvcg9VCUeU*1>g&>9cH%Xi{HW0cFBTop>UVSw^LLl76!S29rx#Q1G}^99 zFp{RZDeWh?al~|@<;7W6!C=rlhGe_i*hSqwkD z8`G)}O#AF}UoCjO9sa+DnV>_7uK}Bu-p!fIqct8S&baIE z;QN>HCt9j!d^m&47dnSML~@@vr-^1M`Fxf}e@p!HYfnSFq+S6J#9Oy)^Pvt#9++S!=oex-KZ-fV3vx}1 z{0!1}mj3zgi=G9p8*mP9E`7a!uOCJ%b;ZRK-JS{${gaP*(JF%5IIz~Fkb&JF-5K=h zN-Sej^eTGv>-+6pS+k3?9?@QF9-y>{xSmKR7WN!_A@#!A*Xzx%e-w?Sb4|6<^#Wg! 
zo<%B==p%A(nCrD}*a7vAlRl!%3OMJyy5|baAKxOm1a%chv^Hl>0pEM{G+e; zqn}yj>J>LkIIHJ=f0c7gdcmD$md2*!Udv1+diwrBKjVgnn-Bn);VtsfY09kS9OSp3 zxEuS)8Gqb&hx;9WjPZO$gEyTnVg8=o?ta%nNbNmK-&=0|?=ZGsN0a^3V)kp?9A_iz zb6k6UUU};-f2uhI`mVpl^jM-Rt@(qOxed%Dsp?H&$2MnIe`b_i^N0?_?55mt-4m^C z+aH>0M|#_1<%q5V_^hQ~`XpFd{E!|_8h?v)xOh#u`wj3XW;x|E_S0*3N_5fx}OL_f3`ISM!Zz@ze5bx{qHexWtDTtJ(Yjcqr?3W9Ur+v2=#NXHljCj;UJ|K zECF0j^i$D6chPl4(~b{WJlu>@G=9yDm=q06b8~a#2Em2Jmr-Lz&uyJCC<#q8>Blk( z_t9?3oDUmmG~&U`rymts<}n|s@AbXUAZxTfZ)#xne@5YbA`s7OrKUUcT@(IPhu(%W zFI4*86?vJ&4ZrIcW&faIR@sJsp<#yq)cR#7oroW8rtgOh*u)?DW$2z)+79~O*OuPz z8f2Tho96dJ_Pd^s#{a2@Ds=2cr$_4zb2p@4N9$jXiHcs)5~JpzYsgaL6uj&fn#&o3 zJ?SCpe_Dg^0f~XH^bn<J==8yd7RNI72!ZHsaUx zcDrj^(oYsxkE{BaK9pHU^jo6Mow})W{zSTt*thI>-t6%4kv8eUA}!PT0F5m{Zf_2b z531lAqT|Fo4h{fX-q=_@A4vBOX#Ji@;*%pXt?Dr^A)=K^iuUBCSo5wnI%Nb zwcoajV#KVQ(Mit=Im{rMr!zQl%-)cOn$UBHano@>;-L!e5B&qZg>jQUYYGyaM;EQJ zf4RKwam(t_`AfK&(*v+?{p3SO@fg z>gmlmle)Px8RjX^FD=PnErnh;+D&Fxe>!0I{pT_tTIKd(6f{r%^_yAYq2TddhX(pb z?L#t;@8zfU+b-CJpt*5z+z)Z(DXXbg}JIzpLK^4f8xKm z!SB83>_HYjYcOa(an74g+Eb!;ESi<)`3hAJ$Zv7U@B3P<`5dgCc(|x4q?XRZh;dV2~;7p6Y zbq3#|Oy~Y_ze}^!?9s^g`ti?`;8LSGI&wdy6buX>q@S2S#vql z=I{CZS^xZM4n)XW8o#>e`nm0x0~MDcirr(k1*8OkDKd`wali6v~*JjHEW)qf^_d5 z8EyOf+$?6=IDvb-5zUFq>xQp?|GMGp<#ot3K8y6}2T7oQ9r5swJ^yXZFLtLCjt4e= zKU=ra%^^bbdm(vKx(9j{%@{ zA5Ja0xM%JM`j3L)pXS^incIM_Q#)P$bxmbHh3T=M47-)QJ^+6sUqLfVB#FCf z{o6Ue^A2+}X_i1We<<{sxoa;vM#2vVORM#-=linH|8iC@alz?K_zPzLzR-j7*k9*M z>okwBs|0SsU1%NL>KxN&BKR{6#kx=XoV%WD3{tJ8SsvXhUkeChA=iO+S~n5qA0 zAk+WDn)7`5yOVkIw&61Xj{8lFYf+OhGUC}1?KbN4aJ$(+fBl+CH)AphyfeWG3b=Fh z8RQX>KfaD1N^%E10^-zUxOsj!uKBU`$KdOjFkS+?6V#`6W)sxprV-LUg!6E=UATJ<~$IJPXku2D%J8atX7V9IRb0MGyQu2fIL}@BSL=%J)HRdMq!Fjmx9fG$8T_;} z^xENqu)bToS&HT_>4h|_8Ox|}6*qk7D+w1g!z7fWh&wjqg+{ZRLT5wl( zgHp6kq1NIh4e;>iqMBdR)p?$uz`Me>ljj}(IzNWFU4PEC$^K!kjq!FpQ%1F4|J_`~ zfBLuhe|gT~o77v->mp;5xrr4ndd7-G+e+UrgScrwTcB}ed=O8xjkj+Dy9$|`Uh+Agu%-8*z>n%)-8JWZRokdNbEEX|bp31>z0Zm#F@rZLScZ4zR-oM@|6OiaWAt08 z@n_OMyZ;~;l9|EeDgmu{#rONKJ?(&<2YTB{f1F}wVF^CFmp++EmwpoIlVM)wh)+>z8`*GrCV^uQswK_e4XF; zA%a8d7>2sw_55)762jd)%oj|F_UxGJmf-9HA$qnjQ52LNjV@ZKH&A-0w0ls&?z=QAoTy;2`-4V@qhU(;S|IFS&_1UNqe>IVe z+OA#y%#;Nct2~ z_aS}_>YaE$GBbuQi5?g{18cO zsR&+5vFbs!nI|K8ne@fM+7!Qh;$cXCj^<&A{gR&fpBU;&E9W#{_}jsY6#IH$t;@@b<80ejEKG z%tr(b%&2p%&_xhSAdairWmjQNp{ymkCf8n$0wPZTb#4V{G9cEJ) zyqSaLe~x5c{){>Fd&jKjB_N;7OkYkn00)BSrTLWJ=4a#9?0_jQo$YqgW6l*dYne4V zt9`dZhj~PBLMa={5_%&uK<_3fnBl?<)KM$#L`~Pa9W&Ph>5a1RfBVI5-rl@H9&*;B zmo)5*vc(6n6cid9u>Z!veeJyGcf<#qr2V$&mUgEXB@Z>``M|~N(R76-VrPo|cZU`; z`!#;A^`>bbC>X_Nian=xebUj?^X{)OrXRetno;qiZ&A(3L#O^4uhF)P`oYzV3g#l6 z=BS!8<<}*TC3ezz?oS_~o>{H;`V>oPErU$-3}mKIr2~DQcepCB z7ezfAN&N!$h;t~OAy+=R^w<~-j3Qr!EI~Y@``SIX6VX4l@d<*+1q*ceg0Qnf(+F&u z@(J$5!f%V(y6PRi%cw6%puF(?v(0)S4${yM4f3CT&k;h;p_8x=L#8`m; zFfI|@`*>_K1iAj~5?^_Tw+|gJt9aUL*smol%x+qG{=t^nHJ><*MZRvOguS-8-k~j! 
zPT?j!b=S!foZa&AAB}Ud9 zql>-!P2czDyj{3|KRmW`zp(zd@D2cS;Jm^&qZ}Lhe?v$Hj`V+{In;$y)H6il)%}R>(2z5tjQhn>GhOk zhl#l+j6K21KhPwN;7T$tuiYa)NR!Aq1e0M=$UJ~Pfe!q4zfY3bWu5~48+Gn0ZBT|C z5jXqqe<1-f*zJ(nsjk^#PI|rUss2M=7LH-Pp+gZ&JEl)#NPVNV^pI{qvsZYsBl>4h zyDm1=MsLQ3JWu-SGNXaKU-LE5Wp1Fbi$07VTtj^w9@+^v`n`X*;%fGfP3LEJW0LW+ z!x}u*Ilcfp2G^h8@iKNfZxQvdNP5d(IURmfaevRye_dS(MNUw-=0z}Lja=rbq1 zh{8FGMZdC&nV6TuECl)-#Wy_BA4MJmM>cQ3FQ<=Ma+=K9N6bvn^~hk?Hm4!A9pTX> ze_x}GXS`Ck0+*6H7CryrJ+&_MBAE*WhiHU)zRU;XuFGp;MYo&zU8m>1Y2Jb3(u1P= zBj`Vdap%^Audk$^A5OZ)2TfgzI`_WIpL2X%=?R|V0zK3IyWw_(-xTtpk*hV0`Aews zPS{2Zf;gZ2kN(xieJvP%QZH{rNBG&)e`??ybR1)BEy6@*(@z(%Kc?10PpCgxQOV`= zg|%aDG(|5697vh5_*3ClNUmJPAjE#MpFd|qv<$GlXjJIWFXso1X~7?OmavLP-1*OX zf9~Ng2o6mA&?rXowcpm8z6j2eakwA)&I}t2HHi2`W_~7h-5UzUA~CG7b5GD9e|}VK zC{EQ+(Tz8r3g=t&`YqPp%*mmvzVExsy2FU(OjR|RDlc(8os-KPY3-B#oK2?t?@#vS zAMS_XB+LRePUrpYp8N}3OJ=(=Q(&G49WxHe>jm0Qd~EdUY1mk++l1MgvFs)Dv^(3d zXwgoz+{#S17Pd?FXW8(r*sY@ve@pxk2IJs1p9)*ux!qc5A(N}@E$6in{F}Q)jb8d# z-J6zjH{44#(^E9y{fa(}%w?_m+898e|R)5GdOnf~06=&}MU+?1Q zH+e1l9Y`t4q|Oc&B3kI=a_OwIB<$Ab!k{cG_%h#%ZVPHTqygMZVxr+we-4IG$oFjW zhHafkcYAsB0Lbv$Sz6jnbOuCo3g4FBOAY>yv-I&O^%w5#-=0CZiA+aq)ZsGzc?Qke z&wWSdugJgA6{dv!y3)+NaNSSw5NENFIW-rau*|?fljAh|dlR4!nHS9F z^5Eokh-U49s^jN^P>^z(f8nQUcXy*^qV1!5ChXelxPNapKaCL$c`-$m^nN(a(amJ$ z0JV~ov$f%DVbk^>)We+3_ruLnuz9poq;|iq-)*RY^xVvNom~2l>J}8}>iMXZ6CYmn zO^Q9J@HlPH&Riz$Qf1q~oX&A2cZaiAVXRsgyaUKL8w~7t=#A<(f9$_#kUaS+mpKT% zei3~2p2L3a6>KJz++0GJZ-5h-OM$q4o&JcAT-GROuNKreEcjQ>G;a-K=6ir42<^6CozfB(HN^JhkXah_w#c`xhw za}q-`XK@^l?!5yEpJ;8!-Wj#m(ai=OA;2)bcibgr%4arFkTH-r3b{$c*cb3CVtc%O zrN~Ns*3QTF4IMK`2N%RWQp+FS*K0dwCxLy5HaPpg!G6-$Bs>lFb)(OKjw`O>9|0t* z{^hD~??3a&fAm_&Y`lsK&(vFDI_NTac%Wwu!tcMg|Mdjl2S?8~-h(}L<3_I^?+bdr z3GM#9t&fg%=9TVV*3{U~&x1C&JWr#F=lG13&T!THeBydnz5P0Fx-sAuT>RkZG)tz6 zpN}Y9eT}WxM?>{rFMbS^Y#t{==0v)4nHMNH9KxJDf6McNsF&nt0Y>mnOV+)bHS(b7 z51&hB#4$s}DBd~=wF1bUh_rR!_>1vmPTR@98C2L&ZbmnqKIUqEn)q$>^zqwtuPLLN zTm6G4!gKylgsd;*JCL-J<&D~h|!5xQfA$+>CyYI$5y&7Z@rw(n+Wv^ z|K&8Fe_q>@O3$dOr|W)@AzUH|q$Fa(Rm~mhik7i~MhP@PuPE~sG0N#O%;{l}J4Byb zm^R>~bE&S+H>Mf=V>#dFK9iFU)9*SxclbhURdBSxRyx{1z(ci zNv$G&LR*>XtoV8L3lct)jh=Y~9)8WeH(VNUNQSM?G{> ze|0Uzcm3P7HTNawD`l8?COk{ut##`35BURFhErp8%i8|K+Q0p)^|-rv!e8jV;BfDb zP{I*1xb?3v^zcx6_~bcX6!D-Z}Nt2|j(S}BmYFa5@1d;AiQi(~a|iLsf1)1- zj$hmkC4a$=o5-r6)gyLM{Ib(-qOU`ZO*G5{l1e_i53d0&Ve0AGZfgc@c=YTo{)$Z2 z@PqvUJPOSX?!x*jo00!$UP|a}c9Cd&6n%By!c%hrH;%21Xl{) zA_|V7BYP_vIt&hFZ@+Js7J9*(U{^wLZez>3wJH4WG5w9j?l!FQ7v$3(p+qIvuy{e5 zjqS&Aoc0v#1&x2We3>-&lXnJy=HQ4y(WS#i_f*JCZ2B3_x{nnWr1$lle_u#_;v9>} zYo%r=IYtQYMz~+{vuHD`IknZm_WPJet@EWm=V@5T=StFXP92AQLu#9;^clg`r*DNl z+=5@3?ez?7t$S;&9tO32s|O2Oa7OOvZ2vlen>|_3XfUI9r0${taR3J@*<9b!H*tN=RjRc_gW`YZ*=O*PHphWSs$C8K!bwT9DFgaPkNA`14Yy{ z(AzD8Y>l=Z_ZLl!nAeROfAhMo2|s_GdY|U%uHzkrJ{zxr;qI9Kf7>8lX{Z(Rrhl5L?6{I$kK)(K7+P0nF~PU*_nU$+?4;?UXU8yVn*wl90 zo5iH|8BMs9qHmAC^W1jS9DAGkBRJvXFsv_nWqNtNyGAQLEsuY;6EgEj8zUHeTXJpG>LFYPo>-M;Ge=d4N3;K{d0?kBU_h!af3Y*G)% z6q^sn5AB~1D7NrP;Anp13urb%li*R!?`~RN_MqPBXJx$kavC1n$2|kabH?S39>+4H znwag4UM)s4f8D%t?PUME(l@!8iH^vf=(ARN_@BJHOP%d9WA10J$*C@@X8~P$&8vEb zu7RAigns(e_0JtLe{LwW8_L{epkJFOpY)x#aJCt@+tfbr5!0w1ns%8yQ`Qbu84lZJ z@q&sTf%0caoR3N`#r%v~shanDr4MJ+Z8;$`lty#ue|u- zKCy#(eyH^`!$5+ls>TIQ1qEU+H5%etcx9@NL?QhdSvs!~E&1~tn97b;^Zie8@{bP5RRG=a!k*bU5m`>Blep@KY)*r_!gn@NKDWOZ;D*r7DI#%tzL(^VxO| zJ#-+@?0CzK+j(XcjonkZBS4R*VK{5^402yu=;6Ib+2=R&lclE1=ep-}Wo@T%f6Kq+ zS%{P1jKi-8`de?NZ7`Up6=u$?l=?emDemo9@~#hXaXl|f5!;?}kui0=JQl5h2g!BL zdEsElwfyu9#H*P%cfRMcC-!A;uC&b>zuo!QI`8)|VTMrkU-BktoTMLPEPb2Z+QV2a zr6-R(arp2pcRs(HZHX;bbF9dISIOT=lQr}AEb#V^a&nle-FmT-MG>+ 
z<1GKCYc{K9W>x-$_n}T+X^&nwr}xa55v?tHJp0C|cxrS0tPnonPPKbmW~!uUp{jnJ z?+=(;`ul65vE#G%XuJXx?mPqC6 zuk{~(`p}JUHyn(u{FZKkcrcoOQ0M>bFcJ;{%kQq;yGNz01t|{%1%1wNxA5&pl4kzv z7{Y^=zJ@z~FZ&B$@b7*vcWPs zf=`UWQ}kIU-am01&Upsa_zWBy(@$(-{CeKgGc7H**q7Xa-wu{gOg>e@sA6(N<{G)i zcXL_GIO9e8r6;pNtbly)|7P#qnpM@dF2TQ2kx!XnRe*|uU%r&wuPPuSyi|BW?x3Oq zR@8s5HgL|l*4cZ1f9LE|Sy@lDB34Whgu%G>(R=HyX$DmX{0-_KuxZHrO#BzfXGH!w zenrv;$h!2K<}K+D-+6gI7_YCoBsl=Xdn4cU7j~i>^woantNqdk2$)dTiybx?eE(?H zZJc4B4ff>gc9YaBcn3p;v~5?r4v+xmpl|P~yNXG{`$*}Ye+?eI#0O_Fi(D4WTa9F9 zufPY96|{mZ*NbKFBnx6`)q=eP=pa}DOQEdzE78hBz4eaRCA~uzk7dFWULDz&J%SZ8 z5Pw3R5O8od{Lav=0MGw~=pkX;{+2WKoBj2>#_yo7_7B(*`Ag68OSj^a^9g*_?>cth zyvC)+7r|y1e{S^DUi0+MHL&MZvnvRsxuZG`SAYC|*@Vk&!2{bOv8xWa42d1BJlR^O zv@d5iz0RPO0O>ly5#wwfe&cX2slCGgx-{i!Xl*NR2VC0)Hk=Xkk}Y)S$yTf`p(h&$ zaI^Ssd=<(i@A{6gQLMFqNc!*{FbwD~UwLN6ae4QKe>}lK$Lq6iCw#I$I7|5bVXtG2 zjg155eHa%w&Ja%+cwiyGTo*j`BJnh$t!ZOB0E{QFVW62rdo{3wz-O(`4611WoxpQJ zUctA*h491@PDl7>E9|LW-Jk$FPfVlDyK0Dgzr9=O>r z|9XcmfAWQWgI^5lo?o#>%&Ex#SD^*;Y2QS^OhB-Iz@~xo3u`yFMBuMQ{~_6aBu5W6 z8TQ^9EDq>}ZrTarzE~4=j&K$;HgZML|1O(3avh=D72D8YwGpfO@+)JX60|zl1qa{k z*Vu#K`J+oP_{j&ITB40g`q&%%*0D4KCIe+5f7;g?c=4g}3enP2$525ZA7MulK3;bT zYM^D#VXjw4(+j(YrZmD8pcR6j$kEudpV2P-9LTW^cwlZx-3)Q2Is9b66_LDj*lXW? zUfUD97@TwDnCNMJ*>9SF&=2GebIDh&Xfv^DH5cV0FOPiH^vs>>moX+zv)AM_Q-=@4f-~p z81JtfM(FGl{0lHQTp4+Zn+Vd8kPZ*_SzKS#Nj<;N(58>2o!D>}D177lC^^od#bBtlr zsum**HVmBbiX?gt$AXAUD6N6bmT*^|FCB!B5c47g|FEC1wzedmOse}7H^ zbR~KX1j9slv7xCett2PjsaTiFAJ`yE2XuF^J&iQYc&7U=Pz#IS4f0)Bs4yy4rDu1jy44iSTZ|3bp5$ekHz zL1I{NOCXQ61_qD$oENmChITtb+d>xg4Y8NNYWEV8oQB3?4E|J;r?12pg%|*HPjW3^N~1?=e}Mq*rld&_ z;sb`SxdNkb8?=SLFBKKs#cZi5p4EFxOIaHFmwlktv0I?qA zXxf5y0bXwcw-YH3g6MS$f1I-qe`yLHVri8_+hQ{Y*NKk*179f*gyeGXkg?5;fwu?u zTM%hpl3`uIgcx@saE;*0^{6sNULgFv04tLG57=`)$y0@y|0cYP;34~er~lymXRmvb zu}1F5`yyUJj-mBx0tRPmE7t?(KE@Ez^P$}YtnV0cJGfb5!f~0bf7dNEW7_txN|({+ ze48r0rBnO-vj=*y0@Y|BE5mvNU)1u$-70L|{|9~VGQYr|g*Z5Lqiz2UfA|pBAi%kekk<~T8<9in;e}z`H8>`4etE|r$m4t1My#(pk;Mj}xg$e#}a^3`%1LGPZG-og+ z{oB9gnT{hJQ->}3gid>iIXqO~_8GWZA3{HHYe)g|Oh4am14A0sgguaPV+Pv8o}oZ% zLvc7-br?%d5m-ka2^_;K1-eCwLk}DV#2XZPU02BW&@Vu9SS1P&(kG@huXOs#c;5oN(iROFJ{^%TWI^DhJ;PE zcD7gWoy`u&L$^isri#fa6t%|`#s^|i@F}4F{}uDghSbmL(C1@0VlbI9Yg#k_n3pIsCccd36|q zi4~{^J7S2fi&!P;taIeB{a(fcVo7O$+xF6UKyHb zf4Js(8O?wr{u|eP@C7VG;=Ah!gv0KV(h$R&>z9ZWBO_g)BHfa4k3PXoW*mKqaWH@N+D^f!#l zAG`6Be>UV(eD6hu7~Np=e&Yr@%y-ay2BX+M8iDJYhJGsLMe0mUzuU7Y!(P9p*Nqr?{g z+@U_+cP?!kz-)s5rN%t0iL&V4e-S*|tmU>+mf)9tKb~650e9_fH|_%8f%BU9MNpp; z66Z|X0Uz|Gqd;OFgmddNroQ)Vzw?a^dwTfrK5N65-g@ZOdyAJkuCkK!D7KT!4WM3r zEI$dCqmZ|-XK7E@Zm<(aVP&5S!bkRpmf9A2YZ?Mrn z{FJEY0DdIoxGW#v`Me)r!!LZuR~|gZ@2@`blMniXBZMEV{Z$^`Ssy|;O-k(Bt2yB? z2Q#I3Mk*-wk2!e2JAmYdcL3%SV^pXXXGIG3`+(>P5Nr?j-0(#}E9?|xjE%f_dsF_r z2k}Q?9ozn_W2kpxt9^7tf1q28|Nj_o`Yk{92ept&rc8)Qj$Gcj%?u^Z~QML4@3~4nCL67Jl#*#^}A*P$LxQf7|`SDShQ6V4wHd zdp2{zkMwDyFYM-bEQ-8aAN-2#?5tm9w!Ljx+Nc2JH-(U49@5nQH0S;L?mwFg|Khs? 
zc$W~ni&r%A4P(u4-=_>Q(>%Q*{z-D*5oe%IoPd}Y`s9_`JAV=HO1gBDEM1^yMuF3A zh)2F(-X(ZVvQ|vdPmy7r)slQ)Z-QQ#XYt0zAyuf4s$#n)V z3HYBsJ|L6_P%LojeR$-+0f&+fd-lUVWQdAeMV~M3+gs*9q z$qw3l%55Xs=7__6P?y@F#yk+?`wcKzF{IH4{f*Is_<M1US+B0=3Q#k&mS$ZFrnL&Vf)r>~f7;S!SxV%6Y!y80dr&nk^J?*EpWwke z9hCN5!dvs7zT0JZA5E5+M*-&q-tUQH)E}ho|JaV;oNa(j9`LCD;(dPf6$oAo?}nJ# zkiSlJMu9OPI+E}i6YcicBV4%y#!UWa?-+F^NTm0>Exf=AbOB%N8qA+19Kqu~w3hIf ze@4y4T6!E!${99s=!#&eT>_O7lo6pQt_yGx>4LBNHZWgPXS=pN2ka~fo~NhEGCgc} zFX&H=1E_u(?;TV{@LjY34KmzUplbN=0A|#k_=ocof!p3dasrQx&HQh{brN=&Xte8fO+wgJO3L$WLZoi zA@eb-#ocJ1K{3`#5U~dp9{lvcfkY~Qu5IvEs4B0hB6B=QE07CY6K6%23c*PTf6~?H z!Hoo)Yv$`|$RG{9#Ky$tay>YQ{JU1q8f1vT7_pti)Edh;;bN0_tIYvUe>W?1>e)u< zSUh~%h{d$vYWw)hzw*Fghansw;@z-TO7Mk{`4;m&=6Crr%&#$gQt;)x$U2h)*F(6f zukd8r?5JN7eAplEM;P1qj|~c3f6AbTKdd3ZWrLH$hH(Vg0OIY#y`gtV)<@`8VE$fH z{xEMNXX=NabGy8v(a1RbV0pgrE}!yx;K~Hg`k=%0SsR4&GRIoq0F0e2%3)p5KY3EW z-;eNB@WYxq;9~~-3f7n9WAJg|o)WY@TkI>OI{}yp4s>s!f0P1lz-WP2 z5zv8n#C?YKitK3!7U$z@(=t1gw;ByJ!neQeH$BReaIrbyk-@Y3vB!S%vu%FUs+=S4 zI{4T4YT&yk{$*OF$WAMU2}SS?wt7RI5AkKpS09`6Yya{!R}njN51cIQ=wlBbjx;4Y zcdZE<0z5`%U^S70HMO3Lf7fmd#cQ=3j^S?y#S8pdsFFtuyihDp*X82n^XL5#cLDF6 z$t*#u4L)P6v*I@n;}`E!`a6e{$v~VZfEWZZrZsTuee%RH3OCD(MD(bL2Jo;i!2WRH zq9eSOAAdLafC%<^IqV_7Fa+om&}jU|6N0N3I(^`9Xl+nafqQbxe<7zEabLMOA?%ju zUa07Wx!$C&^c%bkIKpFU?Bil2zdqMf`ekb!F<0=rOL^%*9L|QbX`yI z;yj4%0D+yM+y{zk#DlHk{gH113>V>v0ag$>rPw!q^Yx^^u#ki+72^zZ#Sh0d(QO_+ z1H%W*w`OBskRv5jf1qwLziTycvIBD*8#%xj{A-1r8-m{>{)#~-7Fdmy0$l@2zQ*YG z@Cj2Hy!%Q_INX2~JoU>^1cfYO)=T(wb_-g9e<2!jup52~?l3efNK!1QgA>jis#TR> zAzWr)BO!SxpiO#F>C!qu{^W=6YbpQIar*Wfi+|$t0@u|)f9($tSpUtJ9Q@Aj1r3Qm z_>L9u^ZkL-3rdQA?oWiDOc;Tm4Ecw@%Deh=dH>8mmP-G;yno;zTOLlomERrx`gUZhw%K5z5koe)xcc~NEVsnub7`KIC@vlqg{*hVU7n533)rf2Vy@;@S(z! z;0cj>_57?y1k?Am7LmNkkKa6d0cb0h&}U_J4(i{<*Z(~2XNKL9WW)3KZIRn07rv+0po($73O{TPp}PUNxcoc zQZtfwN63E3zUMbSHE<>W;*I0>a&g+TE56UdcGT0^3hO@c z_XB$T?E~3)Ho!;AHVMF$#a~JoeR20XONp=*&{qievMr5H@!ZzmG_=dEblmP%XMw~S za%j4A^tZi4wJjgo=9TDfp$@+|ky-&d`96FXA8dh5c=dj;p<|Sv1BcXvaBO_#CE)vk ze@6j(ouN;KMpNXQ$qLLFXMy*uHqmQJw|jv8{EqzBn|vd>C$Ixl!+gP(Y`{ok3 zOvF&Ie%(j67CyLgyMXON>=m+J@)NRN!nsYfJz%f=z83yqHoo%hQI{TBKcT#5O?1A- z+%UlYHTpz&Yiy%i{8|r}P^SKE-Gg29S=-LUUx3^i0fA$}^&*V8Q<%{@vbXqdf6y0M za#Wv-o23eo6B2*=4R=4pX@@)_=OrA#|#p0E{wOOWpO6Vd24DZd7N zlZO8)FayXRv5%wMYknJIScqXPcOK&0$n_3e0m%~r$bzX+tA^0wA^r)HPf>jFNc`G= zd|~Yn{Yzm0N;0nh*aRwM?2wy|e~?-ZUL>;i8;F_QmNK}Az?EBt$Ol7?7kH8hFBFx> zd~PBBj~Lr0e#c!&{0{jw=Pw?J@44VF_d-A1v$6C`EACrE@S8{c8xs}Fjh&6XjoO#s zjS2MIxi#PMU6rE{>~|Z)H!?!=L<2}{xZL%8fbY+4gIIp3+a$2B{sZ_Ze@%eSi*G!X zjhsA8w_3v&i%sxkWd?iN)bp8oxZX$M{PF~!hpFBZPV4g-7;FzDZ&Rvwaol05CF6I> zKt9_HhJG)6aUOi~zp@`**-txv-tqJOKj3t}a__%+Fn@8eg8>++y)-Bj1+0U}JAsrm zG`OK@9O|BC0Hv2e^A@?We`DVTTR!lI@H;lWSHz`>z1>?m&kYycab&WjTg;cxl^}X} zvFB_hU^{tc58vck6|arfU8Q5LTQT@66-~FcTk!J@Y3Ult8;`JuA-FY&E)fUSfFt^j z0et&0NY4LHeB%%8`<0JB*c89>PZEE|Z#fi*LH+#pn@h(6&$}6|e|H}E?@_FtLvnju zxbT-BlSI=6vgQ(P^v0BhAb0uX32a(tI-Cnm^5W#yj*<`@1qZ*})i&~wTBUG`+BP(e z!2#ky?w68;iB*3-?ES)?ayq&@V*ZD!9tP`?B1xu@9e*qu4(1>Y+g1sV+Eln)q-4 zI1~R2ZiV9&y7^Z!e)VV3Y0%2I@$QhsXg4vO2CjIhXvD`Ke-(*IfU8AG-PNpj+|rkql$!hg*D%?*a97ASf6-vC$GX0^`caV8R@Zxf{7CsiZ;I-rBxt z1=@la4{eJ4f1fGx$>F}h`j=`n+UsgzPTp><)^5RZ)p{n6upl@b5I^K9GYzgg0DGMh z+E6{&(-B^{*4OOzZ9BETDNZ_aBkC}2Aof|mc=)#m4Z&1f%jlUTQ49gaD|DVs=qFBTZJPka*z&63A{NY9Xf6l!pV@f0#X{;k3Y#+h16D&#I zVlSTqX}rQbfjCnGtQ~ANQjTUDgy6|R2)Vf`Xe$6kK)S#F#TtC_fbM({C~IWaq4|enyuRHlW_$NNtUsprUH(7hZ zp-uWKO>|wcHh*^T^%A{iI|n~3c>NpZ)DZu~j_Gs-xl28kNRq3Y!>9#r9lQmI1rc6z^fQv{C3{8;=~u$NO8C$^%r``D ziD;tQ)^^j;>1OO8x{iJg<#h+NsmQy=dGrKfxj}w4(J=*QokVZQJ>s|~Qt3q77WIez 
z7HHu6wtongh4(|EfeoyY7H&%|7AHvspS7S!3;3LOz?36)dy7iUGoXfQRx&jwY6QeD zGwgp*|LCW{<%lF_{ENdV)(9^%sYjwY1%FBu_(cP`XkePyXo}|&9~3Vkb~Adi4`{K0 z|3!_!&l|y~1@d-HWyySk`4P65KjaCJI67j=z<)}8afcHAm5)ZwYSbNkXw_hADLz<& zX{a4ue$Ck@tx7Stv{U5RKo1F=zn}XWQ(%AOVhOR<0q^iYTg9kUPaXTUHFw)_V(NcuH+)_!tdNvwM4(_ic6U+o6~XhC=me)(-m zm&B0|pYL-^d{*F+1y|SCI}G~(VS??dkcNPGJ2V8WPsGOGo+w0#3?0ir_F~0 z5A&(J*G^aq!JXOlfYA>8A@D_k8|l0q@*=@o&O^@!d^ALpXvm8k%JsItc`?567{7Qi zK6u!V?bge{ckYD0$K!MxksQmv%iZ+D*91u$3ZcOP(L3lVfApaK$GHf9hWY)Im-0(%^ot9~g&*#B-kKI{Vqk!UrkDtkXHLjqYl7E; zZyh7+YuJ&2B9!3ph*3W9EMP<$Ab)82#(ML}arMEsVp!0q=AjvK#}*3tK^WKn9)AsS zi+%H*eDm}0Df7+G1E2LDxLA5E!@GQ9A@HvL;GTZ*5q{Av+vn2BC=n(-JUfmJFQkH{R^ude6VlWrX$zXZxS( zuck2{s;Y?~E4yEf@TDI1i%`ys)t5 z3J*@fKq#Q`L*~Z;6HMktaDf4rh(2Pw$SFXsV!&g*@}c1YyN!X}N%Sm;E;aTlFE$0A zG|@SXa2|aG`4mK3&3^`-$!e{KME7@JvH}{o0F@*yU$G|HBmpM>X+X zk-3*(Ke68!Fq7@pv;GGD^RlRc>v2fD_xP~EeQ*EZC;tf^6h;j2pxF($8M59F{s@pu ze810a;I|s`wm*6ph`0Tk5B@ba6omNSIRQqB#oXit)PMDeVSk5f;5P!9$&=cH7gMX^ z1RU`g?=|%L&O6R~Kw$g0c>KIm1Fqo){ulTJ^Jad4fh5x$vlkHTf}YxX!lWX255X_K zFrj2iaxAHs`Npqd$$e9Xt&hI^rX+vh%V+-KL>>Aw@@Rdm!F*edKqLk}7x)Nmg0;yVQ@xbRN;oR|^HeTi7wYt~I=Ydeb5^xTCL-L5bd?;roFb`$q~Z?2e!<&G)y%stbdwau6HGSbn_0>jl? zl+kyke)6^s)+Bh>+2*~H!9ok+M z#dCep=23R**S*BL+t+Fh5avegm5m;A{ri2}*r&zuJvncV!D7Chtv2^|G~cqL%{!+~ z;wamUWO%jtu??!H5iOhc@gX?5QP@x9e0VF*W@h5t+j@=zKHD5*Xy=7u<%iU13w52J zuYV5HsdqYFyYdxufw}AM9X-#>&OI%3R;n*Xzz zH9~8a)8nl#ugV;ds7tus=JR~p%bO!jK@FLZfr_{z>d*Oki_h?a0? zPu{_)w7Tw$VmC-N4pB!kURE=Ev6L_3`+p7IR8B-3=`a3?Bjduhu-8fa`ZK7=xcZzM zcpQd*9>EC?x8UU%xP}WzGh?7CLGA{=l_{ZUUK8GFq5hl?yyYGyHTKpxk04`P<2?4m z5`%p=pa9TG*~W+`e12b%`*|%n9{5{s?@J%7C~ zmVTeS4?L%&Qpx+I&}as)E%-+S4SzgiI0i^s#d-j~F>?M)&MP+F7su8Ml*R~(BDa+lJX`Y1`>E6%Ge=|@10)nM^Pdw+b|*CWr< zp&$Eod?NLs#2fHpfI~ON`)y6CC(i@I5cluN^Rd;Tmyk5V@zDPJq5p7b4|r_9`-@sb zX9w?({s;Y1a(}|}1tl}!8i+p^?~C@Ckn*fr;Mzp&Wk}vB%42@&Uj&ll_x?>FP@tqB zrW*Ryr+px@IoNA>{;&Rz41Y@WM~(2d5dIe2-$f-O-rg>;W?@{}1OF0v{uG>IXd2*$ zLDPui`cq#U*fC^$Ag+)55B14Qq!2^>OE%8)^ye7ohwHzNc?D~J>wx#?^pEH9emG`+ zJ0|__^0_~7<@_)YfSJQWQ=vb=ib8pBr2h=h7Xan8(YsN9JHpE$lYjPwlWge!KjW1d z`p0lQlz$liE*XEH=MVL}f}IB)6`V&G!~Iq*?uYRLjM(&ZtR$rTLwle0iIb_h-Bh?=#fjPx+v$f%7Ca`!gTxKG%Pq_kAou$GcrF$n!U!{`img|2`ht z2le$23dPi(*^497$J!z7088T!K=ygk?#qJKbw6vuM>Deq7}A*pX9 zL*O{F5oJVu!q%C=g+}gw_`Kg=)CUjoW3r#dcm_@JFu#I&_eqXXpQoik0|$rxk;4zy zh9&AlwekMsc)i3ifO9t9k1t7mlkw4h>JQ@_;1z-f5fj2IDSy|hO);PxgkC)HU;OzW zaqbAqLrau%22HRuHVAhD?qh&go(v)&%tew<@rPfMC1uUo%k`V&>3rYL4)Yy?yf_U+ z_Ocf#kRTtBGJ|Ys@|y+roA`UIq5u3*Ap5Ly!}>IsekjC{-AqO^PLdrXnuknA@XMo< z?^}K}AOc&VUVj4Y+StN`B-?Z`z|1RdIg633zm<3_&r?Y3A?|B}PmC6eBj=80B%>7mzIt+vvn zH`dw}sVA;cYtZKH1@oo7qzt;MnZ`;*)B7|1NEdCtf9a)F9{bBxA92+vy>Ldx-5S&{ zMGZ{_FKyypxyZaI?jS%p237z28X*ngl-W%i`+A%KJ-ysvFT;3Q|)cj-;f<`BZ^|*WD7f?Sd(flqnw&;sq8h>EcdJp47oy)R%WdL4bQk zy$fo+<(Styp};7Wt_6ir**F^UGe*6{@D$fmZhOhjBS3tvF7xU-z_Kt3g`E`2BqU&P5pI*07{+K({nu^lJT%T4| zbY9Hf({dl}7qj!UxJ0}2Y&Feak+ou8)A$|<=jnTTo}Rt)!=BKuqq};DlP){lR=3N< zy@c8#k0#F42SmKlHUsb6|Nrmze=w{}pnsy4@-A?8IhZwJc5q6^HYu_b7h6H@$0?G({3bqCeJk6FyKGUlt1lgIVrhmXW z!F@8}g+`DIm1LhH1q*pdJIq=qTn|_r?vWstMUyhXD-H6Va98%o8y$X!vPe+W6Aczo zz;T>uu9Y-AXwCcYYxAQ9=X*muK1;jw@6b4~$D#u^*amWwK6z%p3%%kIkl!lIHPGNK z_vAPs+2jhCqye@JP$!I!`fNbCYk$+b%<*NY-6S@C4@CUuekdT^xt|g64I(BXxktMym&ZUZLhPH{W$L%01a(hBNrv`B+u^sk@3IpW z`q`?*@cYP}V9&nfab1U9DD?IeJ@kDiLEG`hxCl|#V7f{QQf}0@PwLV~a(@PoK)8b> z6kgaRA^HkWuWUR=)u5aSAhMu+1URE`EuZfKg$0h< zTgk!j7j<6p_`H-2U>%!fB5GP4r%*CCSP9Sd@yyWik%_5HWEW`_neWOW>U-&pVcfak zGXYbT@BXwM`q!yxmId0EbbnE#`bfZZBhb`9Tf|N+JOi$k5^Y%OB>*$13n(`)PdD@n z9Zs|1J;i*LJuy)PDH#_~6ohBV_YcT;T7mHA^PWQ515*!Z8NCw>u+0n{l%M`(qJRBq 
zEI?EPV+%^sOXL@zp3%o8Fk;0{P-lV5=cpq%FiD#&kLVAieq)8jyMOI+(yq3z>_XIO zp-p7zy9^@Q<)7WNA^U8UWw_5z9l=uUy)5t?iM%_?yB@|r+Oa`?9yZuADF^s-65p*8 zoZpxxtd-mc`JgP}*v9+e`KZ+80vXt(&&2r5Wp+_QO>t!m{Sfz*a8J^IB$N}%glUL& z4SI;4pbw_$3gZqcsDFZB5LBk*t^}>ewP|PWK=cA~7s+?sR`k6rB2m!3Gre76qTN5| zoCf6s(rEHIFAF`?Uv+V}OOcI%d1VRD;e8436y8_DyQe#$u{lBapuCAW4Ci=x=nq!m z5n>q|eFbw2&Kqee7}|s=1t5P0Slz(=bxis%X;U%};vRzTe}BG5|KHejMBZP;eS|;v z9rCOh8F$!(!y+JUihhhSJoLi=WH{JoXQXY>j>`?QP{0ZB>A$=OSt^{*cOKf))o3kt z&~9W*iKJaipt%)cN+8z+%ZQN!7@{%Mt4!Jzbzfq!;TT%7NQ^9P)lKIa>p_lEN->Kn0Oa{i3->gPOz^YQ0=-*x^PxJh&w{R#aF{eKHyQ4xQeC5N+rO;usb8ocfuJk&&SAzFL!~pLnXm~%cx8VIWydP{s@_sqai=Xoz&V$Sl z=XKOC2zto*3g?y2c~ZZp;k<$R1#q35zu~<8InOkj2xKOhwi z+KW0gmw!*pmk+Q&Hn{GN|78B^hWQIC4rYR%ib=qWUQS9xz@t95aMG0?)6CC z68+D6i$h=kx?aZG@YnA_=DdIQo}cqe5War?Ie#LPtxbtuI^In~KU$@23?XRspT9rG zute$$*Z1vHezz!JjIv@qL|^@9>jcUplD0xbi$UAWhB=fE3NT>C4#qbbyO@)7to2w& zPh`Fhh%H6dQJm}HSL6}@BJDaS>xq0v?Q&49YN1 zUdh@_ey?g1DRWBL0Xdm_{;H3mKHTQpj*+k~=9ouidm`tgetFa>A|p8Fhcyjj@h|Eg zxgf;W07tA-N{oUrfINGJi~&-XPdQ}Le}5HXFaA{yhXUTM$6rz}PLp8_{6l%jT(lu5YW~p5u#?E%uvvPgCd|It)8a_K*}w+= zxdt{JnY$5vMExUO4Q0dlLZ8CCiN4zKrwnU~sKZ_ccg)aMC?n=d27MK_Cf@V+bAPy> zjAN`>-`57H8?SK5HNG-~J3)h-IbyM2earvP$GM3%#@u6IZW!j-P5+POwlNQXwLKJX zfml;e$A3|Wzxp7M@nnwAab3Do8vZN-xO4Q1BGAMxH1BX;I5y@#p8xZ8+R>B$bRF44 z{HOOL>x4trV-ICUq{T}fHtfZCBY)@5W{b6+%&(HwG-7@K)3rRrHvV+2jcc2G3bg9x zGhRsD{LA+teGThcOXhgztL;DIFZgQrGuo+zE&A0@U@zJ|`WNODv;p($`2p67n%E=1 z-Q$033}fE;&&LWGA3aRjlGx!6@|bLlhn*PsgAIu>EFFhqnb?niJ$~>msDIONSYOaz zF=pY7moQ#nLm4RZT9CtE?*}{GUe1kv-eD5+NPW4)hC`iW|3La8_7doO@APw?l)ayW z>5}|5C*x(ONf<-Kz9DutcKYT}&wY1M(R*F+O|4X9u5QBMzV4;q&MF?dSRv`RskJ zA3pCs*AJil&-KG+*kvTQkw@E3T3_e^@y8yc$NOU|*&ucSzdN}O_8G|?`ns+s*P&sC z>+oL0&*=@7Q_7ATAd`ADp^`FmZYvSLOhH+dE?`886DaXMj<-oIt>(Fi_g#Eeh z{&|0SxbFCQe^6SH>*&w>d&70l&-;H}N9;1Oz5h0Ew8VbfK#|%ezg6*0_L4JL9(@wT z187r>ui-b1q>=02#D5=RstyX0RoG);zFpf|~no0ka29p*y@Dv@r(f_6GMhzK@X)u_L@k z^mpYL0Sxd1X=~#i^Av-* zUYm6AUr0^6gH9F_JN~9S58{pi!H*yfmFUC^Bqa#c3xD8W(gtNqWdFifA7J-G^k$Zb zD=G-}iO}O4&I=EszXW}f7I<9hZGC?K`KQCzIUk z;CKHyhSj%()ZF(mYBdMO?$m$f3aC^` zd=$UKaY2qD$&7^CmVA$9C&wg84wvBHe=Z@d{r$Nh=f~Vhu1B2WNaDIkN5OLvLgoIgfJV zdC(Uk&qcZBU(fwd?hAf78NVa-pn6D3$M?9;zkB^ICiMwM!Ir<0(E9LxKoOFV6v@Lw zJtDr1`vFN#es^A?yda-KeGlay>Q6oEQKcJpa9<&eI4dupjYf}GcPlnV#$~EXQ zgQeqte*gH3a@|QiqC8LXyFXr!^I!d9IU@Cm_cX}wQ11YZATJk7_*eZ5?RO^ah~MLT zWB`$N#C0gwmb9CK{2o$%co6YCh+KHsx_<*df$cSpQJ)NHHxiXZe44yRj{B19fA!PP zIO9k^Kz}%W`Z4Z<@2_M$;xDH2{4@S=zoA^8@}S={07rbvIrJx#e<%;y(}f`0P)?wK zKJEIa=Ylll5BL4_4?sci{xPJ^NdG3+lkqe3!=e9@I6Q*b|D=4l9`7+B^^bbRd4IG+ zO#TkZ zJ)+#uHplpE(Oy`xQO;qU0r7!x&*47gdW&3#@)Ph4j?r&Fl@g7gTya2THyDJGG-AccVPF6!$<3Ipl&nv6S`R(~|<=NnQu zAiZ7EuTaR{QYLtKQdlfSF%KLGT#hy$k$Dk?PYC=QX;6R{GnfRxIHp^05sDG10f3;8 zhL)3>V3uI(8@D7bO?W2}rw54>j9$>k1WS1JA=*SQC$@MY+_)$tFwHeE+3@w)G<3tj z7luU(8kH{iFOef6?SRuD_j3d=hvOcTinxNh5otvUkf}04^L-={fI%zlT)*-j<^!v=Gq<$ zq)`sGd?TI|G|mI|0&u^ik&qG_gD(_~b++Zna(^O?1~e+kt3>1NY5o$+)L=Ow&5^*p zG06eO9`KMr2P`57-hWJiTx`o{wgMa=M|vudus}+*ipNiP&jZjo_I9bB{n-_q4DVHY z9v@4f#d2%*bso|q(${%VK|%f_@W+q9{<`j+)BMhIM}*DO<7Y)0tQ5BtD>(C5AO^?Rwa+Q&5XivpRPG!!7;fI+w+ zSX&QMXAHsK~W?yyRTicxZb^fcKGMQh+U*|pPt8Kst?R~XPh7{oM z-+Q|BbN>C+wi&WZzv~37^&$}TpK|IA-mg4=zsJN$kLnKbt6%5A4p$vi+xjYh9+X{< zzt7+A5tIx1@ZC-T$O?Nre;<-MO^(Y)5h=y{tn8lPfq%hqQXKa@9qm^Ik|8V9d4BM5 zZh06 zRs>V$dVicSGWi|;s;5cuIQU(1`fjTX7S-4Nkt?7Flf^Tf>SOYbKi?y%=Igm*3w3qc z@2+Q*57%Tr{WuHxpX>AQzN<~`pYvyM6!K^vo|J2hj7Ze&tVR2@;a-2A_rv%2uIocT zY{Gr=KKuGR+}mrYVV0KSRrQd_SgAbdVim{xAy9UcE{)fJhTa#bs%9H_ ze1GtA|D6BnM=LOmex0Ya@A@0tKj(MC_uRGmHD<@i8vQ;G3b)-G>=yZVKR^!R_kEC0 
z125&I8wX!ySY_e&JtoK%{JKXlalX&f-gg~=f%3bq&p&mrqJQdO>SW))KP%t!&WaAc z=Te;iuEVqP^A7Y+ADue+*E^v7zMl`U$$!`V!DtBdCOPk5eIeemf$s?_+Q_5b{I~z* zQs2wNS`fK!G{@efczhCO=@xNUTr|AFu`c$Sx{~yL-8}0uO>+nea`|x%1 z=!@t-(#N4lPoHnvD2qpc zzhg7rG4Omt^2~14cuHIGRy(q#B{Q?QfR7JpHw8Ohl)NGaYeis1x{v@B=^+o-U8GG% zZErIqddBjlVXZiyDAK`Ng}JT-5Pt+d@#jm)-&rbk#c;f_dv8DMvL&@H+ez6)#=II^ z+hlY1m;EC(_ucJv-V2M@LC%U19Ziml`yxBO)<%?e3Y2ee7Rx_o(gIRrv(5WeP1)y? zStQ^Uc)#F^)4+>d0Lki1dhoh-S)lt7V{P|#bx(?WB~PAa@mh45Ox-W6pMTx*vp<0Zht4*rQaQ5v42R~$z;4K%Q~Y*I<)176Cy4dQ~)vPKC{@d23 ztkj;2HJI_p?b6t$<-O_nd90_4)dM*}RZ80%uio8`%Jg`h+PjxI4;lVeIHT_3b62@Q ztf8jf?Z$_h&%fNYlN^$3?+H#_~Dg)B8;ZpZrMT zs)m9F3;je&-K}9Wqkr4p6iuDPJu)V>_^ey1^jJ_vVMC@AvCGkLD13--h!NU6C@@myBlU=HO?ki@5sPe$`2EGaOxD z?y7Z*fkv3LwUjH9YIJUb@ll+Q=U1Pb?Mc|$7SfL0X_4-97k^=P6vMUYhc|QqYIat8 zn{nDW*%fE6yVuY>I={ABwk#>!G+3?BX<3Y8G1Ql){Dg~>hy~ey)>~wGOUQl}=Uln7e zw5BcZ7AsxhM&@X=iALq@_Pkqn@FZ^7)8)9HJdfF>cto4@XmYiDzJ}vx_MCRf*?kB0 zEnY16-QlHK>I%G7?4FTlJnM3uDEAbUsJ4qJ2y`v#p?}_56w7yg5NSV7aQRhTD);+@ zR}|k=)(BOdwCi)KSN&e+mCU4Ig?E=NzbxKtyUi6}o_O>|S$Mg5e&4L>e(#oxhHa8v zvkjS1tEAFM4=rUTmv$OE+1)R^{L}@Z9ou#x`n@Q#NQvU(G1HxNf3!Q_UGLRo)Kzj> z;ktQ5=YMG{tL3B5w5>BT}npJSL;*JKuq#1+aQi5D6oB<`ZXK!% zbAO%i;d#FIOayK_C!)cs`fgM8?y$F{Gi7}tW{ysJ}#y@%^)t=dhrYH={7tA`@fX>V) z$Gm$Ty-Mte`Q01Zo%@nLT$Y?6-rL4{=hBh*yikD`w)ys^x3B>c7G1! zFy*|mPuE?oI>u3S3?_37>7B}YC0+fsbl+Fo}K^CUl=Y2hcAAc=oOL0|0lh0_Qu_wBfyk#co-l>tG&f+HVvvebC zjF5i)K3?{sKi&JMOL+~kB~f3Z9{be{WXqLTZ4vcTu7`?Sdj7@U9k$bL6dL=-wI8{& zZu$0=V+COBtG%9HZu~4PR<8RHY5C6N)yoEO$H;;lJEz26nNu@5Z)^v!wtp;sv+r^= zGQH$FABA_Ay^@`{#d&s5w(lEvj$}QSyGd59*UP>497%otcp^~b+>j$BXA3@*N5N` zChlciDbZcJo=$830fneSvS8=f^KqK;PYBGovT)x80n20zMDO>AKqbV=0 zemkse>X?)<6riD*3R7qG+}8GNA<0gy+<|;nRFg&8)_u3@uGV;ckbg`n-(Re8ovXzy znK3!Nj&na*^EF_v}CI>QVEi59Bt!@;yi@IdNKP+MzhTuSPHe zL*Vh8y(bDX67Gr+yr0XEJ3q#US>3HlrrSLQd#<|oTWxL3ZM3lBN*-XLo$= z9R=I;&JbjUjeGNN>!;$W;9^?R%9uVL4I8q8Id%}D_d*P>XqA4qnhMAG+^nX^X%0tm z$y%&)Ga4~f=3*Hhim5mjZ{|r%#kp8_TvR&{D#pINb34HbAAj~+08@Kg$s5UhjgH~9 z=U-aDitkHfOlOWbT1W52!Wr>nCV41Las!noM5TpHoG1C|bm$Xl7oO7ApSKJ1ks!hN zz!&`GSe*Mu;e-e5Gn?=|20!vm-tk9w-kS6BA;K<6s(Vwv$3EmCwtkW=efoA%=94P2 zqtk|?V)weS%70}(RkpUV--A*?ZKQGb5>9U5o6c=9i6Yh3(wD~BhkF$AoHt5#N07!k z*JrOnjQ>oB5rC6(u3A@guuQ<`Zq?4X6c0P)8H}|`q@sg3@l|6dFW>>y>3N;aZjkhu zZ6v@K1nzpEN=M%8UtOy#p>)u4c^Vep#P{EVdu&p@seiWsa)5q*$FpMyFKWg z@azw*b9Ilm=L>}`A7P-D<#wjKu3pMpupxpmz}sP8vtP+TQ0i@E#D!eOtMmL2mMZ5% zo2=iRZ_lxLZs&0?D$#>Z(=i>KbqU%@KD8IvrFj@b-@hJpw+qOn4-l`3=-u?GKP$JX zdcbsE)_-cBm&s~InJXim<$n1-ZWgnOHqA=R!!XAXkI zdxU)W%et^11tt)EF?QCssyMs6TF*ML7C;Fg1VlXK$9!X(rOWQQvwdZE9KBT@Exd@P zeyvKZ-Qc9zUYsIZuPqHy5*FQ`SP~_^E|P$&Uw_L^yElr?g1PZ=hOIfdm4|Xek8TIG z?0Z(#^Ejzb#$0^@>2utI8;|c?LkJFB`|zGdC_v9Z-@5%&=Teti97pjS4qBz1FBt=Q zQ;XvXY(4IdiYzVI9NkC4w9FM*`x{D0h3M$5fiF-FmbIVV)JMLS_agM`m-lWeby*Ql zs(*!W=v!mkDso#~OXR<7>D%Q3*Op^mz1m75Z(U{LM3HN?qt;bEx!#W9 zw7KP!W6G<$!kt&MS$Rt!%6!`OV`m!fyniR$}`?#W7@29 zIjs)q+}N$kTJcxwb*IJZYrdJJ)APci`qFI=o5DBh`SMuBnVah73e17a3>`UrudUa` z0MFbN($R{LJmc|YQu+Yvn{w-qPW668cWWITbSEs&9TjiNx&puTbFbfz%6cqmCV%KM zQ;~JD@oPjMpffZJjjPkG46~i*H+K z!Zhn{_929{Omm-Z998LZ=~_)C>s(}u6}x_G>`qB{-9|bf!Oh;ywMdmb3Yt;!`jOs1 z5CVyrdcEJ=HVgDj?R39g-GHZV*?*dmMjALft%u%SPorbS`$}|VsL7OLFhCEIL&&8gMn|bIG9!>!|Wi*<4R8HOfyQ z<$gV9JL^%jRXODgpwRd0 zb(A}^GUV1#t?x6Moh<$KI^wU3wmMOp?G4G5QPW%-y0)JwbXTuNHz{p5D(b~aWm@(K zh8g4S_Elr7{p_^K`inQ0rxv;9`?DbO+Ru^}pyk-SAy+17F z@LbF?+Pn#VKJ_+JsrN-LL@$$CCj2e{B=2EziMKVoy|8KTf_vF;m?kescc(_j5wB4*3)K4a zYTv*i1A#E(CiIV^S@I)VE6fFTozL{g412rS+?)(~l-wg}gzaprH>;~Cuhp@8sE*cc z&S@|>1$_kHm1frakbj!6YmlCxf@kl2xq2YmQouHdO)I0+)^k-4cDHsknHb}RZDOVL 
zpW|*nhg1b)fKpct58c}g>qi()5B}sGX@Po}EvJb$VitBMXrOJSkk0cEwNY#7$~nOt z{ye7k^z6??4-!{nP)VALk?OqTuxgxYa*kP6UStdA%+KOg?0=WOIiFU$i9y}YQuCbl zhRf2L3@4J!m2gtn*@9i?$X~nlkGFQekCk##U0-KHG9ngi%fwwdnI5G`iY{~SykEC> zrf>QR)&m7Pr_E|S$|5JbFj^(lk3~IwMb~W{^&DTWxhpoY`aV2v;zYWMPp(t$`;{$Q zO5UXWLcRvCo_{YNAXfyGcu@;ZZ1&6Ov6uENcepH+h6{OXbE?`Ym}=4*ePjA^xs{q4 zi~aHXI@!gk9YxJ(eBDmx-qrI?u{G*nSKB#7_c?MIDfDILb$G8{!D!tV_INa-n8oO< zQ=`ZS-Nd1sENP~jNe^%zQ44Civ)9vksoH(2;8`(Qv*Q-8pR8`oGCGHHC z!r5wF7w&XlKAzb;*v>?7#_Rxuh|c$7Oc&nCC}(Ez+`Q@ySMZMrDP!qIq&exv)jRoa z_P`A~oPx~$$%A@9(J1d`bmfemd!)jf)HoT}?rLi73+a7!wb>L5&^niNnm1v9O`dgH zUFiMhRe#OIy1B^f6!_MJW^$P|y@wUgdUk!b(%MdQa0@uyEbE@*#2`jl5B(-!q94sp z%*L~9e%j{MoX)S;9oNv5Iis_C`pzGD%4ykUJ%(_9cME-lYK2gIUXMsX*ZRB2S&Lit z$bwyUcHY^ITNY{J3Y&#+GaY#?Tn;H6trZ=voPT!ZpiTCZ@$7J-ycJe1;3|c^j`aFg z_6|wL@#}Y21&jXaE};Aqj*-e=gmi_SeD^-SYsgr^bLF3*M^$&p^I;TicFbm0HYyY! zA>)fxcOv%q7dy}$(eREhpM=P(#sq8g?XM<;F+EO#Jp@t?uA&MMTls-)Y`dJGlkDe0L8Kame-lIK6m61A4b#vXzw6Nv+2#t~KYU!_vrcCq zchy7P*^IZ75=e;g*|K{H?h$1Jp?^m6-iCU`9?U}8VRMq6m`(87J0LA%E97|XCT39g z)vM$0AV{`Bj<6G-?^lo~VJ%!I^tfUa9TW!lX2EDP5Ss1YRny6qMN_ld9IxStxy$sc z3`*g_Q`C-`Y+5r5yZAou%%Is27E}Tiw|9 z1z7Mo8%xguih+}Qd3ssr%e!r#Ik)428{9$GT^WtU|nxag+Qm0ei%%e;54se_rp+8vLZ z=@>JAJ(0)d>^QxFEPrx^eULjjNgQZ%A1Y|IF13GAn^ZlglQ6KD)9Z~`i!WKo#-yV0 ziJcYpS)V^CBb~FN3&(rdipLT9irdKyNUY@)J3Vi2C!VW}SwU=FUu)XOzVk{*OCP2S zhu>z=jed-KUTk(!QUvoZytDGy@Z)x&VTF2{ee27ZkzET>*Ko% zC%}`_8>4bh>x;0)E?5ai^7dsjmT@kt)fA2%c1oQ)&+@zHReHVjw{Uo!XZ<2fCsR;u z_(8j_!t2&%Ej|XS^H-E2}`+=RkYV4>Xk1=0(>vBIu^29RoS3z#Qg`5SH50vU9i*Y#KEtzI#YVLt%^`muEj^n(&mvuCn z@6j>dhqZ02AAbF$*FtD2Y*WyVRwhqW_ax+`h0Gmm@QgQ#Tf@)NPVDN_ynPprCtoTF z8+or!v7zDg$I0BeY_u9fOY$Kx+%=Hx$}Fg+x$?w5Bxy4htj?hT{u*Sz&q zURzrc7k}F5^z!<*ieSm|wg>MRgNX+!ZQ*>EH1=6eStcxIo3Z^o8~$!}KwicoOLkGv zjX=5bn%NK0x71TTWx!3*v8y)wMp0-CpvANA= zMfUD8H>T;t1sbed$tiUUGHlWw=@tvfU+^`|@8&(+f30J#+%oqWOv(W7x0*E0^CojEJN zjBa6gZKLz9Woh|%P{O>rujXFRiFSI2I#&zkC4ub+qT z>ONn438(c-6@{z=s=<6@adj&p<8RsQR;yg6GT--0JK7 zK^CS_4~Qc25gpUw_Oq<;pC`k-1zS#+Z?lCy1_72L8y65|Z_ND3v87oXPgmPhNT*rO zZgij_?b^%(_qL=P|Dn%k+hZliMO^eLAA;Isqj)Q|^lE2&9!2g$Hec$cagLEVTYnxi zMZW<4n|5JU8Yh=|VOHAYPur)!o_KhP$s4k7*W`n^BhIpL0JC+aFTqz$mC zJCG!FyK+7)=V5e9$CuNMQHysy-G5h>5p33)`Cd$uXL#4t>^Y*JFW7DyaaDmV@v>PL z?>lpm7u;cI@y3a9$MgIVOc-;^t=sM8>aUK;>fD?)Yy2E@YB~l$&C1i_$@(tpS#{(Y zSI3+ArjVLd5XSZXw(DX0@3zsb1jOvZGO+F!@f-1G>@@qWKM!7TSNCU~pMPYp{B}vY zCwtc}sb`+e)hw&>xir2k1nl}EY~sPolF9JbZ#FM4D5rX9Ur*3z1vaAMSs-Y4+Qbp2 z%~LyOPZcg7k$Q7x#EoZ`EbVj$GKc%@9>5>E<*rx7GS@TX@KiV4-fhbujhPi<9Vhd> z+t9N`Q+9hBi8r0Qlg&K6pMSL-<#m$l9vC1di&~v3i#O9F6Z=tJJj2y1V>6zYjT0X_ zMM7}Zm0p5Rx$|Il<2?Dkm>nPzSWDUZa2jU`eS{>jV7T$5*-u_$ol&!EGv3|r&~Lqo zXMI~IM;O=(KNz1S$tje@DazVXdu^`m^MaerP9C>89~Vrs$*}u!*?-Iaby$QK!D?sg z?J8gC)2X2PS~)pUQ}gDn)&A9t^hqCk^9?+dM^%7FaSBdXtSh5r z(%!2107F2$ziemdqt-`#F^6WCmE*!wN&ncjcLGp^sO z{iPXop19SPTdLm7A8SJURaW78J(pB&-(=kEv>Gqpd%?LF-Nj+b2iJeE&23Y!=X%gw zf^IK8A(!U$Om&+u)ajlc^I707TW6{=iB+de$=LjDe|Oty<*P{&ZFk0)dA)*%l%8|p zkSyu<^6a*LD-_P#kAl~fT|!$%UtajPx`pP(ya;m@UYaTY4w)_Gyeaw$5RsV~$X|2LF_Wn->eEqlpwxXfKDU(AKRI7{ zbJ1cxZy!LT3&ni9!Gxzao<|ig;d#6?`Ivr>(`FM?%*&aKZ-9TLO#E@(Euv|ZCzHS! 
zU*c#HB0jY)!TstMn;=_1oG!KI^F%1#t~(wxZMe|4S(?vj_N`P#&T7eK#RSho4A13e zB3(`gGk-jq0@#F6+Cq2%0>oVXQ3Z@$r?=P!5q*uY+5To%7q&Gn91F zm|=X1$!`_X;Q%}Z^*ko)|i9D3$)n(meL^xO)*acPqhi0tQa#`N(1WsH6T zoM3)6XTo7IZ<3Qb#u&SggBHekly`AlVt;w(ZuwQpF;0IS^9c|4G-0-7VILOWES_Xu z6Np*(j287#&Q`^-K8ad#i{H9%gz?xb%nnw4MnOpuba{y*&0TkS*O_tCC&5-~=&&PkMPi@rN;_?{>S(bpLvKSy@qgoe9on zUruJ`X03m0cIsOlJveA|D51so(fVkQWUwwK?{aClwOdFod^h9MF_>Oph3q6I zd~R6IUXM-^`0ckx)~vJrI_UE~lXLET@9xL9qhaLqzV93a9iK<#tzu*HzJMw;4IY2% zJJ0SG#WTOgw{QcfT^bk9CQQQPKAt~RFU;#XwooCMmK816smdz&i_8~lTCdLHi2W- zGxh{qrx>v|+9;>p(eiFEzL{2zx)y)X@9*iTUDX>H3J5bTD7~NfX&vkF%$g{?xIgrw z6vUUY!aC#SX>{8{N-jdqwAlv>>IK-*OQ@@j#O%6=lkW81oPY})GAHhNE&6+SQYROn z#gAR&Dz{)id+ewQ$1E1>Vs@OT`?%tGs`AIzkvRd_OO4ujS;`M zx6<_7aLV1};9H*UEncqxz7h(yOAUW9E3a@gKA&tpTfeP`<`wY8fG5q`JWfSPs}s9I zrsm6j!E1aq)2U~!#5E6?wyDz^Q-x90KZ}-Wp+?h)D#OlS>vOMq6|*-gW?pv_doIVh zq+U;$Ioij7+WcU5ZdL>4@KG=~2iW2xLfM1;)1L1LI&y=H9L9>6ZyvHrErnCNN z)~EG3@1?u4g7lttd;}*Inwr^rywU)k%2vBm`R3P|G*Wr#od*%y9~8va zIHOnO`l3OGF1Uy*YMS3(07{zU>eMUJ>WqNntPR)N=C$SPbk9Ho?@(8~Qa%orJG~<3 z>|`%?*+NM)K4Z7FNtb`xV&=zA#U(5LwB{-Z^|WJWamg)4HEE8L7y}GcW5^gCv)#lG zHxuQBM6Wv|#2jXteAMUMLD@;r7;LPqEw4qDLn6#;*BK(G#oO_tz5yvc+KeM-w(Z{F zp1)B{&M1nrN-o)Kncef?$OTh&T+cD>vC7!&-SK))Pu8)x@g09NdxH!kvM<%4zsOr4 z=iY{6RKP;$%KmZM91vK$&2L~|up-#G9Y9?E^ppn2^Ju&l3*AK;|9v!aK;aUw_Nbrt zwMflKUuyG2lH@rx2^zvzW40b%yt9j{SSPcs|GPBe$;vcdNf& zIX-5h<1KcM-<9iK=k0bQZydhH&~8QyEIwr1V$GhaS@Uk1z4PYBrNKYSi}aeA8g<{W zx0xWTROPL6pIhIKxrqdzPVT)`!I*2serf8Ssl=O;6vltnT+%zc9=-Q{wZcG?#wwty zFUU`?4j^!KCMOs4wd+cnT)G}wlm*o(Q3NSz=V{-kfP@A;RYpA+Hm3dagdB;kzsBNy zwbg2E1F6o7=CfJ-Fv>tR0?RowRCxpzY&vBZ@)I04lS0T#c+C8_>GhC{g(qY_xhd@H zaE+yP)YE?{f3bGl0_v&~N7oBgU2KAbk*_P^pdCR*_Ih#WPE8o^cHNkvs*Sk419iBY z&OMrZt{?`?nU^>mi%4?J5!Rfzo{6BRY}+%*yDBx}FXPK~WmhAT;Bi?*b=EB$z<+N2 ztQFKlYCB`Gg9(1V#dF!;zOzhqDwf|)7Klc8ZGV4VAU6u(t-HW>ahZ&d{2U?M*ggjR z2CNKh?yuKXL5Q`SjsjXh$|n}Pp94CqV_j6|HC-EST7icFwRK*#lUcOB7`GX#QX7Y@ zab*<7PLP;1S*h7 zku-llYgDy5uv0`09uSR-vglv{=w73}Wk6SOG5C@GAq_{unvfE7Z`lad3^S-}^0CPX# zJp;iu7WZmXS*NyqX17Wz7Uq%3%#h=+rm=sz3%3yal=68A-tf$sej7zcWuj)MqTa z>)E;Kcd~oGUykaOn%{a;dvu4_<6L0|+1rkC_=zJK2df#WF(P%#0*-&I zB~ur2${g5adFr-I#g7u3ejajf%^eH*VPD*5`FueSwNKZ(tUpicMJJr%pe@-O??d1F zvDs+hdm*UXi!IJ(6N+=Pqx42i-L%q}vfR6D_!lzwV?Mmb+%;-FX5sEf40; zvdX-ZOiCi3j{ zptrnzVEnD)haHar_N;73GVaGtN1FJ6-iEHyPF!>^)b9g1e>Xl35=b z|IFs#t5)|Qj)Uar<;}DG~6ib$D4=IYH)I2)Ev;8Llu7%`6M$5Iw5E@z>>Og$(=IlOva#O8cw-Es->5q~pZQ&_1^ zBf)$}-qU}~*WisG_9^By0^?IM1dqj&?P?6SV3 zmmS3yMmU~NcX#tWx`U0e2R#kIQx*Mar+vvF&p}mQeYZpYI5mc}Lv^1XE$>|Iy3<9i z?np5X8;YLnF^Q~wcQk*gRtT}QhC96?2kFQGcYWln2MFL+dewjR?e^M@Un?sX`%MjZ zMhFeiBrM>7y+qPN4L0)3jZQ1b)htFvO)-(8S$%@|7(JQV_Zh%;rpjpiaocQ>0OUhI zv^bBPgZJ8=W)lY~k0n1IQ*p|xy<$;2YIFh2B~P~8?xs!A#~1TX&}r+jpKZ8o1KwCK zq-lwxcJ~Wt(pY~k+f}17zqG}~xSRX(ZARnfHM+`gLereju-9{z4WDO8tfdg|`wqZnxC>$_RV3_^eISMU+^^6eI!3$9^YaX-!3 z-P@q)cQpzgDuC69$n_WAF1HluSgX185@jQiLhIl>u$9qHFR@L#$^zOx;f1x)tw!*E)4KXcbCbXQY20pW_+$w?~NA zJ>^xuHJk{cq56#Uj1u2C8EN0ANW~ekm+5uEd7h8; zY&(BJHeqUD?+OYg{e79A<9u|#FYTFUV=T@_i&Nqqa~mvi_RdGgHx_Fyh2egIo>XIu zM`k_G{0Rko`71xzB|VzRj<~+7aH}86f_k3}UFQbr_3$VxQ23waQ&9nL)|vO!v+`TyXdKKO;!8CUNxpwI z>!0)`dqo)%-{^Cmx@V)3&QGPZ?T2sHe81qg$nD?U3{QU$~ zEzU0bl-kriOSgX4tP1ZunjeCGJuB|r0;bgc+}W?!YPsodjyu}aXL{r0tNcQ{5uU7i ztL5B(`_p1RrsplzyXZb)G>Tn5MYVtGq(DD(hsB|)q|Fz@ z)_O*VT05;EpDV0&37W9+RXfnuc+=4L6SXUlaQ9fxlI#7JPA<1xt`vPXeY}7EZ@$iL z*Htau&hL`Dh)CFAbGXO_7=t-uz!y~gTXv~+L$}piIgaNOI4~`tLB<>jA7{g z|6BV$uQ~nQXb{~s-%A@0v5SSwT|EV&(ySL9UO;a2q~LSmBfH45Z%g1QP>eqs2}SZ} zyX1ay)LXZPIte|;rSoA7s84^0=b%R#yff0hNki`O`{zq94ql+6d`)n1-SS?{7B}J? 
z`7l3IVjzk2#nAj`-dEg;@1FX45gEkBdxBho1wF|NTj1<X@@QL%)Ty|O5BB;PP9VKRLQwoc{=}A@3@Zsf+IrEoL&>hS#G&gd+u(er7fPl z-`f+|@AtElt3{%O>Sliu4^Pv{DVJ;Zf>9ym!cHr&-O3=L=lIrcp7U|o?+HhtzwKiQ zn{8_Dc0VF3l;wzusLggy=?P?N)lM%Tk7>#$yXe*3f3aUAnBk5( z!J4Q3$engz>7K3m%ne_R3E&Y`%>IcN7~MXTJN(2}A&3N+qwE_v9Ulkvx_kMZ72qAz ze9oLrE2r>lyM}*i<#7ZLGCrX98flVP?UI*veJ&JxFt)H?`S22dq&}USl0L0zbE?#BYgVIJNv7GIZ-?f5>*owQ>yHJcBL(OQQCRV6 zak!b87zhx}NM$W)kvl*u%-@--m@}zNN0I0jMe4qzKABO^G~A3+nU)O`@dXF1Sgebn zo%S@xn%RFRl{t!0$j9&2$M2p#H+KS^+Twq2E@Ky%!W@-RA!!tf7j&khJ<3ac zaB&X3@U}@hvQNmas7``vpK^PT&hEQ^KiG)j`D>>gk8Ztlwu{1f20|lvyPrRJ+3Czn zBD~Z7HOYAC9)>(%k9vKjDv?Jie~R~+@*XysoC3}tCU)fl<`a-HIZ4PVG65Qeab$3j z19yKh88xf>cpY9pUYHDMmtvZM`zOUS>1#>8pKBaPsVu%yUU1NFONL8+ej@FFm z0D{TGcG$prm%G;3I;$i3efyT)`uT&Odj@|{Go6C{ZW`NbNoq{J;|rks_4Jjz&FWen}(QTrNlSroL=Lwc8tq=j@Ih$iv@pC zXc5L*;TY=2DCTTVz}XM5#)?tuoEUsQZip2!O0^Wfw9a%qwus<#D7o*hYR1;UwR9j? zt>2H1_GanFIkwM@NptrLUcBf2d;rF)-7OdYBA7f~k6U@t+#ggY0$Nl?>8oQ{I8s` zD~jM9w*)I9UE<|EEQgtX8ZMI%Uh2yM3fW5|AenMbb>jZ2Ev$m{EEy_CCX;{B2GbTD zJ{g+gB&<&Ui2eU7ta{E7SznYts>X686O3_>VB#C0dYQb%+CGT(-!vmD;DCr8r5*Y? zOQdK#Wzy5SaJoFt%f};@!|8Ih4P$-s;$VL_k9wS6w?<~)lm;VnM11eNGzqHm&i2_oP{1ZDWXyn^oT(pto$G!O;q<&)j88akp z8Pv6A`-^K>{2uOyQp|t-l^nO#(1OWQzPG#vR7C%kpZVDhBm)}6HO%IUrDf~l?SCbe z53gu!RsfDBeae5RITm4EC-SeJ`uo;Eb@tR!nx$`fqcIQaU>gay_bz`N6VazlpL8;0 z{P#XHbB3=CF|((VnfDD#J`miRlus>5J$ti))0QiD{n!o2-M03o-3R$VHK&G`xu7#= zVlP=geY0?^qufc&A8yfAvPIsU#i2hJ;XwS-ey)V@ z+nL?0?$ac4Tm)S@7u$az{!FCVVcC{YYLa~aR0VRuhZv2H7m|^?{jho3mgb@{=ev2I zYwkzk&L}$HQsL+eH+Lbyh9HUiv)}0l(mysw^FjrX_)-LepS5BtPv29>N)nB}92<5r zZafXC#o1>jU(g&|4gS>pp58%VBsM!G`-aEo^Y>X!j)ymKQI~&Rzq;SjyYi37#>5GG zSCc?yV^yBA4PT2|yos#x=A({kO@EJ=vxLM-^m*WVbKePgvmr5W?_6XdqsMJwf(%s3 zY}j4hHYn9iI#$5iVwQyw_oQQa@HE*W_L{x)$L`AXAsF@)M_u;BJw#-SMa7ww?l?W@ z=tLZ!5h-i+tXF@RAED2uU7B)p^uCLFB$pEW7~BBukQuo*)so!d5IFa**qw@S^wSxv z+Ed|2kB=5R`%1=tRb`{X&1C71LhTG4-e{8pEpdJLt`r|0jKeKiKbo;zuK~sKk7@*D zJ(D2dH=I8ATdgqn zmQaPEs7_1FaFvC^GKcnWo zvDoT2CGCHX1B10W4Ovy|MgFed=;^dr^K*Y}%HK~gZC4rQ-^P2%7Grc*u)k`##^!bN z80+=Eq_)c*&ye`rdUGWi8e5)|F?@#V6B4txcjz=N-Vh|WN9L#GH_v)>)oyJcTA2T? z4)v^lhLf3lIdld(CKzP(%jv0r{i5;LsvnG2D7F@2@+s(j*>hEdi63^^43*J3rMvY9msMNA|Q?fPf9D*b7@6%(OUVhR0Xo;HT zUVmr{?y-A7jPtSgs*8I1Q}AD+PVSgI(dUaF<5fEjtYh~EbNmP)i$%t(9H^6wt=WSe zDc1R=-JsRp3-UEJG7Oe!I#22F7B5cEwI6?E1TMn&;d?~6DAQ7AzC(@jo1?D@6qd#W z9)~9+{|t`uHSv0>^Ab-z#%AWP`38AJ?(~dsAGon6uW>v&O`Paf~oI-z!`sE zc_Xbiia%0n4Z?i?p&vpNe4+~YGeTqfycW-|J8z+J@_CLJ6$D-?Ti=4MNaE(7^Xf~7 z{G}~5MHw)m{%MmQvPMTD_b@(>?y%&y`Rt_U!>*2!=(+12C#EB6cgh(fQ+SIy#OM9U zKCNbO_gdl5gyPC?D}lpMvMc&Mw9&c$mw)5N-vx7+`?|@szPSg^+T2Yh?#$s!kqK2 z6Xj7YF1JhNkMoGDZwVMkzfbYqsPTVO5sGQIH#^{SHLo%b%M@?JO(lJ|8zX!xnd6yy z@Mn%szLkOgicLR<;)qpT&Y4HsEWmEr+}NUl*vWSX>|*=*+J}6HK1>Es6QB+ZTq=oR zo|j^89V*<|BVwz38%RY(Bcp#qYK~9H<8h>~7#9O$p-zRx0^Uw!Fbajt*^=|JUDu-EHM6tck@4I>OdD{U7 za_=Ys?0o~CXQ$0Gn#*em(;VqvI^SCt17}+;`^^OUBH>ECfp2W{EZ?vA-Tmz6 zaWBm%my@Jt2U2}mVvc`th1(~*&+A2b0|JoOIW7mI^!Eh-&`JNbn1oD?_nB{R@a~Pn z5nG?;rp3=`29n`O+JU3G+~;J6!*lR2CNCuNzAH74MF_|yf|C0sFRKi)7vRGE{f|af z1epc`fGO8nx8h{2NRN(nz1QdZn6$qF^$rKr`>|*j`Hml8SlNHJ__JJZ<3|(iQ2bG6 zgaa8#+7`o zRj2pDq#d-oF6TOAtdC7d15x0j%7nAEedzBCXoUFTR=TV3@#d@1w;&9w5-JWxXP<;! 
zXtL#p-d8RfW+#93bBF}@{(K8>kK8>jz@vio_VrK2E7OmZA|I#By+I41zVbs~dYPj3 zsNvpgIP&9jc}$ot+I~#=8L1V(Uk2*ZU)- z_H!2F*c5-2=!hT;tMM+|rNni2BS76UK54c0cs8_axe7g?X%J9uOy7^QnkBxWYZN{4hFhyn}bi=41jmD4<^19#gjPSa#xP!jJ z7x;g2BmVWhu!zApVrJ9|?JoR9r^Z193ZwY&Pj{C9{q~bU1p6@4NBbCWa<02S>HsST zm-EpZ)2@$JpvO;p$DqW@lK(p2$CzOYf@8FYTyxBA(h&bie$o(OND8zw2KGzmvi8xV zHVjYE<+RCmhPe8UKEx3cVAT}o%Z+lIO;vwB$Io~JMMG*fc@9Kw%3~zc9-VIrP5m;e zO2o=ylDJPduou8U-if_m2WO_x3n0!PF^sS2Y2Un>&w&qK-Bp_-nl!zRb2OV02l6+o zw)+`v^JLw7mU&;qku;CYv=C$)I=2IUhz_8Bf$HO(?GHZBp(gW@tG`a*v#&t>=?H&7 zUM>Ll;{>^uj$`#FkU5~}Oz=ezD>0JV>@ zLp9sY1SZXMnkk1=ytaX!#(~;ioAZCU`kcBzi8m3d6((HQM0M}F7MvnSJNLh~?$HcJ zq@He7wLtwp>Te}>X`iCyyz{Qik9eiDidzJq8hV#Ibzk>~z_-bwE-tG5Ea@94yq6*U z?xHDZ5Kxb0^D`~``1i*F47X!9zn-6c^nUTqVfi|i{`Q~~g1M9kh{_zPZ^wV3uF!DC zAie4ZRfM0Ey=oeoOEk8H2l>s=G`^tc39KH=7Vnh1vnMLEXa*@bvVHH5I$@Qk!+fO5 z{Kab#YB;DBUpm}ebZEzihF#}6%m7pH4v0*D(zI-1>reYXNB%(xOBciU^J!Xsy>e31 zaUs@^q@g$B55)HpgRS^RM+Se}EHOD>r`4IiXuER5ofl5$tE{zDo>P0@dvVIKV|Kmu z++RNCW0pU-UM6^zU&6#!)a0as9yj>oY2@9H|L56TE_jfNSbZ|tyT!(LTvGX3{9~y8 z_y5g!{fA)vFaPzw{oi2L|C7t={wJ3edawV*Wd$Ph|L?^Xo!gK5v7itdjXYqT}7)-|0U7$pOf}e|h8x&eD4v4qtz2MgmfM zLHy^{KbTR$@)d%D#$C0hz*b!6iQq67=w=w+6 z_$!e7|NUOzoe=~o!Ww`3P`~ll@U@cm|6~RTx8N7OUg+H9AD~~Qf7*WX?-!M!3J}Cu zY{T>U1!lE{SDV26`wCfq|0<<2dQicA|4!jg5TrkS&i^zd!0ZSx5Cs?{P*=c-~NB$d+WdNgQ2m%oas}m z$KW&I6f8hV?AlyPgm+I+SLJj)oge@>7p3nG&%LJ2uh)c-qw!#$-3!}9g1Hpz zJ!wdUzi`JrN3+(?e@Hge)aE4wLD2XmC%7+Qwj@&jeLmgPbzrymmf_NiMSgOLMNKo@ z-RBBr7+0HN!47rmdFHLqu>=3exB5hLa?{d8>JRG{=%+PPisgVIMvt7xQ27v|FT| z)taULMTPJXxl7-=8s!pWIp4MqdzK^Sdcl*)vcrE(bFZLTa=Il`kA(%+@MeA-1fC9y zZXBQd4}MCuXN4n6hv)hp%KdtCN~{#`;(qxS{EPd{;e0*EKoy+Rn1uQ|aPT1&clL zjcL8F*UcXtV_FWez?Rw#^j^0(iB%oNq?D-U9rqOXvxGoP_5V!!M?u zLnr@>K&D9lIP`dA2d`-)Dhd0&S*|ZFzj?cHfj{?YV*B~w#zJ<;=x{^-+${X`XQ6+) zz~vFdr=ULFp}PsOs7ekAP zY9LrW)vvwu{!M-FePj0+pv)%_vo{JgAScpWHwAr7T$8=pLi;c zlivRvwU`=aSHjY!dJ|U4h4zwz)it%>+*`!0_@|59MrR) zZQtNYRci`f+xVbLkv@mxCpg|1T}-5$6VYAIe>ZhE z`A?VjncJoR-E#KFvf=GZOCFs;;?7K{#%>1~=i%QfVn6P__u*=YCHU;rF{aG{ySK?VNui*Qj6Hi${GCmlD(8 zR7+7%0jo(D|W6Q#EyDp*+qZvX-3Qzo@vtJX!7a|7R7Wz4<-DhUp|)A z&VGC_GSpMUa$to;JXQ)<;nzBwe%eg%XIV){)-;T=UE*IB;jw>a?=<32I9(KBi$ z)G$2W?rM0DWR6FLdHY?--M{|g#rm0}s2jg)F4h1{e2_=q<1bKo{oyYq|B;RHvhAW4 z=^wzI84`aKGES|~f7uG=lDWhWn<55+wP@LGwGEG%Qn-$H%&zAHEu}zrW*3#V#HBhV}c_)GoU-->4OA;BvEnycU3S&ZXRQ zb8xBBLzM9cnktH24z~B~f6&fTjJ4pd7+=gZQrCRke)4E~pXpwHo#XT4w!0icZo2j4 zaSO^r1@7x8KavwHzG`SRSDL(Bdg@D!-hVhh8O`H;_=~P- zyhPT`@m%Rj8f~7c|6$}L+TR`g1L;&If9-$kGVklxm%>q}IwzX7y_x22sSV?-bSQqUpH4rn5ISYt3R5HCLusxUB>rny8-quJ=N7A>|;!)d}B4_>fC?* zs;H0t z;b4XF*yEQq@F2jln-smC*N$>fq9A7-RPV*Q-d+!^ zVzO!6cIU-??i$6uII7YP*YoRZdoO=S*GbX`O-_z=BBO)+Ef1f4v*u6CU#Vc_Fk2IM zN?Bb2vsM3So399jaU&<;us`xAzN8am85L6Q6+~w4rb1(>@(Zpq5&if{62rPIMt+O@Jy zRW#(tD?vs#khPI%IxSP!b^RJ?zM!7jDz??1+A;=WX)8vr`>1Y@mzs?3rQy0hO%297 zaPb3;pG5h=ezF|i3kj2pS0R5Pv^9VqWj$oa*Te7GI8Nu`D1Y>cxA^nRe@%o!VCjU4&oiW3IcHk`w z_bVm587bC|miZ*Pf@lpp7d5}L)omXr_rPcGw}9gqN@&weolp9SyMKR?#|r53n5Oa+ zvCQ7Sq}9TqILDSR@cJ(+eimoV768o#fOxsxukAw4l0o)_8$Z?I#_bOS9LcQGI_F}d@jP>q;CU-DIJoG-eOm7jJ7)hN#Fhl$s>FP7{A9MX7({OqhQ};LSp^@Txwmi)0!|r2Ra94sZ5J2U@@ZFoZprwxh}Cb5g&*6I@3o zmm5PWzEW7qxF^Xdrg;T76@$hz#a>E@xGwY!8t7kl7{td%2n z7Zb6zD#}pPT>+Py*!g#`CBjO=A}aCY`upsFr-SK54NsXzDrT3yIbJ~_S2`JmXPLXR zgZkqe?c~htgVek!jgf@$8Ov>NV!v{WS@}9P0(sVml{^}F%{9$HZhit3I{F#jD<#rr z&}GJZJ70g9?;VAodq)GJo3F#pDOv5(AWeLjeGzM%`x<0l`f8BW(6hz{cgbvu(o=DO)B`*8$>#JgSeQI|I27MGgHrw#<$JT#n;N{~4f|v)2#62i=&AYB7xZ-K` z&ou+)em<+FdSaeoTxT+IS|+e|0(>*xdAW@4CIgInToy#Gx8h=nLP}`{D$3{ft=_+9 z;`kLa`8CAZ6WgczmRMSSUz;@QopPZ+5k6`M?2JhLdmLcS{BfUZzQ3R2o>I#^ejTk1 
zi0gm%b|pbp7xwG^MZJZh%_4^+h;^U8zQ4O0Q35Z&1YFzhKC@=e4}G8SQAv66`JPBD zuJ}B8#fSOgyV9qPQN5OLCybkh%|B<%L1c#NeEA_1#vOxwe;s8C*N!FO$@?+JMu%s! z751o4zO;Aig({%GCbUsjNME+6C0XFrx8r{~*#RxAeuqn^O)_s1sWQ;2_D<>gTkt4j z9e1l&j+bv9#^2Yy4HG#%k+1XI9y5pkiK&+w;(E%q+YxcL%3Y|SG^%HGe?n{U83eeXD9sFYGg<49 zSfUBuY=c6TrSEF(OuYRACVr|d1#lq zUzp~2n8|Qggl`-$rqA*OeibN{icH3 zed{0Rk7f=Z9I1D@Lis-)Kozwwm&4Sk&F=4;nAhiRhG!+X8H@g4R#Br~%8%M^C&~oH zpL_nccuW)K^1N>kO@+u(IZ;L!Cbw8Z=%Id^GIrO!|+eWiv?mE8A^38j+F5&y3m8WGk4!gj_Jvy{l5O12&lB&hX;o|r;;&P)- z2w^^M>veYx%Cs@ftvP>A(=F9*(vLz;k>j5&l3B9dslhXCvke5gYs(q9C*F9VICqiCb4MJi z-DhRP6d)ED(_9M>#`bBZ&lFWW-e8-X0^9FZ%~p#fFCXk;h;stJRW#c;li=P`UQOLP zGu|_TqzRz2;WmE;EcV!U^&pXB9|U_{Vl0MHrPA_Njk1kB>?rLm>F0L0d80Sn_E~wQ zt&}Ie-k~+J_s0*E%-_FW%Hu@`0o9+19YAOe*0a>YHqSke8Zo}GwbSXBfw6sH1@bBc z;viJtW0;0J?|GIbE{a3`rdus{bM@(@I>HvNDJn)kdFm=6ep- zOdHBML7abicx7MWONAljiD>Yp7;f9c{qfP?45gjgkrBtTUYvv?yJi8dA-e-R|F{*w zwWvlbnyfj)SBTFy8FJ;1>kekKb-r>x1=iK}+vlF-EH11U=F4 z^@?aJD=mT_H59FZ)oMTSIjRq?n=j}kW0mM3=zRH>EQlobH%ejiIEfciCI-yrXi`Sc zH;;ecrICOHeHO_>_q{=o`J8Qc`c$W~B|yCsdQ~3JKhe6d05qSke2PF;AtWP+jYpFt zg_Tmu-8+C&GU?jvg`>^Su5bwk*%K8g?l?Qyfj@k?{C4N>DBgcpca)zb-LZg8$bW7^ z?(@5=>#HBj{xIgZd~D&brXIp7cqV%xq_}^6N#m3$Yc9*$8;B%G`r3Z~2)|0QZ=N*x z3gf|d&svunI*yBoVxeQDJ>Satb=KbdPz=s}&^~?n-kR|f0ZDKUxvgORyU>IBC(y-{ z`fGGU}c~C=E}>iK2lEkW_W-Hs0&b#k9O5@m5?Ik{CQ9q zV-MTCF)m)N!)?{zN2Mi%!pxjxXThs4|(Y?)6sM?#`x3&MPI6UdT4r0ZOyw069R$zr+x>Vp$yG-_vi* zDX%C2Q?B=rdk$H;Ladm;_Lq?|eUvDk~EG2d}p~0K5h{OBD(jRqy z3R~6^#ePyOB7?5JqcE&Snhz(#a{hTf`WlUQ(}x!b*Abd-`qH_-H#y> zMF|0z;-tq?Cqxs??C4a_XU(Ae_A;MoH>uQb0Oc&wkjXxn2fL6sWJ0Vx6bQya<9lv( zXmY~pif9I&VV}DbBeI-;O%uGFotdG3Y4%3O50{akrz1sn?P`8xwxLffNrxY^h|6I7 zoUF+@WtkUy$RZu9*3rQ6#G1b^osWtBDU5^%w1_4Koyy(e?|19Hrjl>+ZijIF4a5}b zel@w+@00eSc<_cZ`<#N$&W}GvG5iKCw@NZkqP>S<5?Alg5w(nxbp*SM+cWpnX%wcvGURN+Xp_e9A7i&V4h&i{R*1y3!iR_y>k6Rrd zp4c+S#oX*4WOns(WXdSx%p>rBWCb29Bo)X4zsEHT3rgzy9_SN$ITgc3%JphhVy;3j zk?tF9vu}7>)T85QZoqu2_VjKz-m z=jv@)&bbnOkF#bNo8L!V^|(fKRrhtIET+8d44?v~_Vsf;4Ct82>pS~@b+*P%?9%l~ zB~!jZnq%Lt-$r|oRB7|(yZtodUE!YIL#nz=147u?jas0Dvryy~v5!{jO`1^LS{&jB z+w^5Y82Z*40}1Op&F@xn{V2N)SSq9qwlg!v`Yruq86OydV-;FanU!2b3_rftT+=Ys z4$|5GWy?dwKlSbf6s915-e@w5TB6nu+x7Md)?=l{F~GPafj%dO{tbzF189@RH#I zH$~?#;|)**C@uwmHQu3whS*`{-?Ztr#}P-kA06%FTg#{Z4nDdjbKMq{6MC-KfWHeS z5?4|?YtN=zL&9870-AiejyduAjICRiSYWgksHA1TQL;Lc2Kl{=kXD}6#1Y}?Xw~-mZBKwF zPgysYI+EEmVJ@D5DeGd^@(iwmIgm#8Mx*AM22xzg&!@LDIJ+L+s!qE1F}b;F+=J$s zWZ3WqfNmpy!@}Wu;NYpDpB=KCb>wc>c>wbc$lo^^=Nf>Cue86+5B|7bdw#oqlMR~X zi%~Tv+A#P#d&cm(Nyh_H{rP#gZSGDv)rVS7?*cEx{`=m0K-|~pPc%<%`zYkR9 zPC`&Hab{pTt}R(Zvi8;|)WHl_=TAA;p1XUy|Wg z#Ft|AyslrmAiJ%4mk5`G$xZ~NZeNl;3{3a#bu=wjrY5xaj<7FB%bVZ1atUvSRE(Fx@s{>ke%ibBaVY?>XgVXVUuuL0u zWf=RMLA1{Z#G1+^v0u#8aZI&WFt`~@t8ll7)JIR>7?cnBcQ5t7$MiNqdkfu66sG?1 zYhpjU5uEbY1~DyQH+;j_{#flZzja2WFLby+$hY0k0Pi^uCM3JA+Ae&kVXk>5xp#BI zs>A3VM19cTyt|wckcb$~x_=;lofc8s@ZEt6p|itFYNt8-9k!dX3n*}QBwN!j%!P}T zcB>eU9#1FtS$#w0`JFp#GOjRvAC4~p&@?F&j)Gr&g3zZ0?ffI0xn73o$^42b4Sp~k zzQ^a%ND2BDH(?~CY~=K-{Xhp^OZP6~Aw&>JM@$2bkTs4aK0s2M95j7@?1qS)jO!Vc zxyGX|)1qOsAidZRb_SKC%nCtne_Ludib_nI)o(B{f zMfp7ueHm+DTGjGQrNaLz>Kz})+$ZfEv`;s)^X=RAx8=v1AS3B*T89w&lNZQWF*L{PJ9~@70=uAkx zXu2P3fS`%b5Tw&kzGHfiJ*w}@2iLNugms_$U0;>YwK!{!fgZkpB7;>WpGo343yX>^ z6u2#?n0@I612*S!SHKfK&VGLS!m?D%_B&RtZLEX$Xmn=k(6KtAN(unPOk-wyeMDnA zaS!N{WNRN~w8v{`%PU1YDBtQ6MX&yJu~uy@Y>@3Cm;*_&zkWS+P~Uo&JJWRctA_T1 z*R;I69j9Z6dqfR?_`P&!57ztORE_TaB6;`vLs)xsMUlI^evHF!#aPu;(bPtg={0MhzVf4^BAD}iP<;%m)YXlT#RY>-wK*_-Pv*KQS~aeo8{c8@@>48Kmh$MjBGljHNdC8Ave z7f2zOZk^jwvAqQ`hsIuU%opGOa&eRgE%|bx7{OX5v9vbl)fC`r7OrLcemwY$-qPT4 
z7EX?T`5W%t^Kqb%o>aO0)h{!y9-4??cYA5Oo?Bi;Hek1^CLnFf{n#OQZ?azm3GE|b zrwn`?rDadV6dfcHr_)PlQffQAr@Ui1Edy#G;X14!i>cM~fB|)k4|dpL)pw#hB59+n zu5ml>KNlw#4#ne&Olv&=J%{Hp*(g`fFRYP&#$DV3n2R&Xygx|7N)L-a=Kf;fDiDAD zy5y9MJbZKZvL~0bcX&rVKS}%kA;lc6aKl1T8F6Jw!z}M217r*PQL%Z_ zcv?p{&3^7Qg|kF~Hek|HNw9|#l9~pHz#=XkB!1Gvp<9omka*gG8-0^7Uxx0QhtwH= zKDa=s6ix|*VCwwf;db)OpIA^i%Wb{=8co+6hyN6a+%NML*#?2Mte)e7*(dgkMq*|h z?;VYa!zS49_9;^wSE<9XovtVr-hn_2vGc<~M1l^Xgn)HcP)8^aeNpq&(WwTTJz&qw z?;IY(GPog_1jo6&w*v7%9~jn=s@+Pxa9O1{A2MOwFJ;Fq`&q_3iq7j{Tk~&!6u#u}SxK}6vF4=)(@Dj$=yQW^Mt9VwEh}>3Z4<25A$6ERmJU0C! zte?}pKgn-*6Y@8Bs&?Qh4ZV5`rmtsDeR#4LZ3wWDi+7ODznAHYr_oOgLQ!R4>KBJ( zkgGR~a*!gZIS%jRiMDUFF%Y7Ei@?)C5^RE{g}^BK28u&*bP5Hkk9* zu7j85h)N{Am2Xpst;ZS8QwxN*-wOeC^OU?_^gLnr%=v|%LB?KzRN{Ak?6d_pu|Rgw zowueHGZClmA$}bK28^)<*uRN+)Lyxxkf|zPwwqh>b_{sDK>3e)b09R) zRRJ^**Ab<2{Q?W?a}WxDf^-pb%QDeju$EbNBkDf>i9E>q6}DsV0veO=Zwbtbn8App zDv{1xi?RMxew`;_?A*|3I#F2L&Z~SA=m!&Bt3c?Uor)cfpnWGhEvFH{x{LG49p~VE zVp{E=r3%E|b6PjkE7VdEV@1G3NWWVo1_ikTYs_&o^L4M24U>0&POf-cwdihwrqE1H z`{i>V6>7a3obX-JCB09_yB38Mk=*HvYipvAqYo6v$AzW#;ctmE4$s+FoeQpWLOwO) zFNb?xvgrZ023g9bPXPn*70-#!76&BAQS58ZXNP$u^PE!5a;(W<$2W)llWL)be()i8 zI-a#eV;Dq|K=eC*ez-gFVX`meuOMbR>mI&Zl;SDAje)e_!P^eoi`#ZA330;BRIj1i zo3kL_hg~z0**Vlm*D2F47Y@yeGKu^*&O8NjHC`Zy2Q0q-TwH?LzkKd-gp8`Se=`!T zg!eh0;{kxw!D~$wd;xF(2MjpZ#ZmobKg4jTzXjAR(CD0hHYpBhQ~02mSh#Z8#}}>> z-jBm_;;U;2f{E~G!j`YxNF`(%alF$Fzanj!WDF3wkQ#P36+Bv;bHLH-Ok@4lKEFp0 zdJmtJXziCcH1$m)rDxk>zwSg7SyMb1q#8NJEl$d^yxT0Zih~^Q5W9{kF}D0H7S9 zivYxv_dp7YPpz>lhF(8dVS6Bavf=jaZ(MSZ`tDh4$6pirpI;Y7p>$RsZ=_Ok@Bw&5 zxjvuZY24&Uk%}EhY_~aLeNH}07U$Y#%%Q3JJPmg$=IP5Nz!8~P(WSn}zn>2yv$JY2 za^4bu_bB{bdkqNlF=Y8|H*4jefJ<#4^>#f5QD5>KGvG3J|3B67wzvI7I1|3`$zRgEz4H>&RGc@MtNJbhFg;>jKFC(J?BMP?(Ul&!uf@DJ z&M~gL#~D6*`{notTK?Vv!OiEa1KlL{SE_m*vyt@(BEA@ptcqD}8dD)Z_O}$}Cp1!j z$uB&`)7IUk-{Ks`LuQ+=9+9MkgcxkzSahf~PZQ(^oF6{$3}rinqdwk_M@g8A>c#^j z)F_z2o)bf%Z7iY3ygPWf`_?eu6b~=2tyB~J8aau_`J9!n3fHrldL(GnY&^SW@A@-S z&5F&ZdPRkm=<6MKIKhi`C|$~yBQ>Xg4tIcJel+%++QkmdeG5164-d_bG9IQV0+*(o za;$BoP(<;O7m1^%TjhEYe_O!!{<3z|iIv0aM<8`3!xE~?Ied?^l%G$^jz8-1 zUX*K&Go{CuPft<9$R4B3*54ZWlq12HS3eFMSH|$;d3(%sQ~WdojpdP-PVhm0g~=qc zH*tV9CQ=*@n(obch$Ney*BHMy(d>Sz9ag|wW20i1kM#V!y_1t60Q++NQ3#5IN*K8* zt6^;+J@?NWYhASMK87d>aI=_Q)4So2E$dLmmK+Kdw;;zGF>(0R!5jKF7ZO#~JKe$3 zsBZOyAiXIU->-tTp6@xaAc`S>ul`(|yfd7s+mM&fOyYjfomfoa3%|{BbMK#V-gy?! 
z=AM2(+5^MvrDV`$ms2TTte?QWxC61HVV6zqoGW*A^Q!rwJ57(jd>_2Tx6$}Lwc$3=tSULFyCEDH3NO~Rk$KKx zZ5z^cJe`19PJ*=eGhpWzd|3$Hwe<>tA(%?vBqTe#?4nE4=33A&~!W+(kpJCoKk(Q zW!|2+Y8U5O>J{;S9=I5@5LY^zU>s;eeCe>|wzngu{0@{_VcDU-S{=a2RS^zEOs1K( zUs%pwBbBxUhKU`LJCm#Nx^=6Kn!jfLd+niNr5yShuXn+#=-$4ksZAdb$Nqd~f;)6a zzkb=Bju>WJ@&iV9?gfH_5Ki6cl5^Jg4+(jSQTI}{9sU@9swS4RBP>ty2Drth#K-No zV;&VoWAsM7MPYyaA!~pgeBE2vU{kKRjjvJ7`tHtSmR}r~N}!f3(iMPx?$_vXM)ui21`DnK{s@i2M{OZMjiP!3{-R1{HbrWytE&CqjaG%x2 zr0n9sF6IG)n=^2LISq8}?T}07@3I>RHQ@*>^HFvR-<*&<3~Y7Xp0uCJs17kyrZ~wH zlChz=>zzMY``XJ613_a(Stb5FRkOPzT6V=D!Du{xC#-Lm{JiGp1!@%hN{7}ftP(p3 zxUbNE)}Ynms9*LXeV9lw9fYea#;bhwmLZXZcE;b*`B4SuEreX2E*!xI9j$ z)jgdc3WlH0{`LeC)o{-weh)1DtM2wv!I-#z5^JDrs|R#0EdB5rI+t@!X0O*%QNK!i9q>e@4ynA+ z&SZN`dz#o7fuV$`@D9V7K5WD9o{PDiv6%Xo6OPOd8cpqoY2cMy!3TQPi9^iIcr_`1 zUu1bD-0hcAqOQ>m{lfbN>uMBZa@6^(lt}PIl23hOei*5yD);$r^Jq`s{)$}39Rr!S zVfN<&hqJeH5G`~VjIXg<*K{l%dv|{A&}hR4H#sfuyzbWe#zP_qI+NF*};o z{$dC4yi4&}FA%gc6AbP&L4DuuE!)3;)VOds=Ui4#L%7?I6RL$J2x~>G)VknA5|&@N z*O^?Yz+%0Iha(U&SSu0bA`0(ZZW)KuXp9+>IY+|fM;y2;yqjXj=iK!rZal8EzxoTR z;>n9>IgAi1V5S@1cBH7N#Y;D9#ukF;jbbkN9tQG0zo2{+jQYg15q#kQ>F@J@g&gE< zPA*R{zKZr75$a)oTqZvOcd>no5y;66d=L8ZzPK7=eLuOzEYiij3Q<{I=3{prbHKWipsUoq(N`wtU&iLK*x#C187d*ZDvRa|#(fn0yq7eOmoy+`OwhPY zxvU4-1G_`C0D^)f_KrC*&Jaj{uI)W^h)uswNknra%aL;D%~-u#Enis|o(uXHww5RM zSC=p~#isZ0(MY_1bVI2~?GjupPyxrbSwwunWG9tcr)KbkU?K{;|VKjKbagKgWBqnnk3ygzJ8^&ad)n# zq#6QjOpGtTMWfi~?=8Wm`tl^?WKR%h0700h+|^#!R7dY9eV+LNldvaHlu*$+$j2C7 z%3jHj|C?(F|NC1YYHB-wdfdwqRxd@Wo3>DQQA(g#zEgI1hP&0aZUXhNSy~j-OFi6_ zjL&9fcBwGC?l}e(CuD7l!z9RngsFjI+2>Z+W`%;1h8E03p@qmpscPBIszGOJ3D{97 zN6S-nL~nY!oEgs{=G*i>+*wYGs9!ADCFj9Uos;&JAx;QZJGh5`(}~_dB(6 z4XSIX)WP%-P(&auj~l(yfH>T*oQQ@>#s6A!VU5pM2!S6(>sq(ayy6c>7&rE*v|9#( zasGUfOy7ktwst5`9d}J6Rw$%bZsyU4({~{@_u>a8KqGuo6VUCvzO+})uXTrKrZKX> z&^?bw*j$p!57QxkQ^o6)4}4*^Vtsg4f6Qvl%C%Eb>F|)UlpA0JP9oID&6w=!mLL2a2FM>Qh{VyKj zA9#Ga=iH+F(Bpz6BLI*ZrT;cQr7nG3$e1kNJP~!HA8wz7cpvnFCiT;;|1RUf86PPg z29LSNwDT-~JPxis3DZu_0lR31_Hxz*?P88RF@jKj92MgOEkUjQYSr?;ui1i^seE9D zad5a7A&RPb{zCwNo+N$7hA?DqVc zeuDGX8|SaA>eIJFrIYT+G}#-uw!Nioq^$XNqeGt{LnZ!cJ(z3Z)43-XrBG<6V0^~E z&`k82U-RVx9NO2ECJcZX3gQAls$X6PYj*^%K z){xO!EmhKtyISWDYTQ%7C>9$tPY*%RQsTYE<^%_(D zNIs``6$sFg?7kp> zARIqrwDJy@BX8T-HQ72y#TLh}HBW*n}l-aC5|Nj8`CfE(%N3G{A6s$HDM3wI_mDicp1C` zWo);L;s@)#-(rqL1MU-3_24mcERlvU6%39p!&XufynO*Ku~O9kBp1uz1ny8meVmV5 zk51s@>|Y{t%=H8jw*9vm<`fHm%@-}y#}I~CntXHng0cDcqAf3EK9UCc-Y0lOn0-!< z4x8)m=6u@MWoTQO+cRuW4#;Pd%B6wmRqJBIK|?Kh9eg&r%1t~7-R_sTnP&KVP2Xh- z5T;dQrs+XAE7#>sVfv?p9U=jVyQdFv@1KU1ec0dUr;p=9r|Fsgt}1DN-qz(b-yl|b zgWh8vHsO4^$lwcEZ&iCT*6wyQ0E9qqYgU{OXe{MrU*JS@pEZuGJfx8cG%WAB?pG_? z{UvXSFvpSY^gu2P)71NZyFB;EVcp!Z7y)3Tnu9!Xz_GpU=*#Vj+h>7c?;z$NyeT|J z!qn$lqJTjCkiqS10?lQAj0Y>{O&yD67mbTUIB6>Wt`jVhCsT~wpnF-qUvswflmt`l zjGPlH>tzW!{>a-$Xcwgvy0VD zig1{enScMH%#^@sJ>sa{urA{nRlQiz!*a>!RTxm#Y7;e{7SmpTT}6eHLCg2oR0HLn z`L-o+hEY@ErN^l@81(C z|Bt`%A9gMmJRFDd?_wzECRz)MAi%=zU4eaC&fxVrz~Fq&R2~@OV~5e;6VE>LLgsJt z1Q+z<+Q_Fm(OF@CL=H4JO<_x=`_4M%$5w)fHg^U3Y$PrawWvbgJV(^G^K$ib(b>xv z^+vCOBERyx<~}d-?u-&^epZw*TE$PLMD63`c7PTDT_OG{m&_zzw;X$xSLGgLJY%x! 
zn?MDX%Z~+;9q+LyPGI%WhY}p*RI|7WlfYbiaWzyYUu{i)plwC#A3dk__kob-+|5Np z>NVFNz3HtJyD|9fWFyMwQ*k7X>H5_R{y(66|9E_VjK2yP>>!i2Z7&F%#QZ<`d=klI zTq~O92|pG8!OQ!H%=^R4l@CMZB>1zg7_K@b8sfaNJy|MzK9T08&`7B1i;j#L9UBt> zJlEsA+JH%a%w&EuBcYX$n0-sEa|#L3EV+*0wDlQIU#dF45uw=H$Ctf|+vV!>-iz?O z1hm+gQ82SnUPoPN*#49}XfB-;y|MUJxYc7TWvrCBMY3V_c0`>Y=otPf@@)jo@|FI6 z+WmK)p9zwdRFkdl>Os}+PYs!OM?A`epHM+Qq;v9rZ)#vF*6scEYjmr~@l|;#^Lp`K zLoz-2@&{*De(&F_(`gD$Z^yx=T1Z`_2^)%;|K-0tPGdSaTli7H z_b;NvPo$uyQ4x|F!3O4#2DwDz`*1FL7&!zze~UkGkgTqZ6s?G-L0Zf~0P{lb@IOrp zVRu+e<+_V%vGA{pDJ=*IzqeqFK+&|o_z?4d+XaQRdRqY4L?;uK)`gaV8^m!a%I^<| zgm!{n$>4}z1sR9)2PMJ;{b=cQ7DFU~N>(Hy&A+m^9}G~tzh z42hEMM^m?L87F@=KBAaAa^ov~@#tiSZ&)8KuR8R%?j#<6@DA_ZsyhJ6ils4$0P#Jk zV?kaa{+3ZNF7Uj<*LRSgVLDBw^rrebNCR1a2z62pR+Q>HerK%gm7=qd{pr+4OJ4;4 z8=U2`i*I&C6I71ik;W476qpcM0z)f*?9TdIFz*Dr+=M61sJnDW->re~L2U80#a>$O z%93-`?-G*O-N}a$e)4-2!|{E)*T>!aB<~ohLm1RwS}^@IZ{OK5+zptm4SP@SS&ZMV z+e?!(WE>6W!ELB}5yEm*?>5eRQ9AkBF*A1PR$X4qdbh@SaEBQ)uIdF9D!ev-u+-8|(_e&=zl$060_(1zii6T*z zwWo~(e2*|spm5PWHTXGteoY*IW+|e$5O%S;2eOz|z^6JlR}r?xgIhr5pro%b(2jbq zY7mHMW)=>{x?!t3@aRjMw{zJ=-u(F+y1sakIxOdh@__e&+p_xF6Y}cTMk#i9hF76k z&2VWr&un;CZaIq>vkij0J$bnE&jrEX-rIQFikyvD+$U++zV+H*cq-~+(1n*?{N}omm7HDpTqTeJ=%Tw zVzUa#pylc$sSDw|z6{oJBq*MO%BuT3nFQGC{v2Bjz2j_fb2aOFem??(_g{K{ic3fZGdZf=-vWA% zh;a+$14t4&%tbOWX?b)!@ycJiXe>XUr~S?(c8h1F7RBoDY-e*XyzBK`RpH@DQu9+f z6!?qENabvLQ=y(Np6izBckio?PiFos7G%%yr2@D2cDIg!=aNSH8@iaTGVKa8fCu$8 zXd1vNR~C|hBG27_MgFEm1g#P_X)@Zf`0rgz9?kfA{Uq?X%HHEd#FY>qcNHhkK(fuF zc(|M03BUIHfMsAzFJrtE*=a*K;~n{Llc*Z5&Q5D{67l!6di7ES&=fn?T&~mOF&{h6 zTNmHNY5L@r5!pKB@eXn~&cEZY?!W~yNDsF9olmy_3dY8N<%6hychri{GvFwIMeWcp zwZo9%$ee1&J6Zj-q2+NaO&-=1{Lo^)ee;r__66d7pRMJ(C|m_N?|z@`eP!wfW8?df z+%MX~mL1Rmov5D5;q20bYznKmvr7B4^wLWQ8aNc+_dZ8u`$l zl=~y{6Uk6M+n&W%v`ATl_pZ@vDsH#elvK%1lQrge%3jI~Jf7y`c+fXO&1K?HfTU_y zy^+o;)MN2;4G&B;KFW#sNVo2ZDYj4eVW7L#B-(a=BYWASDdNd9TJk9C9t!B58N3HG z$_FdF1ID9@50e<1Yl#i-ey%WoNk>0sxa`BPEF8eZ8`^nCf!~mu5Qy}m z5t*pP2u4Cb1K3i3`y|WcNujQG1vwu{);5h(ynqLtizsLUNGf(>*Q*)@*F0*8%0psR zIJxcOT`~KvRkgiAV}%Q}Ui7HNO>S|o@BV~=%)L&x!4+V8{4_45YtdbZUy&BPuRxuD z){J}yyz{)1fHngJdNkNaXH{`Q-8CMD%6onBvCyHt+VKb9>F5qKf)%RGlZ3S0V;`k< zFyGd7-6?F?GbhAm_+N6ZNqf+ggZ)Zl<)~L-DgA8UxE|T!+*D+j%to#>-6Cwr+&`oB z4lSU{k>3_tjGbd$8sD^=UbsMKFUb^t_593t-uxPz6b^(`(x3x2CDt3e=08^Y(pZz*zrZ}9p_Zg@gsEP{w=+JU%Y>( zz_Gw)D~BJ;J<8|fXS@)O2=CAZP4KUD)TgR&cK|lF@w=lVdgzD%0g@0P zzI1Rf5(tcKYzwHU|6W%P0k)ri`K-0}yKBarF>ND28iyR2ciwqti-HbR?3X!=hSt`~ zo)H)PTz!d*xAuPvF#2?1qV*KM-c=gQfj^%-RZbPqP@m#mBOSd|DwTz*WOh0}EWPjn zGAWIZtmyD*)ZkiA)85fTBBoGLb~KTS!T;VGq)vk!|EI8YkQp5q0t457KF!lqeTK%Z zUHIfvXP#T65Az|#ov&NGl$GQ4(DFg*+_+6=R)XU1!`AVWZM*f^FOtE%J2fa#%0Am3 zrB2=1{px-G?o<0qx_#kW{mOFQsGlqZ&3ez_Yq)>>+!#@NGCWw(=Pqxyj$QK;jiL4U zBYxjcY4hNIe1)&eQ`=pCFHv4z`hF|7KWG;{>D38d_O-aV>2B}FoAH?bm)+y`Ih}Pn z;Z2)Tv?Z+NbWH6e!W`D3et&BzdZBl|jAZmiUKYmm>C<|Bx|wf2?-uA3eMb35?LC@q z?8$n$w>l$xcEZo}k2eL<-9@Kpd>mTYNab&6-@5MS_zOicJ9;jEC~)(uT^EJ3S!cCb zRKhwPLwm!wb-iBeB3}G4cy-^7hdcja>ChVbbL%>fn};s)P%reta0d^I zntpU|9?6|1Pw(x2PsrZ$_Pa${&)sp}YP~<~*0ZKxsZQ3bm*p%K0fXUbac5NA|`iMz8Ql7R&eHsONLO zr9trI=9Sie;Uyb=&F0frA6<%M{8(`wbGUW1wJ_nVK1{M_3=Hji-)^@Hzdt>`8_j)a zOz9m=eX?36OS>~ZY+KK#fp_t&JP~oqBtT@djXjtltHBUu~w@ePuMy=7(wg`9K5v zIUQY7G?T|J#ekoS=s9}2K{Tg%Y`pBNTMt(5n^S$dpUefb0)JVei=i5S^jY{vn*|2; z(q*AUZB$Q#U z?zXxMg6?|2`w8jg{l;f9s%?_!sdAM2ugXn-prGPDofh3~wfacIi6)0RZ>Z3NX+rtB zbUEPHV_V+e;r>#v9sW5gZzyP@!F^EJh8ppl|NP@-OT+Dy#x2kLBNY&G{b%K-L87e5 z?rH#pDDNX|guKu6S9=`9q}H{PyY3phn$1T}7RTzC;2C0+(HSuK-8m#0x0%4P8XG{a}+*ElDdMmCz5`Pyxy-!Naqa#->Vb!z3P~|L@6GiDA*& zeyQvgAKvkz!tNX#&s}*v-%>pnDo4moxM?+_qdWmzRc 
z;)!U|>|2Fx!UAw!oY9;%Z$?3nE^BX?tJoq@>fr-eDxsTskyt#W(Mppx%^4WuWME`A z8C}n}gM};M`~y%vVYiGvzl872ESs@MRqxW^uP?E?X)}S8i4edV%eC zR>58K=!dou^mr{X3mVV_bma?w5O@#L<`yv?DiTdq>_Utn9pb8~b+q%ok+cl{Zb4<1 z{qOf1A1bXHr@&xHrYAbG8?48H9)s(+$a9~^MV|X0V*u4jF-Cfh=W^YxfAX03plPKZ zozUm3RlP21U2{&}m$j^~OIR;5mgO4uWFz31K= z!vqPDXt!;d#X<$Kqk_3@ne!U`5j>*a-wN}aM~OyD+fSfKM(c~ZmRe5xF!3!TvMlRq}RzyYt0@=Rk_En zYl(3j=yfM!yY>#3{R0TjzTrPAK5f@HEGK3fz?6oXwu{*YzG)~&Smm+@dPcUh^w83iH(6AFUb1+0z4;(I-n6#Q&|^IvQq=7Gs~IP$s? zrigHOMupej9ma$8e=11{|B~XR~IqT)IE&<)G9oOJ{o*Bs&gT|Lac=| zq2V3iGi?yk8`>yl(v?INspZ1F8IDxa8c>UbvN!q-Lu{*MWc#^JFQp33bHlXKb_{h= z^#fTK$w%`*Q+~z?+aPmV+7B>;v&~^IUF`{D#NV;0=fAIiWZfW%{({Y9U+Ht;GG<$3 z-nk}-&EvtgL(G2Qjs4?m+B?14-RK_up6n3BtCl_3FGB{PrP5(5Oh;B!xCZ+pNZ~1C zGvZ(e`%Ueqs$n*>D*UWTcS1|Xmi_g--eg-A8M6)@E8M?SNpP7#bwA0yoaU?tcie4r zE@H;5N+)Au8aU@A@ImNws1q!HeYTUQ81?A5W_J{TsmPGF2a{c*BC+IIEI`7cFwwJ+eYk= z{Aj~TlXKB*l4v)a+H(Ym*&leGV^kMTIv?ltJ<2$LViSUUSNUQGJfp`Ol#gvyma?qM zW8wAGYm7Fcsw#V~%=d-S9oR2!LFWkUsI~)oKK0pt)X$*T|6I@Gd7tx)<#jEwiSq21 z^IqPU%KPGr9p%3{Tpz~Kn6ACG{cIWedr#GEm3i)X3#SEx-Y4bPj%vuR+K55CT0MuY z5H^B;zC90&)#TL2Z8!5YpwmLzIPCUp`Ib{tA3aM&PS|MDd@&Ad9$ukmHHY2jAJQs#UdX9;!nVp;8Xxb!F>)bc*?wH0X;N&K>O0zCS4N+2}dNICWCSXF3bU-m!G8F6l0=lnWumvsYwMM1w-o;8^~zursb8Lh{wZem%_V=eY% z`sEyxhgOzS&1KU|fVO3|I(5&*!HXJ4Usao)vHjc)Hg>eh^S&8%9QwyOvP}vb`X&Nc zhhV}pwp9>VYgOcK{Mx5e;o1t4hG)|r(VB0WL$qS=ZXR!Hk$ZZA5EgHmQEWVgB*wyj z-O|ioJ0`6@%z{=)He^PNzCHMT%9N~gx2g7XNi}mGuWCr9at<>tCx3N4du8pPyvQ#1 zfIc6ntseXACDy^^oRI(PF_UwctB!KMf_i6M6FVpGdVZI778E;0Ck|#YYjN!7c5DJy zS%>|~W~Uz-xn&e}Iz$Kf9o0|ziu>Y!P}(7d=ycNMplE@k8&0u@T-)G`P#f>qVC~Vk zG?-_Oy_@2OkYuD1AoX$U7|&rioZP079eS)U&X?4#sX=a;D(I-nBkGhWfYCC|QMsuy zn>NMb!w%oD+4tQduy{|Kz>{9WeAZX630CzNH}vtLis#or^- z6QO-}7gZA)dF5SoqEYoJVJriECSTT=^TWRpJIh#NvwrwFdR^kLo=RVj#&N8#`C7)d z^bI+mPU8FNzcn4pzvFZH?CDCy6KAC2CUyk7276C65;-SdE~Y0EYT>%%I^>dT>j6Ldz$pwqR`F?7*)4xD@v6xE>ES^YF&Q`r$`qlJ$U zIQRi_#<^R-m(6@BHV)sgXhyq@S8Wk!V+(25uCIGe>-7AqKT|(V51xs)e9S}Ev%)kn z-B|rvr)NkUpzb)=tYN{i4_(xF@6w!(IG{iSPR2>jvhuuVy!4)2vxo72c}hR;dbYD( zOWUQ#mU!U2_t(ky*-+X5c~5BrQm^ZnMRRPT@PTG3=Wv%})iT3)XreC3eLkUC2vShI zxd)qb*q&KgayO>lvwg~Dv;m_tF#*Ei87X@jIks}vYEi!n+BYg_RJ|3$ozl;t|5(8X zlFxJS$0MOYTFP0rb8kC;k6<*C;c&>u)>c`2lt<&DV^q6p{rB}(#(diEEA5$HgWs>KatvRH9Zc4i_*Um}o1?w%xZyB$ zIM474=l1QQ-kQGDyQ~2^rU&#GuPm5v{Tj;f{iqsyOn@7n!mX2k@6gi@23ERn|9u^0 zuSt-n^l5u0xi7xc_u84%>`)PREFX83Pk5i{4$4yKcGehr@dr%eXyxClw^U)=+Q&Evik4O8mXkwrqyPtl6r%;)U<@6Eh#oe_OG^!?V!C=E~(%5Ze*qG-_yLx zxl+|+)Vn-=|bzBATRsyl(tUEj@E1Tm_L-h#cI7!Fjjb~6(uqE?)bGCTxSsv2?J4!ow%Nj0#Fy(BVHKY~U>?Q`&MA*| z2KM2!!AlsnE;iA2tvm&z5r2Jlb5mf$)6}!kM0AUPxueGUEC^w>5I+#Zll9d{WdAzm z5-;^R8Eb^+6h%wC)HZ_O@zTm)U)McjU5>ffO?~!*JIMN-k1zhC#)ZllX?uygFPOs_ zvtVmqe1`dDkI7h<@p#Jj6`bv;vUmjjoYk!m$yTIQ%+=doY(`Q+RJH(YK$E|dZUVoE z(TMtsy{-AvfA;7&Beo8Wv5^s5b*!Se>8GsWAZ~IeUu|b^a8`$QDra*aOeS7)UO2sG zc0ehs_5M`wUB%HbZPKuw2kt?0L)kmbYk&Z3IJcjp#2;^3!R#21h8ypT{NVAI4?=Mv|Uoe>R+4Y{l}9Y=D`lxtg$f4%tO_)47BS9^S;WSlR)`j2aw zQ?V`I_|S2F@w>k5_-Q_XF2v{3-{U`A`={NuPu)Wk{FE`q{=^4AtS)%`Me}lv@>{g~ zdutshlz`7Y?L!UhLl`5B%)Ze3$J|Y)#|4Z3d(ErZjW@~>TL(6vZQYx@-M>tXAY_ke zyYyGvf1lEghdm;()nc3&@}I;{tZ!Pvb53TS(+tG7qYHZfk=((zO=BG~&E@CF`+*&9 zDyiU+dwGw1CZE-yn-DQ;IggSVGyB?a^8{zyYMk-MJf6>r(~r41#|vP5S@7*Y$XH|V zFZPUHqehqypRwOSN<<|)xjgBLI!?6bfILy1{1(XERtwshD&!9a|Zl)-zssNx89^Vh{vp-|FAcI z#s&dtGJBD%>eszZLH;PuN$M7bOX0E0`|uai7xyx6V0`!?+>hNv9OF^V@H_(`f7F9- zS?q1*h;^BO1In3*EtfNjW_={@Hsf03mml(L&!%>wLe_h^-{m~}vESSMb?QA$Eh^=} z#7uB^43^;Rm-WC>lyy%%;k$qQ5B3=U;EK1iAAb3_7yMAi1TaUx-}~Uno=(`a*b>Il z@K1}w{n5=>lqutDH7eM}Jj~jDe?O%bmQ6_;35~e=RCGvvIpR#^O$4wov&-Wr{1P=Dx~q9@MCAR=)Z-6%Ih2-RAp3?pwPH 
zdcNpm%zSCT%%R)--{P+7h?C_-3V@{xMI)BM0ppzbl@omGWPK zJz{-dG2gRK{5>cB05|J~6@s>&Elo{8bxI7&YIU7p7>4I^yke~77I19+~cnN3V% zl5)m2+BmjNqdf^?^8WZ>XZvHvgG2S-j(=snrwgi_YaV0I*(3X&IOO6l>-Q~hS(7RD z#(rMaPd5_V{P3swRpa0_!3*gBQ&FT4+k%5f_Lw}J;NVV~@6>ys%o|;C;ROFl<5SvZ zvu@A$puW}^dE2Q9e`<06{poJ;>H}1zxA}-x(ey+eAH9YbIlYMbXZo>HqB0M&of;Us z*DnIA&FDyzyz2A&F!zm{{3C9!g3A1EV+@Air#q)R0I%o1C1|w#&5#cuf2K<3r}Oh&aLWs3M9e96 zii%avA)`7RcI-B+M06mFL}DE(?_wZtx?*R@kl*42V4qnxmLD|hM$PHA4~OnBLtW_* zhgOIz@V8HcFX;Dd!P>}Q@HOp$-7 zLpt#OPl2YU6<+!p(xf>6Fu_f350GVHIptW(#Ml#BN_4hjcwq3GD zzjL@Hf3IMDwnN`BW4R9Xx;^K=OMml%v6L8q*tK8h)EQI$c_ta`<88Y*Vfz*vw%ZdE^%**P7he8O6Cl5H?e;OZ~{9^g|WDFh1=KU&)(FcOA6SoI5v zP7n13_mw;3SNXqZ4nPipB&G-Zv9GGMK?HgPvv`tR>MwhE#RQDUu#p-s^=JMXYJ7(| z2_x)IpehnC98>UB=94&f(!z(vk1jDTjfa&nK4Z>2%3S`!OJ%Ohu`V$Ld?nc*+Q+`w ze^D9#%RFj4@jSQRe#ZgK0l(`_9&;00HDU^TVP-`oEIxcU1xr0SkP5vwGsnm0+z!B63?sf} z+*<4jFs{ApD$);>qGnmRBw$V_v%=Ov>lM| zqKR&=ZS0!lx&b+|wrkWUu0216!Ok#`Z1F)rj&>jct@Z~v|H&N$=B^<7jX%(WFTy@L zdFS!b_DB0@#P(-yh?V}tFg0$LexK{)ZHeW6;otb*63#v=djYw29_+18|vhdnnHyb8fQe&H)_0KSWVgU{-(?P`}2 zvt`4sgM;+&_rOHSIl*7S&O8f8iO&=KD~QAUo}KMARt#U(G)5`xcV}aUe?~Sg8j(r6 zhQDDa5;s*BbvA;L(q=E}=8JY@?WiD?xD21o2#6~shatF^#zw#xW4p4Q+oLhnUz)e{ z2$`*WtKRA=3c%41P~*VS`L8?|OHNYh-={h^3?EJBu(bV`Jd8SC`4{>u(C<6@TwEvi zzdS#MQ*eomPl>sr<%$a?f8c=YX{@M*t`8^sBa~{mWBvWHBMNf1fT&ByI z$O!InG>N@$oUhsT>FZ7~7yM9AU3^s;r#tfT5&4F?oQI4P<64fBfB2Z%4zk8%t?BW( z_#fo$%Jc9WgR#^p{(+tT6?1~)6-1737LN0mGsma&Y0I3peCK6+W&JY7;`__>e9y&y zT%QKIy8xQ`remC6{Moy!?k%U3f_NUq;LrgJB=%c`IO#2K3-EU)Syk zfjGf&Rq)~(cGb6De_vS-c}o!+Fn29u^6cFrQdod<752!a9dBm0EOV{-P!8HC^d&1y ze3t=nE;fgDV~n+MisTHV$cm^Ldw^YGj+$x^8(p!x3Nn&DugOn`;FiKI>)&q-07_b< zeCk;38ygSVV`8uT%9h`s!uKhnf`+OC^vmnoj%>yM{?vF9f4oX0U7-W&b5L?PN9SnF(a`L;6sv|z04nY()aw8`vfe?7d$-X+`D6s{5f~!o|5mtCSCHD`rQ5I z<(y;fKYaQVoXo}CoD95UhrFXv6e7y|;yM$?C-1|R7>y3T+X%>|Me7%*?ZvJ3T1+z5K&+eJvJ83^0*u}*UvaLfm zZLNlD77v?bzx2pss<4Ia72cO^S(yzrO2+)Uue$1M-nK~1WxO9uhd8s&I*`~;`!D#7 zXo44XgNt&(NNftV&+{tRXdG0+Q=7Ac4t^tWz-!D1f1YATcER2an&jh&zlAGxTbKBo z)F8Y=uo=!YS7e3JG*|G@t)`?lnj{>1-e9{siE9Y|O8Bk*R-Y4^-wKX+J}1sOx~3N?Y?b!+*cX?v z_~u*v;OUiItVoHUDV)xsEG2j(`aOctVlP5#ek^JWu11JXhswdGmC7ikid8tcm}qv7aJ0+QnSBAvMN; zMTU$rR-P}Md&l~y^O29&axP?wD$uIS8QxMK=2dyfqf^g4uQ>}w zZ4a+=<_wfEIe8AA;R>VQ@WP_$q!WZ2Jk_DMf#l*oh%=>30RBR~6Xr_z3(j|}ey5*v z<%u1<)(Jj5qBA#g#tffYcPWD)#4-#LRvneii@ZyljnkQ2PphG^pd0huQ#N!+s>@yAUUSM?`a#Pb50HG`K~a4 z%@X>E$B8{0s$pJft35Lc@dLp+FN|M33q=#nUs$s~noj~JcEs;0O25m9uFrqvZCrR* z`tLtEo|k95t_|CoG#7EaqoELme}P~h%zZ!umz1vJ_@&2u8)U)ORZ=d)7+1JM3Kd<#OUUxYIFFq47Tz28&f9bzna#pup zYxz7J`EBPuLmH|f@%H3j&2J7(-8Ljg_upUliDv_Bb=UeVD)C}q93MvNE#PTm0X{R; zehrJdIU@I*?dfRky!dGx`c&i(;rgP1I5J*{#f87~#rj`Xab-EB;gPZaQAtTUn!h91 z2W$2+2EVvU#3Vn~JCT?8e-P)`y|e=*m*wqavCRE)ey8DL4({IFm`jXeW5Bu4Io;CI zkt$O1h|QDz+BU$iqioVzKi$NqI^_fjnu-T+fTj-}icy<^!@c&Yppv=(Y6?yR-wpRO z6!)nQunoGn=r}j%H}2GnGQR>;Z8&!aSwqfU_%v_;OB{gw_tp2kfACgM5S^pa*R*fh!I zwG5Oc9=CJhbe;2Df8wh)$8>$?9B2ZZ(?v@Xs5v}n8)?;jA6$3^M+D4C`$eW2-yS-5 zjXSR>%ntPgex~s^RUtlVD#A_+fKTwZIC=7jmo;v1-CwK|8MBLTdd}xlCRF%UNBC5YKDqtwoBa9hLaG}XrzjHZGAM2`j=^LH|-#Ka}Jx9A{6Oyf0t;Sfq%c&d4AzjUiZiuKVhxFbN$zLiQEe7?1KMZ_T1U#{_R};&9SF0 zD|QFTJ9`ukrPpd5i!`jJY?8DKR?HajAAS9TIPckM7cyq%w?b@8y8ond#<0R9CMW9{ z2jzmhH|?zJw1T453hT5nDhy;FlUCSR8B5OoY&XLoe;KSv@;HCkD7Zxpd=k^#+s-hL zYlJO^fWvDu01r5voJFRD%%igW&&-9t3Ij z=$v@37b@C!f#nS5A=Tf1%^i zsC(P6!Ciz0X~H*=J*W=B#*6Y}sCEKV{$*_1NZI2rrF8<~m^z5kU!uf&OAR79HJkHY z@)_Usoc*q4fRBeG?1Tc$xRI&gHs~vw$HdOa=|e?IY#*LIDeXU4?xUf7a=F`|ABT+KP|pIhdJ=i|VO zkfmOAKRseqUI!g+Tjay(pD62ae`E{*ytM6VIZk})9fJw@J7pw$gr$?YT2pi~@c16J zV!8f2Ylfz=8=9@xRurURbJzfjk+nt6J?)0nXJrU9xCZ=3V+}7U$gvS0e?#+-%n*8B 
zcMY%Cjdthv1T#P1ubiVDwTAVl7Wh~*+kYAm>3sQ)B;w%SMLhJTdGws8B4P2J=j*K@ z^F{sg|H^z_>L$MJ!1sA_=602Hcjg6O@=W?Hx!^kP`VdpW#d^)`9CA`i^Yp10rhpx46*+?-7292Gcf=l94c-o$s zQAnybU&CTGL+}*CXfOPuO)@=hqjvl340dz?EaOEMkFnA9@Rxjo#;aS8$6>d(LeMnz zQCs&YkH2Ch?~Dr=i7|>HjZ9yaF*;PwF(Rs05;v}lZmjFLe`buUw>{%X(6GRf2Jth` z^XED^mUiyUTQsrYR)&cR>VAxAtX?*{&Qpw>NzJKzUB^7Ky~gc+#= zT)JtMoPN=DvS31u-d1IEHVjmtR)XJHP`!KKKA(I2f7WKlnZ6$y$NZsV&hPlbHBJe2 z7YVWCe!D^R7?lhV$rr(i=7JO9UsCsqUn6xc=elK`|6R>Li8Qz8=lHJu{*I%+pY2ln z`JIFK9V5Qx)7~IicEaka8KfifL2rIm|Ck-q=73yUJFE`ji+FC3OsM|^S8g1GL-n?~ ze(paXf0GVaAh94_PeXgu&V-XC^$vaqO#B`FP{T5JATf=1YEfoRfNzl6pJuR=n5bo; ztjd0Q9}adje+W2hcCR{rzO39U+-kD*tbh1)W&XUJ?}0qqw~f$z`|^DMj>`@|_}iLu zAi0NwW* zHW(cQYFBA9bdAs2j-9^ZD;^?MKcWP~nXw>ViH$GLS>t{Je9So;RO8;a4HcYu3NN$S zOmlM>+Rx^}Pg|pcSQwwa@NV0oI-j-Rf8gvppS9O|sW;4OaG=)7a8r?FKHHP5V41K~ z#hw^CUv#I>(Tjg4e3n17C@y~Bm0MQYosv&{jp42u7w0*ceTL@5^x!uz&)^|*w`-0s@V}W|=Wh@TWBC_I*y_!s0!VS7S|AFUM2FMf7ANunAn{PjY zXDjQ&&vjgXuBm_c%HY(x-stc4_xrt?uZnHB;#uEgJ4XZZ$@Yw01ipygiZA?Bc=KR7 zL2_pf9G-Q!@eljY76f(=49=Gre>4qThx`reupGC8_dZAqzkdwIR+e|{YW1xteDrl} zYX4a_dm0|Y!!BsjAJ3xWB%NBi)GYQqPAey$FWSA}7M{w?wd`dt*hTFfwo8gk(mW2& z{+zW6$9CZ5_y$O_HdZ}z(q>)_cuDPQ0pX8xU_Qzr2bFb2f>RAelBVQif7wo%hyKPw zevhZv^dDn)vFqYXU;LkvtMy~={~l}od1o%yWe(4^i(sx7-&**MOJDRJpqc=r$lkQw z;r!iuQ7dz`I}8p*3mcy_(ZdEPr2s3*@NcS&0ezS}Vu|bfdafI)_@un{=_7%+@_}Ex z1i?W|0M5~h`j3<3_?>;rf68BpiKN~Izv2+$BcJ##IC{Gmu!saw9b=5hdD?h08ekDc zgM?`Dd3g`Ey7PxrIBtn;6I z;WCCO{pcUQ=QV!3tWB+%boMo+#_{r+u2uZDt>3odnzO)0=(SCAfA$f6kB`m=cB0Df8lt^czTk@$nts)E z2#^qtgF@?vXL1kwff>SQ$)rFKk;0{4l5#F2B30!@k-?7)fu9fxDpS7}v$+>C! z4VB~uLP=$fe@2$wwvYdv8ra~|93-EZPFpSSByaLFX8NveMX_0)d!;(4tHr9R^~`qo z*1pD7Iax1Q&IleaabKLt8c?hci3b@I@q@Q@q&ea&IuN5p0lVh=b`^(zYoN*Inyzqdkt)gi`QS(A)9&bXgSB)q3LqgUTu6O9+n^oUg zh^YbDe@?g{RLojd1dJG@Ja0UFM$4Z(K$(2J`CMa}mi(hkkRhulsr zuI-EVIHTo<0_)sE>LNCHRBb!(rS7+lt_jn-cVC5{Cvgo`%kW9)3W`37==q#^hj6FM zeSfXxy87tqTF-Hw_rJuqWnHG`?07ZKm-CsZf3c*qjS<`H1yKeL7jjmWHS!Zraw6ni zmgH){!Wgn0rgS0rmgH&rHa!~XleeK3*=--vpmW$Y*p3@Yi>A;|F6NaVWh$Bpx3@?~39q2g93FIKL?+M|{V(`Tc&~cBQMxa6 ze-C#Hx)xNa&!p}FzdxL zuWgJ^67okqMVTXOUTwCl-%vC<{}zdPDppDAM?z&b6tO{(0L9Z4Yck>U{n^g#{3s`r#e4n%bx1F!ei4qji=rA zoS!;t9h7ILw%MhY*!bI@{^k!}_->`W0W-rVL5J%5EG~5%b>7cMQ_;#>Z3-7wWL9u7 zo;PnzchjlnpPi68f>72Z9E%~YNwK~u`VSScl`5(&TaUIib!GjbxwylYf1-(pcHRKV z8n|hDr$av`YV;@z7LlU3xu9)6vEsg6^(9y7TEZTXao{{A=FP(F)-a}T8*?Rg$alIIM-K8Yam5$0)a?%q9?j?b#(Mv#nZMu(!r8}1Is18+ znCFT)RMVvE9Gt1ky?LvrfAMsvGF)Ea`${ax9M3DwtlR5mX&dZo-5Zb(PID&Pex^GY zdFrxOP52%y6F-3Zc_-eXHJ*T3!PAEuWp91+3%w%8-O=Bn+6~rSP(@-CaUn%_Pip$9 zzjtoyr*<$6R-yw34-jk@sP0^oUnHDuQnxnkZE#aMheiTa3U0>!f2MX0+>g2z{02jE z>Zpvc?#5Eb%V4ADb~_(^khQhNZzy9AvAf^88gkyTUj>gX>p}i~eKY(RWBv^8c#Waa zNFk>9UHAJJoA3(*E^Ff8ZLO(8C60spbMe*jDQ5?C*m^^koaJiK`w(8X)EI>ML$BoV zi(MFY?;6=WqzCdR( z*O-TFsA70ZAp}%VcJ{maW)$t1pkb^v(cQDf7D%L zC-Ow_I`aAX7JD*nu8MUp5WkYwq^{*}wqpV~bnt7P6+Taaf41u~u$Jcjfw&!+5x)X( zx5AqOpCc5a?TxMz5Wl9zIVL)VT9fF4QC{;`x;8>^_&@QsZ%+AT4^j7dsX_cvNB6Di zB=J9auQD$EsmJz%!(Q5d@(J2kxZqj61Noq|Vf9Xh(+=i-E1JWM+1+byvee&7FKi-P zH_&^`p~ejTe~=FcAvZ6eM zE4;P&8g$h>SyS^f7kt60v`fQL%rS#+mP! 
z3yq(j@vk4;#vi;O@iR-j?C0EEv?womb-mvh|L>f!-fus-=VyOS`~K&AD7=GR#`WY) z+!b`PV_y5(nd7*MUh*NGhZY%tQ&UMX;#-H|RA_{TLt$*U%KK9<;dTy(j^wx0kkVzoD1HW*;w`=0F1+5m% zB1*orPLO!4HrB?=HbdC{lT4G zQRhK(<3;4Z<)dLR3)2qR0dl}42Lv6;I&nEk&SVZpV}NsLnAYwMU6cxW4!^r?Ey-2T zwq`!GGG{uzMQ<=D+NP6@C}H-7@b;Pq1f>T4&^w@gD;Uht8AWL$j?k7Np6Id$r}P5s ze|4b>a(_m8NIsjEUk?r+18{G9Qx$%tubO@8pI@r*D`8oZ1R>i_{qy~(zqTj5Uh+H6 zV{y*!eAgY7Igb*H#qO7SkD@gwc|*;+7rfx23C{WVvp!yrH5m9=7wv*QrssUCuAe)f zDbQEqVZ(uV*0#di2{~i#4;+Q|sdj*le@IGDQzYg~5l^N$dHHqx!ai-Ahj`rtCZ&7Y zgpy~{1m0bAET}1}_KBaUi`xkq3%?AGTe@FU7lbyh)Ym2s{&uL|Yfea9Hn515&EjdE zpi!4KJa>RpCbfI`oaENLMxVY;s_^@sOnnupy%{SDQ@e`9Z@ zp3!CP8AdX+50IefA0tw@XB`r!a;%zDN-m%q3of%a1(!MV2`{#`m!9>>O5aIzwk~V) zY`06E!j;dY{q6a;ruF$X>?}E@*tYK$Qd^Es9JixYV<|C&VJzU9vbW%?n#1^s`*7TY z0`sKi5Y0LA3mk{l*U2W|Vq-1le}Ma^kDl7!jLE;*Hu<{@)q1t7zECHJ9dPW{Xhaub z4^lYKJ`WvhGeNhp7oQ6FgS_eB2@gTFDkYDfZi`3ptKkRUbg?2oc>Maj`oo{O+ihn$-C%Y|H{3bYql>Odipe7v1xQc+{{#U z+Q>YRU$zT!mT+!&*3bpJ)%oUwzt|P}}^0&wNf|$v~SvQ;i~G(bD+H$@Eg$leJdKz_^slT8m=)9 zHU%3GPB3xc(6HRFS!`eUj*Yv{;mj6oaGa&CJ37{f z83Y-%df*aZ`C!SMr5cX{BRS`dJ3%Ya{2a|A@E78jUiaxafB$>YM){q;yo~KX@=EBM zU-dO4AN+lW=ooq0bI{GF)`0ns6Ks=c0+t*K^1}f+j<(3&BM8m<8hOWfkibvu4MhCI z9yiAdafidjRjU=XP58QZq=VOJbA6xTOj$ zta|eroub^2Rty!VMTbHs47Z=BdnVX3d#m}ecT9Hue*j(Vm&copxw~r~YRgfGZyYS( zC)=te`5N3u(l;;S--1{F;LZG)+kf&Rf5-eh^KQ`f?3Ir$Nt)2%cL`pLf1lru&H4sp+-HeH zNPmq#fB1vnxj63eqE-8Yi(a0mXAY3mLulRVZ?5qrFZDZ*ec2189_fqLA}J;-)-WJ< z;=x59d8*zs4|dau4i=pL7kuF_oM6GG7S3wtqY*6ea8orB%`HQKg!o7JpTwA<9SYAM z?YBk9dL;fRd4#jccz8glCQP~n#ex;Xk0PIsfB)4q@}+NJHx0k%1$m4ng+X)WE*)l< zCN%*iA3JT)Z+=32SYWelux!z+x78H(UYT!xtyFw-t@*&Z?inu9xzEaZJ*!eP`CM}Z z4x>3m=tz9)qT?&0vvSt<(7fO`n}CjWNKB2t3}=D8c#2P!K}sl0Ywo(c=?Iq;AI?E_ ze~5dchKk0KN@^P2Il(r$J=TxTP%>1pySBXc9-R<*fSDl&-+V}=d`tp)P9&FWa z3x`N_B#5&EMgRG#`9_((LQ+n2A+#TMjLZCq?8)Eu=@-A?N3L96yXZ@o>*L?>XVkP> zf&9F|`J?{{8r{^yWZ?UHeW33TzUy{Oe|RfQiwfso<(krEmL-li>&&lR*Y~e?2d-g-xtGc0%!4QmIG}q8_rWYz6lE_|2)=bq$2 zYF*`1xg>d*QmcV`F(T2rZxm6Okt+=+ocHO*G4HkiEOp9Klh2vOnXP>asUv19P=sjO zWBAu-zu2A|1x@#qJh#Go`WX<cGDU}5Y@$+KX_ z;TpBcedC8<2fPPSy z_M8!D@=$wyD78t%@6>grf3zi(7W(XG*0KZ{ACWiaocdi?%K8Lz|6ZeItZ>=Om%Mk0 zXMWwU7fwb6LOzf>`lOk+oFtri;|#sb9UAP^iyAw)*!u~(PTpv<_!z>lM5innhINR~ z@bY$(Xg;9aBvTW{a@=3x-IeubW4Yg5>Nd{yhb?#3rMcc~nP-vpf1>O1N}a@?d=t6P zzSi*j%~8GLwcrb%WW~*s>)Sp*LDu{-69jzU5QR#fg8f5=@y$CIcDA=FC&PUkP zrn%`3)Z*=<3!js^ZNw6o+kxF78CmqBq~5pDp&@s|4JPeP`pTL5j4eUiV?N2M5FI#6 z_$*yBZUpzl5PYwye@?+Yq^?og!@fl*R5j69khswMqL0r8x^RK+Zdu>DM}ytabz_5z z9{p8slDVSpLB{6H#T9JA(EXdv{ExHV+GT!$jqE`L*?ianV`&9F@dH^$qDKKxGj48e zdcWE4t?=Nq$*rg(T%^@~oB9ZHgl+;((jMB2!72(iF6XJ*e@y21h}hngNUpWB4!dRV z0AMl!=Su6-fNmGzO6$>PmHWAIl(?U|Oy-+fJw4wSpFzham$>(e!(KTpKQ*?lw&W6n zTudX%7XzvnV`KQKdBYXDes4VyO=V%i8^%8U$Y*`)mL6gq$Nb=em-$ir2l6LZ?h!E1 z!)K86h!HzY$5~_B$W11%`u9TFSVU=0u!{bef04gMxxFM20vF6f`M&*v2C_Y@?icQ+ZGlU3mO5^f+EWkC6uQlM zYqkGu9_x=j92}mLm`eCJ;4JVN1P`Fje*_*mF-(aEoa^p?==fdq|Iu{(M~#6cI!EWR z{njZh@#k-wEob#_e&1P}9L>GnjcB*id(B(o`yNp5q$_M{d+a_E|(bc@@0P7wk;*WmowqE(? 
z|A@u4_V|?xd9F#+Jdw#=Sp2P~f32}{3h+JZbY0Qcn&3+YX&)Yv^#b>;9WA=7R_d(L zYsJ37??J^SnL22{M^?jw!zdgCaQyj@{3rFU(4<$Rh8eDr5;f*koDUVt}XlemmXz_?_K!L-h$K?E&OV-r8Z#>*I9%D@t^#Nv+EWo(Ix)5w}p=y+5J9scimO-&RGXX49-FJ z_|VMJrbz2|t&kpS1#M!MWKHmlBr2WE2oj(4gaTr=)BkN2|ywI~o9L#w9l z5hi-;z$tB7%+F!Jyw>f%!s7^xt!hf${;)PZ0<~QIuWOxtc}|N?epGVBsIa|gB>c!( z{?Ford@eA?)wW)9M;f!e=C>W;XdZvZRiy^6J8Nc(aa_RR(X}DFe{-KG()`4~g2Up^ z1hmX<`|-y__N1&u1t;XJX(T$K!acVfGfvxU2Y!n;c6A=Vk%9ZoS`vo?=d@1=$+s;* zm)&yk;91A5$lP@pZn&L3Tz&hqC%i9>p$cDDbQ^`UC3>#3`dOtbaO{J5NV$)2nMiyT zRzF+hxBpq&toq|^e?TrVpmw2w>^6x4hi5E;v3ERp&7-~SZX3&vghy%o)}-f;G01br zk`{Nq@+;Vv_gHio?v$4y2j=YchB_Q{?ZFO$*Qs&54X1M8@;VypuNnBl#5(0dF9xa=T%*zN9X<}m zabFCHMH73{H=Syin%)>JrK}g&yiTn8yydETpN=PWv=OTIirk*MySZCFaE1ox8$bX# zA`gYtptEJAzCper{LS1(0_h%=hpOWL(Q$!#{gy{GsPq=5EBs zH>mml-Vax<<%)hz*Pc0gTx;HSkNm)huN?R;e8G84?rqVG(P|0^-vh@5%bs>3L~T?& z>_m;h)yJ%{uSYw8(uDDA-t5idql2o0ljT-riCzr7e;V3!uV|T1oSS~f32lgWoR@L6 zO+s|+)Lg>}b~Sf^+eIE`bckIyT6D&@)U(n5PI|p@eQz#3pHtR+G&yxGbSGBv2YQC+ z__d$|6+O>{VLVJu^w_h&4lv1{iYnx*%Q%oIMRdM~&Hf6Fgi`0|d+(RIzdvg3$?=E| zUm3^$f8tMkYh(S$p?&8Hf7F44HQck1RAXz|;kJpU3P+L0_m(fx>DeT_2Do(u(FnZg zvkIvsZ8rjXx&}RO{rWMBj-S|=&M?kyhqd@J8jVJ=SC ze|;3J?S^o($T^}xE*Mq~u~CsHoYPA{Y&Fyxs0*S`F+2P1Jl(aPo6q_i{`yC*?nhqb z&+|xLd*A;*)rv6=ID7QvF)Jjt@k5DqsP>SWA_Jy&b-rm*qp4pfpJ|~n`p4_o?Pldk z^svC-S^v6cox~4k+womn^j#xxIXnMZf1_3QKrj8n)P6-<~W&7)*_L@4V9n)!5gYak0EH?ZVp?n^$ulw-aOfzH=2G} z-FkNx+-bY}azP`Sgj$aVzzCe|k7toN5b~t=UZD=dZ()*?cI{&e7U>NwIQ|ZO=Ot z>*cEd+H&go-Eru(rfPF{oIJPQ8ZXXq0e7If8z0xrc9_4;qwPa`*?5~B-#6`B`~5LG zj5|U6u`=uJ&P(I7W;WhBcQSbRv1NWnnWIJ68aDdUYAjkTzN_Po-`fZJE?Vdx*VR+P?@jt|)J=YWQ;j{pgI1w41@G})J<97{ z^vhziDs>CDF}$wsj@Qi(fAqZJF`UwgE0U$`B0OX{B09FHlQ|}-*J+_n<3|%W!36l4efAafY^4s`TG9F5uRHIS8-`-Ol&HN;^ZdC3a%E(@y3PKW7Bzf$EPX#q3x6bb>AVlbF3C+u zeG&iQa&VaH>)zxF{z!Ea?Ee6FiGF@N)r;{DtZmVIRu3S$MQ^Jfa*^2ynNN$3*0nmQ zR9DpJHL}eolfFto2h_c9ZB=ik?&~1Iy|sFXtvA&>s3+U11+bkf-~uN>|x%>?YZuJiT(YyZGnK85ROZklb5u=5bOhXUntt z>kt|VQdd@Ddml7i+0zj;ovd!z5yjez{H!xmR>w4`qV&o61Al2fQb}=?j!4*wEVP1m zd!wqoLgigVOV})?{@F@K5d~#NeJ}A;-bJNUWs|VT_NrwCj~<%vj@jgTmBY$@XP<_= z$M)7dS1@vilYNFyA@Ac}Xu43S<7eT--NZZEYj37TLDj!r>mH38SP{?dv&mqNA|;J#XNlK_wu{-UYI!zdYyzj7u)f~iBUanVuXD%xYIZuR6&yb?TR=v)hRNSN z`5PVI)_jMe@3gF&)dcMX#=<~Jwb^D}m=O-Fe$IQwk^{u0vO_+X>ZQclY$>K&lMOpR z=Wf4Ga~z1S6RdXG9&QXj>-A+GSzrIy3lu}q*?*t!H`+(W+2)zeA>TJ@d$)EvVGV)O z$ec;%Jl4|+kMxsgV=iP~ooq8=Oj}5zhu3?k{`=4Ds9Zh=H|*CK$@=wf9XsTGiFsRC zE7||JW$%6$pUet}+()>lTVej@MZA7I9z^PU?}k~(^)&sF?@#&Oeo$qu&LM7ydEUmDev=B3LI5Z->s7wR7bk3tcYN_hK`9o*LwS?7AE^3 z%tI&7_o}w>?o$5l^LHnSYRr52+Kl_7I=_2d1a|3a)umEh@%)VI$o~F4l=aJ;iXD{C zSPjd+v;M7Ww<*sb@;T&@{;_uWE|ekIcM66c>rm!~jO|3P!&^DFK_sipZOnIC4@c4F zJg?s&U&re$XUKc(6?%d8h)dRznMmCr|C)PFQT zi`8*I^Yok~GG4>>@wRYee#7{k??I4oZ<=%GW6xBleQl zn-Q-c^y|;O?v<}Mcs-@7uw?=2^q-)Q7z>|W<7CEwS_Z^dr5eVsdUyc&{ zBOdWL(KwlhVq>^(2yot!?2p^hV}H6gC#kF#JRv+7=ja}7?87bONO4jc?>wU!4p~oz z$NDmyy{gN2blFoT`!Z!78Jn0`oXo78N4r;)zTomdn&_JCSNgMfKKuZAz7@}BcqY$h z`aGYP%Ja2MUO(#BH@r^bKwg*e1yhvQ8DEfod40$0&GK~_UzBHMd>CId(todiQgnxB z?4Qt^53j8wN3_%y`1Hy z!M$+C=H0d4lh|q5%XIutMU8vaXzWgeKn6cn^op$TB!P zTXOA*Yq1;bmH)Zxt2}q@{D0gv`JT)5oDF|}4)!8q!?uXw)NS}`}uCVr%rh0iJaemTVuRtzhC?qgL83$A4l7`3^G0Ec4#Fb z{$<=v^_ezfFaC4vGk^B9_#9ilE9)71_Q0zZzOx5r+{^hsi1w(y!e0Kx-X>yC-1j5?-jbw3Wa2;l~#jGLDrO!Z!8K{vR{qx$|{Of&wtYeQ=W3|RMh@G{t z-#_m=#}4P`xfiu8*Ij$r?5klh&JS(w@%OcA zrhUub*VOIs3*5ZXNAIvWGML zQhqnI|M>TPkG*3|_4&ejWzVAJV7Oitdl&8H+{s3>>+|8K+lm_8&po|A96esC_#}^Q zi68Vin21kUFJEWx=Kd#)5_xY+@0ZA64~gF@ezr}1L|#wcc)k4}Uhl|va~}ijy8P~Q z{k_=Q0e?kgbRu!YpYZId_|fO@m;0+TpF2L!=*oH=9FN2phAThEIQ@KM`E$h2!TIki 
z`T6|!^7^LyJS?v-e=e^te=e^te?A`3FXq;dd^cpQz2FqKFV8>ebjagyHpz9P71xRX zs;}G2bxe%DZq9XlfTOP)M{*q#udm}-B)?u>w}0$NtYV7~XQcY)SpS^yvuGg5&-gw< z?bJW7%j;A5IWMo*KjZ(%_4?;+dAerbM*H|}H|FA>S%AZ~UONYw`#$R_5I?o) zm4ACw-t%af`g-w)2iNPX;`4o9f8Nu9eZ78x|3+Vv_aC<7PJajIO(6FKp@h$W;(Fq= z+u~V#0j}TMo?QE&KmU3y^B36HYnO8GgX^_V`g4EonI`iHp2+n|UoZD=)w$m0dQ+9x z^Vut|^}u5S`+99E_dU2?i*4jSuGi{#s(28<+3H1QFkZ!Aw3P=`#aieFZSPydh4By2KYM?D*iDw7<9i2B!Yy zANtgxQdw2>R{N6r9prbv;CD!W5PwR5l_&{57VzWegUU@G-fbo|08o{9AB~2p>g`c@ zGPmc~rG`py>|5yv`$64ArLvZKJDpD`|H;)HRGtltK95|gaofx9xaNpaf4|PuBnn=_ z<#*`RJHX}Z?+p6Rp#SORcPN-ph`(QV|9HKtwom}SfA8_!*1gEcL$RJK<9}W?JxQs( zC`82j7n9Dyt60y0*lHx!JYY?Bmh2rtf-E8RqkBPQ1?-e&->)}nBR@H z{GTHxl|C2xci8km@&AqQwPn12d@rJ9%IL`TsQ-S-bx@7@9$SHLBZpH}H0tGbo8$w( zGYI3t=*sU4WuWTsK}j(7cz+-AIW3Y2@;UsT%Y6gYH{?E%i%R6WqdXUR8%B=)T5>;v zbIWtu?7#prJz)K%kM*<3aJb__WJVs zm*fXnLB!j2KuUN3+!xpLIf;B;O`ePUC;6bqo&1r2^oH(Rz;iGOynj!|nG#>FD}>K9 zPs@3n%IB2VGhexU&r#-^@h5MD!r6IV_3M_XM>1zl-%nDQ+|P_`uiQ7^|0eVGthi3riKK3XSe|jFtFEMVSS0nW|`o0*K!1$2QE!X$ISigGzaEllzp5s+Mm+QaRt$bf0pPR__ z+y|8*^8Ngs`5+IdWPLGi`hGso_oeS=A-}UP^nI4=iRae$Gk=iv2oIS1)caP?>!07t zQTxZ|mg{4vpC1VdZ5QNv*-v^M>h;Tbu^-ZZbAMbf&#%Xy^{nTQeJuAO_bH#N*G+ky zzAy4g@#TGrEEHKOy-v6e7Quru8>&o`8&wt_i_1TWGF8O>}-{pSb`M=i(-`5wr!Z=>@%N1yG=lt^D|B*bPPO<4M z8AGi57D!#8)YW40n4*G5!l={#n!M>;)-UT&^2##)RT<~6^L}7o=<};4^|-JF>f~wV zIUe!*0I9L(86Erax2@iprnJ8zch_L6% zflDa7Lah-)EfW3J(FH|Eh#ubbNE7`|v^M)s%S1Z_o|{D{U%Aa-VHye1DJo@S$OMw%tm%|F}i>C zE&sl&hJW$Y9afWI;wPipil*Y`$j!~z&7Jsm^CH&{$Hr+oI(*I2ZZLIoD{gjQ&!2e; z2Dy`r4#m{9?PM#TGkoE5KKOq=p}{~BEH+g6z0TaHPow#|S-A8I9EWs14|n?i?&gcX zIn!GQ=8)~qT<0U%^7qTZ_aLOuYV{z$bB5AZo{3I_jfSH-Pg%&EmL=4 zwg?ur%lD}}V>b=DYq@9nn=yJKLAA(?m-4&)AmqODgRw3D?(^Q+HZS1T9xmS>uF*GR ztY}7iDqr6?^uD+|1A+}EifBawnzvb@b z}gAH z$>STEhVg6MYBbv1f!ai z_c?BVyxl%j9`1s5W!>}Z)+TwIM7M|cTBZNyE^i+w4Y)tKV|&zjc$!q!%bSLm8Gk8? 
zQP~|-{hMQW`~3RV|GFDiKb_M}_rV^`y4&jXEq<}Lo3|>Y54-&kKe(MIZ-Oezpt`R#B|cW)tw9m*NBiDB znJ2f+=k&RvX7l%+Po=h7zyI1btCd}4XIL*Ur_l%F^Y*wn=9|y%?QLt2sDE)gah}Gb z$L8vN|9bp5e%XW8aQV`Fy8EaHlqNlf_U*_8{mYmA` Fy1nV&e?@Pr@w(D#&06`x zt;|G2z8^gPtjx0{>ZM`dNrSJ7J4{jb}%*V*T7RCLo~lD3}L zp*wlm+&xTB$P2}da8-;e_kVrJFt<;Q$9D6OJ`B{;{Pk@;==S`_>SlNHZf;MlUgPHZ zCTM=VoSsLcue?`z3=SW97(vGc&(27~wjL}I=7n zk7-raUy?m+d)i6i4Q}HBr@z+m^B+mBrWP z?M+j^S+Nd2EKHjD0nVWs#ZU^tXCeP02eht(!1V`iIuyk@SISCC3=g}wV>q&}1B30j zKv~iOHRI1)`sGiPrhgH$Yy`<}9PM5uWMY0ySQI^L^ZOIM$ddZ}Xkwy;;a_WY&H=jU zYa`t2XP;L0Ge3_CUfF+?bof8*gGgRq7t0{$8vHgH1>rN!&rGhg+|XJG{(aoK5#Ky| z(>O{gXExK78S#RxK-lj#BfGw|2PHanS0|cetthouZLtW}jY6a(3MxGbV6%_|U?|-?13rIjJwZi^zu62&?r?bYI0u}MM!x4nH5S<$YN|YUAA0dRyP^p1A(cC=Q+k019p771kyeRSV78wav(A<#EM}7q@3N zJem?j#}$6~s~rozj3$o&ut;`xbr7l|kFOp$##Y0H^?$;i9@Ld83vAOnD&Zy99$p_; z8Y)38Jn2y;$-b}NRbmt>$PcXt-HjsK{zxjsm(>IA!;ec!+j!X}Jhn zAY%I7TyYXKv(03+h%G`JKb^oR-;pM$?FT4k$Vo2@DGzTH-a=R2Vxs4|-IsIUcsbh% z{yLh~x_`!|X~cS~hx7B3)IU=E@yBfa4H5szW?wF}&lgc?$0AShPvA|bY0=Ld5dVJ6 zAqK9BgyyUoqPnsCwh_7*T=FYUr#V#h@?6v(;eCKK;ObhM)s!Q)SGM8VaXh93Qx(xJ zD5T2)%HlkD74V_o*m?E4$&gE6zgky&V9^m2i+@w`6|naZHh`$OUGwuxXUJ>{=XCF& zcG#oFQJntlI(M_TOnqqY=sOCM=SYL$L;$$#jml{*6+h9&L^y5oA)@#4*PKVDIk7C{Sy6U8u%Dihdywc zmw#gHq_?xby6Z#8xLe1E0gXLL(L&8y)&HqWI*}(J@uaT5$`1DKDgg~wmAdh!;O_sd zNW+WHvKwXr$QA^B-0MpbB3yxu)h~|LU9Cr0ck%K+!!Q&z8Bh2kzOmhm;u8f<5^%Qg z6Y7n|U+Xs-wpXxqj#o{7mfkIvAV*x)E}R} zAK<%p#*C2?AN|w(`E;s3C7tLH6wG9cVt&4K^;1w$i?cK3|BR1rw--w+K7ZSg;)q2a zHZCnB!2pL5Y-!GLKTEv#Q0|9Zorb%8v9jG~m z<2;F}eD4$`UU6V2`3> z*2<;5yJyZg{Fx)iaZ$!|d`!5EDRrN-9LlP;Tp8jNf;K{cwd=S)lJKAGB!15ZJ~$3E z3_8YsdiXrYkj56YkzKrkG>LHh!S{~X zkMUhUlebUgolBG_#u9o7wokQxwc1x4IoVBvZmS}?OO!t% zHUG*HoXOQ+v+WfbIDf)=db_`T>DLm$zvup>V}XWaSG>;YNQhtd4A>|8TE7VRr%H|~ za&VL&Kbq%We2ze1Ze;(C2n!z5cNIQ)~p|r;BAqDNa z4rx8U7*HC$t4w>Iw z=bDa)Qw!9IPVZ86zgLysb&**1{7kB73;xuG1aF@r5}1Ccg99%rM;{*T6T9Hgu6s#o z!7wiflz#)xUFj|C=x6hc+X#TD-g|4c>hdAt1YRmO@ke?O=q}~5mFgZ6cwU61g$sR| zmlQyQY{BBnNHn%t0 zIL#bnA31&>i77UzRA%VK8BjF@iFUQpAsHdk^?wv}P`aT4w_GeHgYP49pb4MPa&XWf zQ+Nd*bdcfjB9=Ch6HMZV0Cm=nBL(}x7W$*+9Cl87oIe7jd5S%<@iB!-?}Q@{6|@?ttl6^c zOcnBJoQqr})o-8Xcz!*xU?HNxY_p$&B3O6qa2wW5*f*$DevPkqVDn@rf!H%ed(>=9)Y$o}3dtRyenTbh2xU zdcp4|iTT%V%(!61dzc&QyPOXPn)_jpS+mTmO*i#@&orY0SZ&K6X`^&!MK^?bWpT(p zHPk<(`}TG4^qo<~@QDYV`03GyE`L*w?KP)&@b~uFcH1h56B)g6TaWI~zpCF5?6h1X zcZV}?NOx0^1+x;(ra2AwMl|g(TH2$~kr6~xaN_0l*Ig&{$1fP$$JO!&50Rb4%kH9x zzXek8R`ru;njFeC1R}hb%+^!(=cLFoZI9h;H-9DT#byRS z`If-HaI9VTKb=Do)TCCT6CXtGDYN7+&zZK?aLB9Rp#9`W);xz+7P1x0&kTwldKhUH z$hcM{P_zqo7B?^xN8B;m@U&XzF!i>rJ1oU|9>BIa56 z8<8zC>JFk?SOUtWb)%kig|j!YFP_)ox+l$sDqk;UE{yP$toHX%UxL2Qp@X4kt>`7j z2@=J-B9vGd=|5$`dN;opqxDEW5mf&mH>Ua;$=QAgp7~3s40wjI^ndt-4}KRB1*Imy zg_$$~k50uLtnpP{Z<-Yw1QN_nJ5gYtef`nZo{V@8sMw8|fwyGoJ;4|vsc!*kKRa;fpO2f~s>90gLhIhA4Qg>Iy^b8S)%d;8 z7>1TSb8IMsg<9-$8sV9f_R}wuuFQqC z5DL**focwCAa!zA94^u+IJG*4V7&J)v0~o$`_t*FLWcHa0DmfdeH!vFE9K&5_Pl4M z_@+xggvS7vWD!t8$3ZXp)*B3B2pY0MeZyhm5epGT#=_h~(qi$owt9W9#w3a8AKVNY zpGJ$M;VH4<>gA|c$go2l&*#tLW>zK%?1rhO5NDLh$#Wt!GkoCs8?Il;AJjxXt^T1r zI4~vb!ictS7=IhxFi1unBK3;g(WN z987;0=vv(CAW;uj6DIH3SmO1Ngdb>!l1-FB{jOzZI)5H42h&Mtu?dXZM`z^X_C=l} z{jDZAgYJ!R?c>!Xl&hkA6%n7)&SgHf53qE--Qx2|_m+jwJrnT8cRegKp^jBDHDA|0*n&UH^O_37zCTQ z*2=0ng!we(Z!v)%N5ib3tMqS*j{e&+W^tcLOU(29Njg*QTt;C+t?e&v8M&?8=fSCc z2_^lCb|sSvddue_opLN!K7ZPpe~q55RmL+y;pv6-`hf|^V|8)wdwZca2DIV$&o>nR zjejBB;ipX12=d_8-beUS9Ww_PZRMMA=Pl-FzXE8@4E`VjJHrV?l+cVA2XArRHZQ-F z97>X2FpgXSqaFJS^5jtqNXN0c&|DzZ+IazitiTNOApfq)c@wg{@k>!DAQ2tDPE|m$(dcuZ(mFsdc@e+$* zciOBwMTx?iqcF}8I5QNpk7|Aj#q&=79XhO^@=E3(eM>YC$3V8nYKaVR?x$CGJHc_vSaLIOx&5gB^4H?x 
zJ(nPl>1E+?Z0-=sR){Vnzva!)?4tpl@9G1N8{hn55D^e%yBdfp@yAAt@_+Omq+ySX zD?9NXD^<)iLzYDDow93#>i{({%(04=?sJd{X^6#KKU$pmB^_qA(Cj;9FDOF~3)5a4 zdVg{=O`i6m`Z+Kh7*QtWuAd_LELuc)C?P}#EiPtv{GdM-m_{36bwB}Xb?yl^vD}(u zlF9QJAHok@X?Q8qKVC9y$bUQv^zqoV&jG4a4dC2hwrFV;p(+7GlifnzlX8JI6(x*K z^~rOn@J#_QOzqf`X^9wep?~4Q0L=rQI4g!A_e$rRQa@M(3AEoBg|XpXNGj9T^_|UU zuOgE-sQ{fQUF{-7K#<_=p{3mf$O1xazVSS}p%)aW!DOHLWVTQYE`O+>6#5Co7j%{| zAP6AEh-|b+&}J=U5hQSvPc&iTv4cD!$|l#z5MU%;W&cW%#;to_4;~hGi{f1qX+ss> zcee0!(6~a2b7;nRh8^DD_`^54HcQQfVqEt!JMzZujO2^=9}}!EWVtL5kDJosP9!EM z-mjoMf#TH1nU-L5f`9c6Q(`QOIf*n?<#poEky$H6BYUpR7zg9#2bJG0M$P|T33#;6 zsrn&B*c3zzQqX`gE?-h5ZNx@5lnoS05xSlB{_@!yKCL2cWc?wZsU0z~`)IuEnKKEIzzV9^-a94vS%Kbek*BKQWPb(?jb4v@+Ys!j#$;B? z9_QPI&XNd&QeM%X@c2Q4_uI^gRGQcFuJ`a;!H;AZB7loXa~0R;^nu6psPJ-jub(v6 z3BA>hg52E`fXK=Iz`W{brJxu+gxaQ4&>u$tuO;2g~qV4kJB)#Yr3x**TfqEBd* z*6h~7`j)g1r>6WB2Hp=VZX0k7{w*hwQ#$>}&~O=WYA){=`Z5Nm1mkvau4-Mp=((ZY zshwy0sIW8M?8B590(ODZoLRr>5lcpK@3Yjw`b=+zX@AG{!hAOJVJbn49>6+e$;H}y z1mWtx8G1R*nKr-goRF2`L9^;cGM#*CaLgvCplsaOq0Vtpyup5mhZH@?eBrqA#@N+z zUP&K~9|po1uVmd{dKq_;IRvv(&GG$Ff6t0Qh5C5yPvS{CqOcPlUR)oU%z%4s?$Xm$ zq1}2-t$z)y>ko#9@{t7>G zKlCPpqSD3agrm!Aa;_K}+m#4ZqzPcC9r?h5cYht)3#)*L+dWT~+l9WeEx4+le2~}| z0y(3Hq>WaNA-5Vc>2BCTU0KF?>up@+SP4Jj9Z*!k@PSc$yB?d)yG2)O%G5+(wjuz@ z=8s~Fa(6++Z{GOZv@xPuBXZvAD= zlJ4HST{EDHtdahis7cs3UsunVBp_zs2y_9!jxuV27rlRie<1krQ`Av*(n5-t8b#Ew zSbI645xx@si#uBWj*W5Yayp8pcZV)kP=62B5yF3HmvG)Sd6eWKGWlC4ASlM1qcIZh=bNo%v1&3W4 z<-nIanaN}S$&Us$1wc8cz`bvv#x3$Nua#q7?$e{C?(0T}NsFFoJV1wK|KXm0yKsTC zNGyGPPtiPeGLO$P zVJ_eef{r)c4gFKpo90RcB%A<_iqjR+C=z@&wfJLf%3C0lrd4l0`A|Uv0DqMw)O+Ge zgNg^|33(>RigbPz3@j-#1)S zSku>Gwe-xncjrR$2{F0}skQ=OFE1>>d)a*Y*;h!a@K+;ILdR`4uGpKNhn~YiQft!j zoPY?C*YW+$5iVDjLU(+XD1TL(%vD{ad8oe~0haZ~sb7^^7d7l^=!^^G3%a9QneL4M zcnMh&tJ;ZO)6*-r*cZ}FOG2yCCl@j>*c_}2D`WaAtqZszmV8f)@OK*@Vu1@3*{!)v zmIH5oi{OPOrV+L9&J!s4P4%omqR6)i#tfFL%iF2XPJ z)}Nm}AU1`9w-4rU1P4lG7qX?gI~Xm~OJ{T69UaB3sac)&g$t~LAkWBFyOdJN#ZHK3 za4{}=feXrq{(rMD3%7`CLdFVqo5@~S=m$0!5d3&|2FDSk9k<*&fTKjWtsMo=PMRIp zw%^jDS9$tbEGX$o@b{nL+|%^ZH_-3VqbXW`yaVBgCcR?fE~`d;>sRxyY_!K*vE zBo4p!(PttuYP*a&KmQ(H@&0bmgT~AanKOU{JXO7Hp?|s#OgBg1h%yze3Xz39y$4;) zwx&Qx*+gy411tRj6fu;hZ^G1?xc7O4{^~m$*H9{Z@7WElZHZSe(`8BHN)e1=y4DjO z>ZEzG^X+Ex)FE;<6X0umzS84{E0qP+qlc8Tqime|Q64HbE&lRIvy~H_jdaSd6TY}{ zu=PDXJb&|~88pES%J&Tc%qHCWK)qhl%ia#DvFYoBM-Nq>DVhaB579DclgCe_9z_`3Hb*0Gc~+c{7Pghu90$I~u(5#%4Kk{m;K0@2 z57zn?7#CQyJpNEH)QpdGIqP>d@O;v~C_*%&Re#uZ^#J3GVI9>LH5v(=e#$6xu1uKd zIKXtF?3)@LbW){X_pk=b0Xa^}Bom%ka)K!E;z>5j6hAX6aTW|Trt|t7g0ExcRk-$D zN|r(WR`*6{T6vQvm|Ka+iCh(`B7-m2lO@)$UqXry$zL6Hpq9jgznc_zD@I^6W58dP zFn>y#HXGq>3W52bPp|!clYk?1&O|rt<^H|Mr!0nFVPSM4w@~pBqq1*TNP#W7KS`sGhVa^^Aj_0Un+_%0CqY`ea z`llx78duD6<>}Ehc?;Rgm9moTebUzBhJP`ODWieAE*k;w+jpk31ciy@`(@bj0=ni8 zJX9T>hP=-aF9M7vCBk%#LEikPvMG6mS^F2(#-OIn3(PgPmyBW1)$KnMT!f0|1L_d= zqV%LbVPZT~C|LPgEE#gVy<(~Nslf-kzX+gU;e_(BnhE($5D(+7>3ECYM2!6P8h@bW zE&4sQ_}Lis^^NA<1ACoNzUEvL!f4*ozLrAsZ#(H9c28dB9DfIb;cwwLJdx&A?$q@L z?WwI=`C^N@ZwU4W+-PFq#p8Vi24EIwuq1bvRm8*eW47aV5lLUXp^}(vE()c}t`&L%RF@dQ@tV9p9Ig!_B-W>{NS(<>%mw!v(T^+`f&vV9rp#hBlP9 zu7^P}CB5=ccT0LN+(*dfCTkMF6>hFBIDf37*FJ$87hhUkT!=J|NKxxBZI8C^{6-b#H^+bEeNroF zcs+d`FqTt)oA%wf!S^ul_T-x;$7@fKLCHRMS9ic4Pki;_^QhwZMsmHa4ra zYyh4(9HtZ#_CR2Vtj~x(UJ0n)K?#pd!SO`IG01o|+yol&DCh0qe}6AC{C)x1yeZsf z0^RIKlcprj?mjvf%^ddjT=;}FaX3ifBt3ke{vg@S7(MlVB^oj#Fk2u3S|;9e1+(~; zuydf~ixS-^5L1K**2^!ks-Xz=ya4U}mu>HNVv}F2*5xn1;8T2T{y?$WqJx9^dnm%1G$0EB<6HyatyQ3H6oX95A#lOe;uAZHcCq!Doo!SNDP0ZI>eLX2z*0t~QM;J5A@7mm`FhR?? 
zUI|ua-NA<0Trh-cry!mQ-IjSR?Kf8iB*WCS?*?;%ac>R|%xD-*(U z4yxzsgp%DaprW~YnjTF$RE$EIq#s~Ye@(J-6VG2Rwon=eHY{KO3B)4)GxOL%Sp+}4 z8aDH2&D&zGdqIjI$HB-1IRTD%1}fd{z1Z$%GL_$l4i>sC1kgM<&ygZccBauCKuA&) zq$Gxz_!ttWOxw zH&~(t6q3<31%2DEjwPEOhgOaC)+nIBeD4g6AQua{*)yb};ADT?QLf`%YP}N1W*@_K zjX|SCoHBjG_eBd-K?y2u(s9|&WJr%0*r6_fltX0P{ePCww!>`vy>-UeVphQd{e(m= zW~4q=&0-<}#`Gh=jO%HxRv=n+q*+fCUe4figLS(R+;Tn5VRi(WXNFv30RGXayTR{B zci2`mKUq`c@>A^pSVu9*J&uzkv|Mm80qQvvx8jdAnl`U2s3gx`#$)%m2zjn4c5XlO zX1)1<4}ZIqp{Ox&w4LqWgsdfRr{#Rp?T9BMsv}&-S))Se&37du< z_T5UfS&IFQRIK8)#x(2A36-?>*^T&`L!d;{&KY4gdhG=iPyF=t;-QWYm%=2EUMw*; zJluvvW1b;v`LD?gfuDF?h(+`p;K03~Y=N#^wSUr2zk1hKrbf&3Fv_@1O7Y=BvNwv< zh>$beaF*vjejC^bcFg#Km)p1XMY}z|4 z^M5f_rgob@eReJNPF~{r6}#fEJ-81}3yVSfz3%{x{oK!|noiBC+M=7%Pf&ZPhx#(# zy~ZbsMXg$gvhUMLoL@?mkI-Y!)M&xn@EeaeGrQIMIi9R{&%jqrJ$gpy6SkirtqMRh zi+3HRY#=)~#IGQ*FTa7tBuSF~f&JlKZ-0#h@GiXuW@pD@rP;=RrA@a({?Z z@_DmJDNt|s5Q0|{!AR1b^HO5gcG1IAH*^V>kMdnGiW+F$^Il4}XC9IXx*@IEN^Ax0 zN5OYS6vqpm(vG#Hck#!tw?A4=)0O2L8hJPLv7#uiTYsx|9O;Uo)6EcI2LA zsQzaXg;HQv`BL{c3im$~Lht45Jb&(&^Yn#2a>I%Abz(yHaIyQ`B)}CSo$srE8yPJ- zPZ+s!ntMRGH|3Ky{i^7=mv}I|@pBDcAPuUYqU8gO?C)Xy<#Z^$#6R2>OIeJ92LrUf z<9bDY?(#rJ0K^)vg{$;$tzty4LJvdo^Vx*}I^R=RqfUi-4}cm-^_=ndCEvrcaE0r*niDKzAhd$E);eU>OttCogm zN31vrZY(Z%l1ICiA_LqYG6Cfx9F!`V7ZK(SPYY<9`QvZE{01W|cD-^*P7`4h@CP9_ zvrppVnsNoU2;6iB(dSpj9)BLg{9$uu{x?=Lbhm{^8vvW!#6FrYQd7;&0HZOG@1HFv z7CEH2C{%bpWmhyoA(Igb43@D^?6j#U)cw-tB$| zI41C4Z7bi#j|kaHciFpfa6#ck?9k1FS0O-hrXjw|+|7pQ#w#uPtrH+>o+4CDY#i{DLgr>hMo`ykKwYW^7z zWqD@l^<^QE7F^&e(qhd9=g1@u26(|*?j^o-8%IFo0(pb-bUw2?}nUu746?l86`QvlF2=J8fX4q!I5>N>EC)`T>o7Z#3hmSeX#a(gvXr zUywyf7|%!X8-0Luv3*(!>?(j8rnMMrW;(}?#Kj^Ogx{u-3MmnOGrbP|K@oG}6r-^u zfGH3TY|uSgi27q0QShyWN|;(DtdRaF_Ejw>_2H`Lb6NWS0Dqu$!qH}U0pic!m6`h8 zlF(_#lu(7ES6q-0>tE2-h11h z&ID~IC(t+Zw0+18nP~h0+U7DY@rUi(&Tt;=3=Md3wps)bhvcChl?mIuK2-W2fz#Ec z@Q7+Rf_SK6W1yf_d zWaIx{;kR}yjlzmaK9G6&2WzeXS`NNGUBeEuM9q-Jo*JF|NXjOLZO;4mi65%#;(UVC zOKtff+l=rr5Zn8n;e+v{!?njUAfSE^^vg~B8XSLh<9{$QQe~#$5ShnRk~CM8W!C`y zgLyIbG6O|ApVU_y8}q<`Kk3M2ihe?Sv^NY_Fjr5Bsngys-Bm~Wum+xHbq7||{_a@^ zo!nb0*4k*ZD25y%BbcG#`ar1yF8CdTQU@l*;e zDv)^6-U;>m+C4bP{6x?`v!A9ZRy8Ab{@msSo6R3Atc$)Q_=^||FmNqHqBY8nK3RJQ zcNv6ImbJ5vhpK>mL#g(%`-sb{HRSzkZA6RE>wmig!SN;iD=31x^YT_AA#jzhd<@{_ zPT&AS`}bs^F}Cqof&k%Z-(I-Ptt)H2H00)(14$H4Lk7hzIe*Sp~le{{ZHO7|ea*r}};zHGPHPa%nW*fq(+ z-puFe!aFTaX@J}y)ph@rZQMi1VqWjyy&d`M&HOw&%-N}F<@;^b1*~y$BV%lQvEBG| zQi9{xA#Tm8suL+1poSK7W#Qs1o26`qfPW2Ce;o{~%^VJUW+oNDdSzoAAZCPvL4XP^ zUzR59nJTid=^n%rsV`s@d-pNHEJ`kNzChfNe*HCAO4sssz1hg}|6YBf>CI#aRw?-> ziIA8D)&={Gj0vu3kJ=7&*zBcHw(GKRtQLqvzuc&UFb#nC(IT z=|n^?W}8JE@*Se_B5L*V_Tr%WoKgzb17#kzTcHtDhDXH@zl+c&h9xPLZbxd%AkK)!Avs@3?zJwFB&PjS_9 zyPz$K&M_Q-7+1p$YPo-MUpxk9>!E*##G5$N{H8rKl4d-FTIqP9NA@d21 z?PQvKY5~r6Z7kyWM}Iuu^v?XgxJoln8=Scs%F{2JGl322TK(T|VfXIOg2@#W7>c21 z{Hal*-{91FUBfCPW$_(dBK2p;4Y$98MYTtWwzuD(YgNCHz;U5hX!eB6q@#kofZL4yJfmK;!vu(*f79WMEDuSeTf|~it`G&c7oJ#1Sz@RPszRV#MOZ(k;}xgr@T#t&4AUPg zw!-yu`xEv;1MT;B_b@PFc2}?D0!+AYyzP2p z-X{C8U0d>=>dSfqrQ?57Ps;Xo--!!m#R{S$S|Blxb8%ZnlUfOXJwf=B8%Wb$!!kix zS(ih<*omUzb6-yb%z`4N!V>E??_{;n`vO?X9FTzj(LPnKp5*?T&~zzP$V$Yz2AS2as+$d?fG!CIlhh> z7eE2)5FJE#gxY6iS<3=f@M1OIN=k8Mu(anbHzo%;{74)hfD?pXdq z-d~TiCIa|ME2kFZ2q%*@epu@~3mA>SnzQ!36(>oM198$`Qh_V`v{I_lSv$s?6Dz~mT0LMsFBQFtn;=l&>8xrMRKriTngs6XiI$CL$6>0=ytF9KCcwjc@ z{daQik^GJ|L!RpVIAQ!LKKL1XFp%dkGeh({zg8{WuGgI^*m|1au>6p71&z zM@wN9v{XHt_Q_XhXx8GHcyZ||sYM>k4hc94PRWv{UC;z@L1$8`z36BT;$w`1ki)=c z(nvMmeUE=v2M%yL)2~@ivfD{Trfy7P6GO&G!R#163^M{-=6u1^2a;_(u?Lmfaq_ad zFI04eB_e}0Yon<_qT<7fJygwvmsapoI3)9J)^>#?&tSi_p!~3z!HA3CZC0AGt?mlSIrH2SOdm8MYQ=Wi=Zu}V!w_8!M+r>)YR@-5V^CM{ 
zT|j>yu;j7$YS&voPO4KG#=X3Wo*7*U0dND}4-u{LoRB)w`GJT4$ z=(nm`;e#sUufO}~U6WG=u1wc?xYXh5=7(_}#>H(l#xeeCSu!|aiuq-D&Lk9A$!{_i(vAEs&g{h3?~C?i(4va*gwKJr*l32wEjl>ifhsY$pHtI)EBe=UUw*>^olRjC# z>w^K>hQ58sm<2gI?;f}aVo>gQ73!Mg)-9AzM?H#Nc17??Gi z^(nf_IAuRlz`@r)+2NPHA?bF-h-BheGzFSgL$wciZADrh5wi3kx)S8L4_8|i*eU!( zU<65FotK=cDc3{DyFjTnN*|+zqs~#ZTl9*oOnljv?tijSN98om9Q1{`UK@X>pIwfp z*M?0QxE`O*gG7L>ftG-WeAfsRJAO?v%(DfD6Sd@NmXD$3^<;~lA7kvUQG{1Pf~}Xo z8GTkAnb-?7V+5ZAj3cOX3ce?zt+e_O9-x4L6UQnH(46M@SWQA`1Y&+9Y<0O6qc`_2 zR=C>3qmAl_uEU)g*`kOEyr+MxW3wo&U6Yeee~zg*TV0V{AmDm=cahB;+5UO41cW%do)H6M^5 z{fu=cbC>#!0VlxU1lm5{=Sf!Y39&*^F9x&RhOpSYN9;ta3TGSOcfJ!vEJFe7O%Om9 z50aIiEj4nfVED@*A9R0Iavipz^bfEekY;$p3;?m-1KU`%AnNgG$3Bif7aRFzt@R~DHNc~u4*UfExW>hmwJ`_K^}bX3_o z4V(aih~A_bq%LiNqn_YczAWB8wyKPZ_Q1^Vgu<55+;F(zX4-$q`P%sdwg-HJ4*_53 zt!)&xORjb9b?pRsi;n9R#l{^}Dk_besqgqcj5#;!$rLA|sC?L~@wem#ceUwGK(w(; z4azYf6I#Pegcs7aA41_87&t8{Maw46w1BaQRI9$-f??o*ww!0WUJQp#&;~1xB0%DvWbk0jnE_*<*PYyTOQaO;h#uv&LM)~8l_M}37*i)e=MIoVX zuqs-QXWTlBx551kvytTeK5KMCA7j-K=qg}+e7Mxgq_nO=xeF9X3NDNgkjF1fqX**L z%5tWvy5oQEvpdwG2+q_&v@<^I0D_dBTT-ys*+NgFSZ!y>R>&jP zS_bT|j}w9j(Q640{#PV+>X3F(7-+Kxu%ynwT?T9l3=@7D1eL+%=DfHhE77+Swv568 zJf6QAMlZp~BemRF%9e*rH}}1VZ#i(@y9t5k0N+G!kk4!jPblTlOE8J4=T{`zX8X}_ zb5MUdV)U8xuV|#ViXgxU>HP+XOYqPPx+5MvKpKw53WV;_-CnG(4nUG|rxmZ0 z;Guj2Q$-r{5b)J%iv4hPrpjJAGI+0zS)Uy}w_ zRvn-NKoIILcC~a8oantTA)RO5w)oO>6#Re9dCvn_A+>K^Q8Sw4;B6Wd2WtPZ6tWb_ zj8!>eXmLoT?_@Fn6!NEwAMYNt{;PItDfg)U<60>E2_mU!Y@`}}C%g?bRXqIX_**Xe zuZdoSU=PxT#F5~JkYKEPiH(i(RcgOiB7W*wvj$StUn=>zhF?(pxWNs9#srbS1FU}~ z@ZmPFoUU*&l=%8dy0JMO;a>i*c!R6x0{36h5N=qwiUcI&8!=PXjPR5_Vbt*|gxu_T z+`txevOv_Az~zR6U@^B4i34d?-+;j+gQA}NUe=nl$%d`j(zwbb)b9ccls@z_s`fM53Z!=1G5K#KGKii|SKO;b*Jq4D=1Hthns+@=6vU060H~ zCyN@&#Z!5Kj-?oV`R0Qy&O)i7pUC`~Pkg({^!aP;=K zD?CGN+QC%4;44jK%(Yy{f7`qXI&`zd?$BB4Bp`q9v{g+Jra@sAWz3x(kcJyUp;M?C zQ+$#l?PTb+DWn2uh5U>^)EKyQQVP9%TQ+3T^in4#?0z^GcaU zozs7dXO4}f>?lsf%n+dY(3+`EL>;LwNKvFTf6p{brqE54ViJ{AAt`?Y!-}Yk3?4_@ z_#txsq5UUp;u#B?|3j!*3b?cWb$nac zFZRa~InbC5T?lkE-Yc=1#d+qkixQ>ToNTMz2!>xq>U7V3Ma$9xYo#fVdbzN!~SmAuu2^~E7H&6X^?t%txYdtmhf)v)}hoY4YrH`|af~ce14Hv! 
z&#GeM9VOr)%79` z1EDN4odS0GIAfH3Fh^r*x@bBB0~Ek=8MB^7aWhZnu1#NvXZK&7$&gY048-3TiW8K@ zX*R=Q)}f&pk8Mp~6+kcgA!e_1tW-kV7RPRGl)absZNClL2Bm zS%uEs4qb9RO$Ia=61mN0I9#B1e|QKwQV?`5k8s{x%@Q*jZ(dlf1PzEcGdGj6hdnpN zx)d7y8`CrYg-RKvnm|huu3y`dwq&gmI<;oia|QQN2ylPA81Qb#FsjG_C->SeK5`Sj zMLUo8<DCKr_bTP!+4ToP=q(lo?Cz-9joPHT>YkZT zDm>Zg*IRL7X%jvIFm064FkOfwpzWVTmClPiNhy0w^5f{2KXy`I zwh;DpOXf1zZlKu~jWFD21)SOWgR3Os3XOXQOZo*}$WS5%Vswh_VY~yTs&mb@rn`3Y zvyjlxY!e)VhCj7aVfhRCwljHiCqvfnSF3YuhT4BaY~u0ZfZNc?4msJtua>UnV8<~O z3j^`iV*w_N{Gde|#umAj-){#BoS;<;S^-JdI6z|qEknEU!fagS`42ZAQ0| zr+8mF0oLP;mP}j4Vm98IEzgk)oD3=!B9ea#6BSg!T9p3^p7-yI5VbMr4!W#1O_m$lF-^etDT@h83hjjH@n2Y8i!8sx?xfIo(< z`RcH#*&)o{w9y_pGWx|LB7XcM?c3#lC8qytV% z1t=@{g97M(UrY4w|M3WdQ8<|OU{Hd82lwxP4c7lY)$1ckfW1mY>;atfKMN``;G6od z@p-pwxT9@riW~oWcp2a||NR;NeolO0y!6xyunJt7@Q>@i(t&?u=zm}2Q~ZB1a@q?U z@Us8x=HJH?|9$9p)*)5-jKOkc!1wc%{sa5^|8;3|clv+E84lT5J|}I0b?6YXt8PsP2i|q1b;)dFSz*b zig^fPslYW2(vt86-t?ZK{NDyq|LXQvu>eQu^M~{%^_yM|3C(wsOA#gSb+&v+`-D{M8h~ zar|#;EqHR&FJSOR=Y@Z2@qzg-5;?(`!I2Bc2nI95nA^}S@+2rS1C}5l;2?AV+aUFn z;csP+(K!J>>6}~m289~p->@H}7pfF2^Wa_tynaW;5@P}aF+iH|0HNHZP*?`Cp|uXnXPk7prU^g7A9PYv-u|Wj@b~z#W8uG zU%P|ovr=Q-y9@^4e@?X?((=2Nt-nudgp=;P?X$F=tHF*3cBMPgM+W;=MCA2%HgF!r z>xuN2QNlC!R}c9vs@)TA)T)hMfqw5OYIhLv%!Z&qc&PLb6|DakL{3^LdvUPbhrjTE z2a`~;IE;Vv9_bXe+ljG5;#v!Qmr%;K421baa{>dn-pf~ae6NQ;@9lnF_BzrDNV$3Q zW8R>01PPhZ(cM1~INKi)K#mfhes8`%Rk@cVSl#i`Vwu)SiE&Ul09C=%6^a+YT({^5K`On#`PV zjTJc)XRXhADx2xkQx}PaGZ6fYB40BB)w?kPSi7){N`Z~90I25LcO#L&|7@xC_#s3h@7X2Vl6sw9dZk-;RFQ z5Bm#;Dnvt}qMhthP2mH`2h}{WnRI`=dv)+hM8Nk=)~Y`Ja;xz?8kqsYKQ$w{3u{so z9F$Kmz8J}{ym^sP@QAd@Nu=te5X=z;5|UGaWV4>o?FmPm$u<6tB+Qd-KMVdbQK_Xn zYD#9LwP%?(F4q5n7_84%@j@zv1;i=7UTGBwP_5gj@Jo4Lz~GS9;_VF&;8A})#yw^x z5W3#wxCQ$>ITUcoh{hyi{|5OGJ^j3Ku=w@OU+^ik;zJKD>>4`}8pB@k24;fasI^Do zs!Pu0P%BHII4;M@3lm=&7|#g1rOd`KF5?ouEu^URs5wWFe;I=;6*Qm)$q`nL>afX%}O!02S@Wq)W>= zQ0oD~x-@>08GlIoIErrP@xKeZg2nckOJB*nQ?2**pix%q`SzWwLP9Eo6FqEQ9xb@N z1uA^-LK%W>2`qmjh*yNL_PZA0I58A*7a6X*#mbBky(Z@d*^zE)b~Sd_Mj% zODomq(p_MfDrQFp+vspw^sJ z?LRDl0pGMvH}eSySgU^lv(LF8BRw@=LE^wi8;4E3y}S6*A)xli;RxT#8W5mlkrOWd zn^?9(Fv0uQCHXU$sAh-P`_IwXUrM=GKIbt-xy;KNeEHim)2Ml$z?W$>roWc7LOzkf zdQuC57dB?Lm3yVIE_C^uL{aV{Z-=z8KXzA%%L7ADs>t+2Qo?^5!zmdT)^~A~r9E5Y z7$BHKPIyHhLXYsz&MJEgEMI|#t}lWJmdT*0wH9LX2A%uyZiOf8?rnr(DZy22+WEG! 
z4k~P)@68a8P}eZx#nXM)+Q907Ro2naE=Wi-*81=Ak#}Bp0QU{x{`?ZcW0c1a4_-`d zN{MWNS2F}f&=Y^|h=j8;rWWoQ02(EE_LmTF9({AvLdh;P7rPxwW@AP+A( zKpd8UC6=RqOwYFYlFQ7ChLrM>x}eI~7o8slaj}*GxMYpUIylpG;kZn z;*y{RxYTIvcwJh#IBD!6iE+VhwB*e>wLQz}bK4&cQf59RP4{zi!+iAHRG!ge9@< z=LSCeLm>6`VDD{{#>=sn!^Z(}#|+qLivQ}VdvW*c_VI!Ntm%Yge_2TGaV${tfVG~* zg@rJ#w5?DLFm|!;x~lY-_(0JATkNTfA}Bs8>a;_UU5uAEF*UN8AbcNnPl(dP%>p0@QJ_G}j*(ng6031wDP9Er)0?9RZ$qo=eWQ2*KA;EhL_sDKZ z=!yCQb{}Yx#DU1fp*RsotY(1)1zaklQ)SCY zh*I!>O|eK%e<6HDlpfFqe&7%cFtJ~;?(9fOAj6qKX&6SJ8jup?8-1zaz$6*;pL2mnyvHoBdhvj!M7r?-yzIhYVRjSjq;< zE!+!{b8-4TlV|c(85ZMMsR-sV`uq#{xAj|jLrgM&20NAb+$XY`$Y824t9&5-|Mnlw zB)=8`Z~&5Eq4gy4W^$8{t;~O>w*f6)-IaNWkFErSQicrVm~8jum*Qvkbik9@OUM}c)#4m`E!{7=+n^F z_4iYHov$34D$t&&X@HRa1;E+TgK>l$0X-;x0-4%6l+Tskhtn^3vUY!z)DunRXP>kd zCVD*ya9{u^5b^^6xNeEG-=o|>CX+*E9{K|KK44dEv4m$KKy&#vrpyQG%rx;cfY4q4 z>-1VQ;Nn0b9<;Aj<0~6Lf2XuRSgE3h%M|nmkzW024Hux$NqMmk?GoVObR#8R@dYq1 zvk+Lg_$Ljd_7x&;c=Uf{i3`+INGo|myXn7k5)6xib>TNxwD94ML%tDEC=6Fwpo+ko zI=<6tIJN@$%3!#Ef8y8s32YMxlIQ*}1^6-QN{jyI$)xs;(T<1D@$WC;7W^cl79JAy8!tKwNm|kA!dKRLPF>5uW{)AlqebU zrUEXA`6XgN{eSvz`TCyzrcS9N@T2CpWd+ljF$}iJz+FhJBw2llvjFZIXN|t;$HG2O z67Znzf|7*;KkvWR25I8`ib|;6QW!eP1acDj*zu)SEjsFzT z_Gj-sL-R%qhPi*D>xm4A1V{5R1M~MT8<;@WA6tmq2E90-KH(1uUWk2pXS&w@V$WSK zB9yKz?@tJ2#r;r^EgKSW^%6se<)DfSMrV{R-W^yB3E-29N7?|&j;O^urh+sQq6k8b4xjTW4pCJzO@7rZrq z1$gX=@150hE9(;^{v$LXD(JNB6LA`Lv&$UtHa_g8&`zD&)R-EeZbbl`0N;CLG8@>l zFJFvdivWMCD7srfMY%rVZ;7*7n*@(!Xb8E>zUBfo*m_VKO#t=aZw|}s{V*p0VSjU5 z-2qkw`d(x=2zne>q5g?*jiU+aCpJ`D`4fr#MMwMoQ-h##3B z!ec6WuntPk2SRX@t#m4#ZNuZdoZwU7`SWL^(4qn2$cw|I3WdUyt+7}2+kKj5pTd#$ zEx>i@e%ifW{rkYyy^`Zn7$1C}goS%{tCgyoF)UiH`+ zF`C!?ma_Z;8S0*@Dd=&Agy_^3)`Omot^KUgS57;n%46Q-I<2SmDKid*)*$o*>;AL5F<75>8<4{*A@ z{yL!4N#rMrvD4t}%Yv>A3SJ9H|Neu#z;F3L`?2^}FGFb4aDjV~AwRL4lg8}l`)&AN zve&H-Bru5HHt#8WUa%18_Uu88ggVJ$3Mr+*b0CLYNN!l-hIf}+=_ph_m=%AoBE5&2 zm#1h^+xDUjH?oaF;|$B72_2NIo_au%41c1ce?#ggL0?WjVf9?zUco97F<@-xWx0nE zZFM?N>>gG!DbXLVWz1rqh{GzERTCH5xJ%9a(dd~E_TEBmXZpEb`qFgaDva@|QayZ% zeKg7IV!)r((etLgp(sIz|A~Lx@lHjcbXjWkqR}sKVcK>Kyf-4rC*61gJgwqqfC8b) z-S=2;Ng(sHRq!M2ZzD;lC@a6Y`e=aaP5}Hnem)u8o+`Zm)G1%K&sFcEbdkg6`%RG! z(AZIaqj?jneXO|FRaXtlfuEKH&THT)kwWOdsBS!LDarUj%=AT2lwxS=T5f zrK|vhi`%$e2z>=>!>!-s&(apSj}{k6gF67>0K&Z~CUZ+SGgyyD@6`Y!YE#>k)jCRk zKk%l%RPlFNFyz%4C2X_KIfRw@pFruNE1lODc7%VC_~zK9v<%l}diNRTBK93-MgP zIy}$*WLSr+stb8~=SR2^&ruyM{-6lQOvNWu5&ruT20; z)mKu3OO#Ud(R{{(0+x0g?k)d6WpA>jxUvL`eh>o$2oQgRH&EXT)Pghv0ct^j05xC# zECiWV`!x5Sd)BH`wX-3UP=trOxw#ofzt_Qu(u+%{Dbr$_z%^3NJVs;m4V>xY?W;Wp zS$$kxq}Yaf5p5=55L-7^(r#EzM+w&Pw5K9Dm`*QzyeJT0bnmeh*M6%id$cxAUYOkC z6UTSq#Xo=Cd3a+PpfgqJO7FLEo!VM=yguC5YJoNt{kSNF3mb z6@C5$p0rLts5}R5Uv#58m8`?yyu42!ksT!R3FaFkYTEIOc0#d!AZ$vwOyuEOJmtFi zD(lN^2f+jHcrcYCI1!PR1rz5a%^{LL2h4xq%nxelKJUwwK5HhU*Ww0((N*hk<+H;<|>?4SjE~`sYj-YwLEwb{k-}}ya`yS=%^Exc9 z(NihwTFmM7hB>cJHbygho;UAwcIhQpsAY5H3z%tuO8B^~njX_6_ftIGjf>+JDo%eR zr-wb(_$F``+zKEgMva8J3Pl#GU(#wVlmN=0w0q+B)0bF0dgCnA6G^L%_S>65U+dJJ z%#3>%LnqsGK?kHLll27jMI(t+y=TPW<7j8^_A6|^y-$VZg!sx^5gL=cC47OAPqf@)0$RIj`ys5jS?uP3g)oTN#(dlzPo6rjdJxEu! 
zwJqZ`qPIv^`MRsODW*RI_6b-+e?Z36Fs4#_Turd0q-Rz+=<`?h!Exi6%g}$4^MbPn?VJtvk9fb&^``eB{!@2ZGco*lwRu6!0<-^kao}=K4|W*n*KM`nZMh5m zneVD8%+gH1w+U0cb){x-ENZ&D?V?=Ft(rr1S zwmD2;v}ql{7W;Nqs{4PWSuT!8C*7985{fpj1iwCL$_Ip>L`Yer`nzb?t&Q|?af3S9>EdSwoA=DVnvL36!7q ztNFe4FD9fE+BOC+YCp@jQZioG)Lq+){$!-&naOLjr*74&vw(l<4bpfVAI$V_eLscz z;?ip{R+yOMvtOoit-h!A>VX;rCajQ$zwUtN&t9vn#gKspXlB+w9*eQDfAdD=zfO&| z6IW}D07H}ddIRY$LBp#o>del?oet+cm{H*BxI?wM+^W;3sd1M_s=nZ6Gi_^79N(Ca zem>XFQUfVe^E!XG33-Qw9dc*)WjCElNlu+=4^1U{U$kA;3I#0PFrlcf6ObVD==^$4 zFw&NQ(kF`GrtX!j_v{JZ&n6|0*~#5;Ek#^nMB|6mHG8jwkI~M1?iTd3G2>i6L;@s+ zpcVyNu3(EJB%@B6!?-+?UR4=_b$iX+?HXpFerr70i#&fX9$@-FmO%%Z;(mP6JnEuL zwzQkO{TQ@GFqn}=kD#VVFV|6YKb70b0^%pT8wmAM0;i~ghWjT= zgKFGsvz3lm=I^12xF4tMK9mCpx%BVFw4ItuVgl)8(iM%G-JqHQR**e5g% zu_8Xu0`Y$>x+v+2z1N!kTYp`qeye5IS46EYo9$yX(a~N;LFGMeZp}gE+sG~X%p_jX zXm0S3r^RmS-80R4u5Z&wF6H|KVZqlR5Zso$ppG%nTy`cWIO|RTUugMyi(oG=i z@005V>)vKJ2_8W}m+r7F-AudFG~d`a(Y|gKH&}myH#Tv^iM3#0dqzo4+_V7(?BQ!% z6?;re(MyrS!7CH7VjYoqYlb;%6&aXhHlL%U+jz?%9K2p z$NhiGd>Q>^`yOW*^RT5p~t-QT#)|h|c^z`H5u2V83%~F`(b_SY3k5-nhBJZU0 z#X>I4D!4+Ja&m=ITD`a}XVcwP@yq#fT0%B#&vdCQ;D{}>*}fWW>?qw(ck%d6gDHPO zib({I)w2ElQ)MnOJoh>FyOqze( z+t8StoTXCl){k=1j8OT{&u31KPAB*so_6n4gFxKMS1b69meX+z_M^78n`<#r9#mmD z23;K1%x*!oBU)umIEw2v81qlH56~RUlWcBXiX3&y_k3@fJc?awL z%GqWdLWi8S8Xatmq?w@GB&N0ws}yg)yVd=Xx?b<>EjqxSdEUqO*j?FylRg+2&>kr^ znoVAhuEAaFsAhHC1o<6s{EIqh;#>tU@51-lReJjq&)N6M)m-xMZla}NJ9{mjui$*9!_l$N5^r<5wC(MAyLO6*zWaTGEBEA{R`d1i_F%`8cuMErFh!x}up~eo!fRsa(vk zTzkw2ookr-K(SvcJl2MA{Z8|4VfV=3w9EW1i07Li8tuPk3=Asu@FQe=9JFM z1>erEG(fHQUeNuZqDls%S?=lzfhWp|%YF>Y-?tz>j>2wkXfJU-qUTF1q|*Dvf_f_m z?rK@yjS@|biDhDCX$~2GeN0}x zCPm|}=N`wnzj-6CK9v4sY(^nk>gyh7>lOel-<@8`99))~@n{IqWJSM7q~{|BmA0M0 zE0n9AlkUva>%~jGxoyI1p0?`lw>IojtjdZ;bPcvAye>x%#p{Fx`-BKBe8{%&Tpe{U zN>5=P>u`UM%&8c#!XNKxbVD(%Y(PrO3ls=G)h?i7`UEd~Em&IW^rP6gIAwI->|pzo zyr(9*Nc-t>ygFF>`!c?8yMlkr7^s~GniV^uaG%X!CgF)8E~75wIqxhrAjEmHKDFSW zqcuf#zGwNOfIjCmHOEzZUpf9g!bs>bfasRz#0r1aZ{Wt*Tl=$`7Z4~c=j@t0QO^mC zBJE>@c+OYBeWXo_hkm$h7Zck7bNz5HmxeA}?j}fHm2zZl_%vQ8=O%3fznk3n*95|S zS9ppfnqGLtb^K^AvJON%gr;yL>>_mHH#VBW6nbagVdlFqf|O5VvrO)^-tD^!h?86& zb$x%&*mQ2O9o*m-Ubutf%)5&nP)SR5cj*rA>HD=CW4e55d+WYD9j7n}yC@^fc+it; z#Dv<*fRm?((LpFfY#(;YzrEwwHs+M4;(coG#Me9Ko2+D2MlBuiy(Mfg%XEJmw>Fr>`jt)n)o4>s*6l77-be}|%Phi- zgr4pANUPjnAyhX=6O6`dVY6$6xM0@ZK56{%JPP*JoM6VMu>OIeIB-*LZzOuX<7C8OTEA zyw^{dJ3ikeDAYNlj4$rYvkWq2s=j5+y3lyM34HQVWb~A8Z(C?@G(*?a)K{Qrcc-oHk8P^U-+| zdu*YJ4X@{AAtd(|Bk#v=d--@HGjl{P^qjiUK>Oz?P%T8(gi}i}8ClD{B13;oes&V- z0}gtZVp@C``A`0oS*y`G-K;Vm?uA-rbEi(yt`k`+Ot&9#tLSm;_Ff+J5%;`RRWzMF zsc2#FD;0)hdkCe3?pB{w*9JJF#D)F*y>H8K-_5O_P63b|(Jy?D->{A9br#*7`Dj94 zq4hkv^H8?xF&>O+deTV}_nd#QvLH{-%`-j4 zv_HlarFZa(6;w!gTPK)PS4){Q#YQ?}IA3sBhOL&HCA%c@U-4yOT6ceAdLF^`BnYP| z1$mjip}Ut5@2E{P9=Tv$D-dOdr1upe+V7J^2z&R-y1TA9X?kO3EQlq`NVa@>vaS|5 z1K<|)SDB(0TJVAiMzyZ}=z@u`<1*hQvpTwhR9;uPuoPNvGUhOfF`I500AVm9Nn36U zK0`hp1ryIz8#?tTrICN#lGCF)wYOZ%&ejsDIBHNVeRk_qmvY^X%8QP^R~)FAP=Imi zREI%xeTWyg?c|{JC;Mcl#9|P zSxwb58>*+&z`zU-EwS2#5Io09$*4{E<# z&Q`B6wcIxlF1o<5-R6Dc46*eaX1}uMS>l6Q5UxONMI*V(I-^;g(>GCv1wNgR8`IZW z%Q(UGqQ*4g)0$1io;%YhP&tvUs@Gy|#W2o}>db%CLqVv8mzj~r=Kh-N6}39M z5-@PF8e_24j89W>_l&OUVR{!9(W!{p+>EIA-hq*H;YX9h8^gpeyGf0EJn85ADlb!oQ^Gvf6&5P15>c-a;QUW0%8gdu8I{ zWIbBd_a=YapD%S&aQywoK30!#^aiqS76D}!c?0f`qj7uV2-DK-ZFExZ``wnpZ%hQ2^XW*2|20(_sxaZV>&m(}PKe6NDjBHio+ z?eZ+1o4uIB!ls`WGvfrkvH-X1#NR)u&1*Uqd5AE+mT9@TwOc1I1>uauwD!Y$!OUM$ z^Tt#26BY!g1&=`#G1y+c#l?N_^Y{Yb8FjS>%e**;$ag3#pMH?h44SmksX$xB#Og-JlNDbip@>E7p(x7+~WZnA4gU> zH()^0mUp$LvGD3ftUfXA$=spB6}z~uECB+ciWw)ckYW%O>r3}qSo3QJdpe{m_xPTs 
zyhTr7JVmwf$uuOGQ1Sj7JT-Y$6qMF8Idmh!?RpFn<=ah@6b+awCKxRNAk)2NvsvMY znzWUFZ;+9n$EUUDVT$awUYg82zdyH4AVZ2EYy=pZO?rNG*3f2?Zf(8cgF$ zju}sg)=gKsUCWqMc>ALlb1mNO=p-eWy*EQ0vLd{!Sypvm6UDeyew1$*OS-DcL}X>W zTRk?eP(iyzt8|MgB5GkMf;57gXWUY^hfN`wBeRpu#C^iLDC0$spJCbY-) z=+ND6l{pzN1bTj27U_Xk4^Bnx6=++30Ak;4jm%f=m1**)@UjMpfF(hJkdtzWHKK0h$0DMqci#NI={wn=l~9n@!y$S-qSeh z=-sH2S_q+P=o>=F{EX36`rkdU4akpI_0+4aSTx)_9=;*+yy-dztT*(1r27XAIeOt zr~l&x$97U zzpPCvT=QqRca3yi1^wC)sw4L1zE3yOZ5&@W z3cMqqiZ(WG$m@f*EoO5n7B9g<1DOWS_V>fs-M!5=Z#*4-VOfhyIBxJ&Vw6*8$o2NTJD!ty$t+&$#pA^u{Q13@oDS=L9zHYwA+3+K z7MY_3pbT6^=OEhmRdOA_=T8hL?A&N;wsW~4-W z>2UxTs93%>P=+=4*2-8%=7z4GuT8#lyRdNI?F34y$h^DY(K?3^R`-}GyDXF_n_5jY zKGx_x_^X|LhxJ{5IGJ4bR~VJ-`wK$W1T4%(S=ZNXab1Yk*^-2BabVz2#7jvh* z%qQ%zXI?vhf{VjzK0U}z>@a88gB>OZ3}X2)j6ZtifK>ecF0VWELg)=Fojz@7t>) z8V?u{E7h0w(4Myl1p}K)-z#V>y3Ur zI>xgpV6{yz+Yk--?KYm(`ZYOv(rJt75^wgt&X%a(mfB6-U^Z%{8>yt-%{N7Pu`@Z2 zC6=Om2w?4}AQK>!-9EKo^>#V7;YR>SXy{K)q~{n?dgk7z`vzh`TDdV747@xMwzb;} zX}Q{e82M&-XdW&|=2|sR9F01g$H6VJ2>YE%=R?g3r!uy?*Q_o#F++*}1OYjn*K*C9u*=Yy)%ml{Po{F&$m3NGeG2PeIdv zTpw+3xwf~Sa~`SgNIf@4;~E)oVwh#N|PbqF!EIa%Dp^`>m;!@ zqV3gU90}{yI({`r=$jRZx`5$tHdS9rQAejIEXQD!#!`#P&R8EG>?7pVMH)KO%E(?^ zdW&{4z13L^a0TrWlp=rJu0qT)mT4=0Rlg3qc9O5;+iu2WZqBsitSoy36&_?UhlU_OAH2rR?;5}{aD+j?vD?8DBFX759K)(NAhxcHV>nqrX`57PA79V!=P>9S4wbvG!UzH z*MMX(vMcvfAt-n3ynJs?7|Nd>kHPKc>FdSFsX`NY1uu-;d;d0;@_n70g$t^Sxn)ls zOqD2y+7G6UBvEa``a6f#8DE6gbX2{beOTV)SC-Yj-A%Ph7z4dxQVZ>W!TQn!@P$E+ zf1T%%dz{Z;Lwnll*=+e-LAz*P17mD9uX7wesiQM$VGDNT56gmvZ!*=I%O1U0_%&9~ zDFf*EyncDAJaL!sVp~$XSvkVEn+=!o==}i4s2)$(^Yc=HmMC=KbMY~E<~^KEgbeKd z4a3bWaB^ld*^qTeueZ+Il`4t?1qBL~Ct-MA>0JU5wH7n@?xb zDSL44#6A&`LCz;uZP)F!d5nyGv@Ce)ns@HgtLK2@+#SulXX(3Q_Q`m|=*ubydj3gO zKH6dn^WeGX#j6fEZFJeRPwU>-l)`>@+4>Oq6 z^mST8xZc(~!E&B|PMORWS#Q1NrE`G~3$?=t?}b^yI23j;WjpKD<9hi>{Bt-fAx4G9 zG0irpQ!vrAI>X(y6Q8p=7SD`t5xq%vPO+lo$vBcD0La1>?U3`Ln`3tNv6v?OlB-uD zw11Ccy^0Ddt%|3?9-NeepBxl@G5n*ArtpsL-(q_`#`P|Lp1)=nm39+r|7W6HX}h-8c2g>XzVFg} z!_?kHzl<3z-3!GWJ*$A(kkrZxBfQQo3lDV;-&J0=;ZMiW>5M@TxD&y6T<&(v2#%T3 z`Kb8!dBh`sF_56RKZX)jsGJW<-k(MjK$DMRJ)iE(7v}w$*&g<`Qx3z!^!PP-EO?0A zVFDiGLHPQc$?6@jP-DfU9RDo#U})4XEd+<@&SsRUaDM^;%VB`4uws5u#JT>&(1n1x z(D|iTqT;qF?~m4{R?pXURgRd&6m2R^fh{|~3bv@%w+R#sq zM* z?4Y%OrG!#6xx5zd^m1>fuSqIiw9E?MN1xvK_#qB-@S7^UC+lXueDszhR0Nn@%&%3C zfoe;c`0=(VlSBXnM3ozHv2PgI7SY}5&}C>J#pBVv+|d1qelg6&WWc3u7QPkh_r+<) z#evI}l!dt)RVo|X;k9K3ir_6cER#mnscZ#*Q=m7T^6!eVIY4D8gdE1oM(Z^h-zE$& zo6`%UwG_R1!mhMuk4wW_G=S?W4DRMC3+wFjHeQV;)g+pu-i9do@t7FrBpIzI)CQ81 zo!^WwLl(h{?S0G-XuDSoXtZ=IO_r~#eOpqUb?E5&MjMc#G?fZ)Fqo)ZrY28EIM}m) zO?ZMny|4-un#yk0`s^x~Cz6;ahVD1UqMtra+w};zeh+(a+OhZdZVp+gTePO8`QGh? z1+~dAh8jwswhsYRRyi0#A2|F(qv!dO?5B8H63!(tj(%5d82s!qp1REy1+;A$nnb819mK~4Qm|5;$(3QIXbi0EXtAwWm%XleeZ4T zH3|IhC%2ufz%M#1XOQ4N?(I8&E5fl9W{bvQX8WmrdHD`kPLIoZw@;@fKkc{sY9>F? 
zXl@oLekFGMikbX>Btk&rj2G#-=RNKNPLY_P(YmG)|xTudf?7+wCSBw{P?8DlFFc?NFjf5 z|FL!g6_HYezBebae5TMV%3t+>o?qCc;7T`XZS-LxxUF{h9)A1^@NZ8&qGacpD3_Pb zA$M2iud@epJO75qgj^{#LMwZro19@8yOMN|DW51Fa`pGXVV>jz=K0Wln3(K>+vaDh zc;i+1-;aWt9ux$(Z%(4b+P8jp$w+^RN$&6N_%Ii7f?HuN9|~|7`Vj7lrZ3sS}->V9@uhl{b78Kq0)HK_--&Og?ON+u2R+}L;7En zw=(i%_&0Pk@7y^fMOZ=-v8aDz%?0$1YVgb>{}Eij#j``nH7Cg&g-k$m9cDlxlgPxq zAB7QQdJY}XD{q@?tkr@Q&ubQ*k9)$k97~nka&Mf&GD`9Rw-v>QJBIt@O|aoEH`=)% zrGr;uTZSAJbuApAChUfew2{!LeZTD3ND-Bsh6y&9k z(J0c%i#?D*-otWm0Kl8Kzgp%=`GglyQ-{i0A`G6bvu}$D>(_i zrWWl%AIInxML0r=@y&m7TgWleZqSP}B;(o}bD}-&!^{;kJhc<*wD)#~qCQlgb8(_= zaLw2E(t-I|hDr++-(#FJMp^(+0(MRJD~7Js0#JOx4RP>mb;dnbvGiP-EzqL|yF&qi zy6-wfaK1oII6R(p=a%mcZp+TGKtbow8+a&&jE>2+!j!5v?**$+f25F2RP$VP^e%;6V#YQC)2g(|lW@gaIhJcla+CkNnrjT&9M z&O;Cc;3FC)JUNrRTW9Da-hN!KA&6d-$6JmFWF$jX z;J;dT*}#SZb{2o!_&mr5G)41L2bm<7_crVsNUAiiMF4kGv827yF5bf1(`8xQp-%s9 z5vhG?`^_6zcQ1#p+TLzymWF1qJc%3aQmuF?!wrJXs7x!aOoVMhrLj7<(G47T;T_F5 zOEe}p8T=S23t5H`#iM`dN_2G(Cs~H?c#&#NOt)9JN1=aol^IAVIBU~do4&=gTeE{4 zF51oc=8CZ9L2K=w2cv$>nG{2V7NKPW`{qLfb8z>)sy@~ca!ynpCq&B+s`gy)SnKW2 zV(1&*I|nld;pQT?b-yw9V$j&z-qfDj3#t=;>idg$K+B>kvHn#^Y2Xpkhgf#hrU7YQ!`o$-h3DMk=)F^i(Qo zsu?|0Nc8n7zSWra93fBVV@vumvETt?(d9Q?pR+%-DiD{hOKGeglH> z$=oFq6!j^1*sq>~zYUTKa?%k;WbkjFjr|#NALD;U8mqN(DDu#QCz!*2Fnq}Sd45VU z>K^^wuM;qqc4JA00EN!K4~2ok823m$;6aMijsIv_Ab{Bjq&|q8e>4O_0*tQ)36WR5 zHqr2--EVUZRlnI)toGBIs6|X*NKw$c3wBy<9YW!a5F(gmVUp}2*O~Z`?SsEO7YVY( zw@ZJp*|H~pBI=zx6h|pf4M_uMfM`|wp!{+LmNOqSs^C0!0F*!0 zK9n-Rjz%Pdf7uO(B_FC0JZAliZr6#2zVek>wa)>ueh(1+n&1nJl`G8`hI}os8G3*9 zkZ%atO4&dFk4v;nEGm`@uU)=2zkI))!p?zZt%lwS{QWL!njBdiw8}{I8Wy0$y&iYu zLj@KFX+Wj%6*)t_9S3Yq!uXNx6mm{+ewQ|8Ks!4jb&TQb$wY<`;h z^se(!hp^52=HO%W3}e=j5GsF(VLJ~WxxA93)!*quzYW&pMEBx$6HNPl1Kg_!ir=d% zf^WgM@2P$~ee@OKA#>78QG&=hCU85MS8QRi9NKW_2m|~Dq!CXSfho1C zK1XB=j7{)vsd!|kENCQVT#4BF8McntvbslLrFLO4TeRdk%+T?OZr0NM#UOO zd!Ih{BQpZ!0pTAzwS40Vx|;WOSU+-NDP~fTk__1^e7zJa4-C-tRMfI9FAzOoJ@f~! 
z9E6gYGzsYdR&}oItGRzgmJ7HG{GCvHu}?rV0IsuSLWQ1p&OY4O{%<$C;Xu>q7uA3DhjIyk3-~vHoa$0Z zXiM-*@w$C~$&vsN_`w(6i1GjrI~ig>R0BXae}DS(W8O#ZV9eYmV!Hem|C7!S;CJ}5 z8k8&ytx||U`m=t2=5o zj}pO_-iWqO9}r7_So42?>kt5Vk=IDXvFyhe0Yoa1vKvHEq zk?xj)Cg08wv^oh~a&0%IcM1Vo{Q7(|v@oZ8P9>swLLr|U^|{pSv^3@T+!J|x?8url z*&XH2sZM{cRQ|ot$0H$)BRLpA5msZW`3jgLoI$WSuhgMS=++kS%R!+32+9ix*&9lm zwxWLuZ=?H0%5-RG+i_q0>T9C6XI-r#l`(>ino$CFTD zdos~^poa++TGCRS5?p8?x;f}6=jj%jl}muyEaiW;K>lzF=2qUMyxy*aI9+6#{xEfrC-aQ)Q6~O z-fr48vk9w|6oFmKQarJ0jM6fPI^6HP!N9UO=Yl!B+4)%x*0xaAF`k!UWD{SC^Kv`lNrjj`>K z*;^4|S82c{Z=)7l|a1~5phVka(tJUr0LAL-bG@UaL?IJ{4Y(v$*8jYtR`;!kj# z)P%{hl;J4v@$x~wiva=E<^JJzLPzQ|Z$W>iDsPP3A`!SK(w+H$K^PyQmY3O1oAMx2 z9&Bj<>aKKjUE{=)kuf4)v^pg=?m%IlyupvgjTqZ=)f_6m_t%#Y)ug9nZ>T-sPb z*)rT!tH-SgeV2ECmi@hny7vxz6djtDW;=rbUjfQKU*~Rkl~7*{`3stvZSziUwJ?9) zUMt`OsPH2@cvgC2sX=cdd7v-ndv~o?8LSKkZ|?Qt?Prh*I8aM0<-xwV9^6oB^z8&Z z2<-u%Qk~PltGOof)yJNn+wI0Lmzc0#GI+r9~5X|nAW{+JZFX5 zp@LFX^&8~Qtd_ePL=w;7rs;}lwj6&)y?x8Z9!u9F#`g}m{48qjvpTzbzyi; z>V`T!a+QT{;k=~{6qDLxB@V=u@fvGPloIe~fYRk^50I#!7aLTG;G*x+bsXs>#Qpu7 z3H7`Crkc^qkhcBYiT-UrhGTlXtMapMv+bqbii30qvz_R*$LRx#BH%3~^U8nyoM828 zAIJewLFWTw663UihyGoV+p9QZl#v5Te_K{(f|2hcJQ*wz_En~KEL~kT*y;r^8Ixuf zAvwUY!VH}Bb1*@nCd?My)ba=D`iUr&7+k6)_my^d%dgfw1xNA~w$E^|SbD<^5{g+L zW`kOiSp}NK*{RbRqoMr`YpO!nBe4Xr9V=K!P9v%g%n(t!bk96eQ zQFl>{CIGNpZ4B7^cv8BR4+j^*@h9kmTKUT;4fWxJL^Fl?mg2jI&OU!Tu{2GGQFuIi z+=!@(Kb-EaITYrb+a=OKaOKOR6wd?w$qeCWNL#tR#z!Qx#$euD;lU~eOAPyRx8~Vc z&La?Z`Xic(0ncRRdC3#0ZB_pSo4C>HJ*Q1HZ)%fnY7qD1Oi_eH>fUJ-kqXGY!-Fr< zjBl~Ig>OfL`8~2hqa=R>TV}?PAg(7UHW3%kIR=D=&X&6gZ!_u>5f9AW&>%zi=f}i2`;&Sa8?Y-xX!^D^ExupTU zRYsgc`U20>%h+30xN`{MqG` zrl7r=9kz&qqSV45ln~7Rs&A@i`t^YXkyH`W*9ussamIhfy%5L(-))#(1cRQi)&YWj zCS;TZ7R6`(j{BRgs9S&Q`0hx1@a_Ybs+(ZW;HGgBd)^; zUS^iy5Z0u+K02SVb5Si}AkChi&}9OUInCgt59kT1Rj)>zKXQgze&OQ9_h=b$aMxzi!#ZdlZulH6pF7BRxfZoODG+*+_C3B%?GGSGl9VZTyuJmX7=%0^0Ov;kG$#8lT@ zwLhP=BQ=l-Kv|RnGKP9f3dk58rg*$P!3U!kszj1b&^%?@isjG)i#|6PMI9aj>C!gz z9lzMO0tIe(wN~4csn`497 zN`bS)o(&h($4$^hex+8|1Wg6)C|9N4KQv?A#8)S*%?mQD%)95lKLaQL=CZ1|zADU{ zNCF#TT?s~+i}qFPRiu^^a1SVfd_e}oLre9|1rjsUQ;-P#c`Y{ZjGoW86{6=7f zk~RHh9af~8X|>7Ws4NgJ1n%3n$dliKW^FdV&kpYay&Q7HcqlMN6|(c3L309nyoq}u z*IGj-Ru?R{UWzTt{oGizfk0MJENE%E$Zo-W)-P9-I2cwMrW-w@$4AUv6t9J*EATFz$Gsb^e7;#L_0i*TL&NZ0eSu?Bw{h(&%_^!hCh^~ft{iav zasyEt%U!EnQM;ROr)npapiTsY{Zu5=d&p0abpw1AZdK%9nV$Ch8%7;WVF7=k{H0Sv z!~GjMEUMcQ!*gZ0eLL~N;Xeg-OH%TxL#2ZDPVj3uPoMsph{vXSI#u)0W(k@lsej*q z{q}y#M*vBL+no$jrUX7DOtOIdE7@Xi^j!(oS3GL38^2y`?koLoj&o(hO6!!oKP!vEQW@* zNBD~iXtbbKdCCgl3&$2Gx?|?R=?oG;J9Jc355)m0NYi|{7?5wGE-QcYY*UgW2>c9H z@3SQ$+4#8UFx8~nU}5;0Cc_{)0ZsJ^p2OLD^BP~Vi6BmpYS;Chk1`~BRGdSrknG@@ zA_TsIq+xw4uaVYLBZ9N_^H7HZCIg;FPr=54M~pCa~mz{r4i*|b~a^ZHuMEVsd4 z=1ABoEDS;x`B^gb^GXilYL%~2c%mZp=*FY-XYy3ebGRprs;xOVIKN*(PH-E(M$Hk( znzLD4R7{}M=Lbe0NEc_b0G2Hd?2|bgxl!uz)~%7I9o!V&xcPrdsl*dMFT--o6t)g0 zG;FR#c}Gnr9ZUYry$s^A;4I?dJ*QMx6mK zRj<=0v?(aD$Z4zqb8Bz+)rhnmgr$N$ivuu8o47CNXdaD=VGn=Qu2Yz_h&~z97MMST#TW#2V0d8`*CL z0wWWR7`m*7G>n`B7yh$l@=vjLvH(wdsqF*;kJY`QeLR(DLWzW2g~BZu(Ts)0cR(?Y zO+nnRtv3NNluyBG5OJOe-A(GuFIg~js}f+-o!x&;HgS&N8K3sfOCr1O-eUB| zR39yoLA0f%=A31fli(gm-`@EbaP5!GR!Wd?!z=+$sA-H-uJ#8-ON^q zqRg|Nza1rX5EFLdvyoc%#y(xDKV%Bd+X?c`iHze=E)xp>)S!L+rSW(Kual4jUeRvS z#DMMNGk}W}G9PX+y)dx_0@|=+$)nng(~o~S4jw0oLOL>eOS=ahI5$u*$V8k5Fr)4Q zOv>&Xpx!B^s_3WJ4K?~2Ai^BFn27Gv?CT?5_73EXGyy(j`cW(v;gD=raQxSc#^MgG z5c{DOw_tiCRM-q)LN9q8j31JuV7g79!is1DOq*f};VI?0BJe{+10}9!33xZ^0=$2b zIR%M`i~vr4XZ+y)jdf#2AvE|4Jd4lgs}K^4I(ZM84zXKuuVN#RQH!5dYCs)t{k+~0 zcN3i(uqhqnev;js&mMR$`A0Co&&oM-tG`7=T=c#0=OBbp&*ZH&~{1gfb z9k5{$+0m%9-kG-7WM&YT6C#@^ 
z1yBPM@%&g1sGmSNhrHiYytqT1g>Q-5bZHJKg&dr6@IaM&@^|UMGM|4@{ffxtNYKxE zv7IrNMh`~c(--2r?C_z6-wXzoLHsW17|$deRC?iZ@IdODw5&gb?y|XjTkHF+*N5_b zakBbL`yVudVD_8KB|sRxjMa-OUz$YpBQn)rXf|DG%0!H9`bvq4lHT4oA&tdUG`WNM zfiB&bGjGNGj+s*avn*o0}rHy2pIb#_rnNo6Z zDWGgjAc((n_(npS3&rUjSM;e$bt0MN0;=X&L~pBffC&t^wKSUHdBvj-LEF>kG1{wt zF77MHx~m1G0|UdrX!vGi7iu)dQ;tD;S?J@kqq27pR#iMgb?O91JUnD_oDy3L9|?VUR+2fdHt zVDy4Dm{(K<!1E4)P_!Eav6Lciy4m6h-!Gbm97$wy%6|lX$}1Cgn5D=RjyPG zpKG)3W3H1x5Go%j1=@0du{j--dl-KMuRDL+BHQ;74?0*eD~*9p-O=w>2(I89$X5h? z6UNZAZSa)$(u==9?LPk4PIU$=jm`s1q=brP+C)fMk|mvZ?q6RSNWLW)Y~|1Od2vt^ z1uWbfpa|6L-|W|u*ym?_8&>4rTFi3~bya#!48c8048JAHyS=YK+d!YU>~-fm+R}f) zZ*v7YU|8i&#fAVoK*YcB%A(fmvM~h0CH4ub z!Ip&!bY+R$#J9@7*Y6z6?w86}Ybp%Li~5S=4Z8tf0~E|~@GL`MflYL>P^?d)mrFy> z9N=Sen#=L0M@QDtxSLA9WWFVt^mU%xc2iLlr!pgcI+z4DrV=|V1~6U~m^x}MG`{hF z%bcBapgHEnfp)&z7TANUJTTIkBCMF zxO?#*s_96yv8h|52Ulw`es@@VIBs}4x|!mtA7Y26$9liivHnP56-|pV==%l{Od-gH z`SQhQ-|O<;w~Ib4l;+LD02GMg{voD+3pub)kfbpu#OJ^0UMxVAIy6?oK;*zISz*b{ zRVD(6|Lw_aaq5F^$f}s?_gkl_P=gmh9z%rY0kSRC>QD~&Xb1uV#=MFi4s})4AuYCJ?U4{pv;+1$ZUupSHo?Bhy%URuD(?N{*kZD4SWfGakCeZ-J? zOizULVIK6^siB5~-xp{=ghyWwpgH zoe*B`sr<;Tyvx49rIzyv;TiRkT|R3MfMNiGpG7R#FYfcvOPZ8aXEt+CZq$eD_D2r> z?qPP;B?Bz=8iN|{NGFIrZf+faJxlDygwqP5A9S9ece0*FQq6Im{7}!F8qP0`92eUpv z1oCFP{Sz5z9lDtt`|vAnd#1w*bC?i>3GYi@VDZRc5p7&)OD(l}7K#UOu)XvBbO_sq zQ_(b1m#?IXuJlw1V9pt8VeFnA)7{DiZY%)}(RHqxkRUeex0`b~fWQ8N&37G{#*dfH zQmUMr9|dR!X?WDFp)taL?FeqWCA$R>(@BnT@w(#YgT>%plhu6oc=Go6(yhipl8g3i zuUv)?Y($c6^_#M5WbPbw;-7eKjh!BR#5B_WKTL^}PAUSt`hcHHfp_|2f&TpoT zfqfl+v1}xRF>P`Era9VYNVZ1Hq}SdFpE}M%);i;YD*FN;B?c5+mG9zatK=8Z2F?Hz z7yPTqyg@n3e^Q7d1wCegIm&&+L4b$&ug9Ycs_(5ir7sO2>J_RXkn{8{0lI&gn%j8? z6M?FKiyMX0m#Vved+OoOAE*&8K4xRA3Kb2ZjG z?=-oS(nKLBsqj-x@F$_{>>+eZO|ttC{R ztkElDK|l%ZR6VV*BUQg%%G$aE;fF)K-b7!np&_q@Nc--8<=HQv_^!RAQh$8mAOKg= z`WE38Xw>Hx&-14FG)Q2&r`oK^(z7>+LSv@$#$oOAf`O{MOhP>R+QkVevyaGt+3vHEKgziARGv5Ggj;@Ecm!!i11I^Gj2oX_W6l+GLf`pKl#!M{_S5(!1uuCO5 zK5U?`8RO}HD!EvbW1$cDu^>A;8Qp0<%fjU1goSG|7Y;5E zbCyXT<;yoQP0YmiIoOTPgTuD6263>!-E8-s!-%UxUUMfrd6H=dLxl`PL-Qg30Lb^@ zZIaCBk}rFrYCv+$gDvR+lDq@^`U|b*7(7b=VW~EM7|hhNB6SWnSdmF?kMuE)jc^J) zqO?)Bz-e$?2N!H0WA&DlJG8n6n~Ws$G}h!kxAx7jU&)=$oKKjS29*13fq3g>>H?3B zaELaPQGmv|cMU3`Z2+@=CmPJo)1%7rN{~jE2)2vsadYdte6Lp{F$8!@g!9s!Sz$~u zk4({jN9XJ5JwES!06t`IlK=xKEn6Cbykn9gE8E=5T%h_ zV%Mc;gf3b_nnY~3;<^R~1>5!AeY zoLj(ci^zB31PbiuG44bO)4jKx3eLN7L77&oj+ZRpGSC!qjhWMML;m~4B+yo7=NrdE zXH;&kl^lMrISTw1drevKEiC7@&feMg_FclG@9 zGU=ZZ1^u28*A;qrBCCo3;O7O#A1+v4C!QX>uX{b}UOR5;CQ798Sr*)1elWUv^mjS! z$pIbfS?qLFzOZb1vAB27z(ErXkl>TNpo?z?UpfF7?5DB|>)D@~7A5q_mHN?tB4!8! z$`_NSS2=&MD;xMc*y#gsqNal0b#Lo-stS;cz@zevltFfuOAY>#@#aox2eK?xxqC^t znr60ieMPJqxve)PRCgc7_R|eq)-j0tROFdvhVnVw&n~QtNGWNXN@nJry;>E($EyOpyde(!frr6U^2aCG-1XrxjHLJKRT?A`J+$|1+IACiLNk|C zqYsog-d3_$S_wtWp)w+L6z{2RYwh>V!KxGz`ng)Xv2@#==@F@XYbED@L-Zq-MTLt3 zFAh*0NtM~Y2x%$6>)Bq;eVKlDc<|@(;*H+y`o@sA+T-5?2_yXPO@-VXUO1k@gxhj| zno!;(4)UCzchAEee^N27(5{TlhEb+pc)Ab+(|W(6iyx0=JbX);9fo!V&vkpno>M&p zL%)D%+*WoJW5(c}X4SQS^Ij2yOs-&3j|#>-UmRKAhzc`263`$?$p^U{;n$+4X|r54 zaL2LtDCIMMn*)vVm_x70hAEDAH!R$qU`EK-uYfO~L3WW`IfN$a=@@T#N+!YrMtxxP z0V$^OYzt(HAS>y@y~FMwGBBSf1pVmd@Gu=B(MSn@Z&MTW8CdLpNyq?5W3{_eB}~*D zg}Pi7&b$acL;Tq;)$H!bC{D-kS2dV(P0AC;6OeIZD3;8W(@GM-KznXA%UB=jV}~X=DV#m z4v(q~CBaa%?2Mp)hlGRSNEq;GS1Fw4?lrI@H&k@Xp&+RA#qT%2$siZWhp8J4N2 z>q`L;*OF0Wju!*cTt66_WB`Ugg70du=$nGcTJ&BF;Ko)LCWxov0#mnN4BlgMaaqEc z)h5MfQWX6>q(J|~9dh+R7mo|u>W-Uh+;|hP9}Jv3*XVhFj%~X@QA9M#?UZ7CN5nuN zrLVw~a|B_63EX$sO^2>ha=*(txA0IHFUocOG=krTOiP*i0>zD8Caa;B%zk?}N$`KL z_h!9{v)j7Zf5}~hA`F;rA4Q7$-ryelCISO)!GHlbQT}_L*D$NC*a%7o(1y`ANY8^WGo+dYAKC7V_|H04)? 
zCoNY-|1xEiY$5A3^}t+v3|Fs;zL?e)V95YXV{H3#zcJeT%lo9CMpwuS<|BVic)s&X@*`*VzX|msw>RC zSeYYJL;ox(uHCDg3fZFL>lXB-o{m=0$vja`RtrGhErF*S_y)?{CBp)Ku_oS{b^4J#Tm zHcMxJ&4N18YOJpi@!e7@7vWETI-xGg`@)AWtyOp3yme{7sw$AZ@0|FNDyHjz<7tj| zaqos2wIV3ld&=K$jD0RSu&*dror(SY_`fjo*Fu*lt5>?nt1213JDr^r7%GwxfOJKG!5PBd4^VPlX(yzn?0lQ zV9au;IG&BU#_Us6HjdVyCN`+z83ZGc+p!wEIc3xd`Bc$WAItd^ja(x6s2s(58y>oU z3#-kG_&!grl0GufXj>U2Xl+vJ?kFtIZ4|!ISI2Cs(#~|t?dj3$R)QXII!0mRczGCa zQwXvx=m?8l*v>}jR5>1(lS|EQXY1%-PNZlTxx1wz0_XQUc`h^loEwX9H=Qhh+Q(|b?VEF)o|$Ayv13{hfgH{k@~Ld9f0g2NU6``QqtD zV|aU#{`RBG`wVizx%?>e<7<{a(Q_SK!Ta3^6cTTc27?S~@rdU44(y$m$MUtmuE3vk zmap5z^8H*b7&EvVd>&0x0B_KLx7iHol)0Z%H@Cr6P*@KDgG5ekP^~GgmcfFSojpB6 z`M_wp)YeQ%>$PyrM{7QmX!I`Io!ig^x0%{c<`eH^%-%7JC};rsLhx~vJG%J=t4h5@T3JyGPA`cII^;}MvAc4&#{?*WKDcVL>L7d zXb(0AnUV^?s=VnK5Ya(i6Yw2cViCfDWc`pWUW-zCe{Rk?2Zk7Cu{eryF3n6dhT4{H z&Mi?Yr{4`v+pC>ZhnhS_z2c)j%V~C&rqN2iJ;FqJq47^1uVw&t$Im(1GF<1J!<_rM zGMb@j9s2pKb<-P%=0wncci}sdVq|ce`&i3zP+y{(jso5YeNIp3Zw4Xx@*vV1)ycc} z7Fbsbdc143~tj{y@6l#E(oaP z&!g-3l+cz2iaBZTxx2VLWI^U<=d&Pd^_n@Zz1fkglKTi;`=i^--cXtUx`^c{25-K| z29LIsmapm{=8|%uH+%DVp}F1t1)4WHn$0XR070$4PWc^(I3}z4saT+*PBtdTVjSyY zDrxqCwc-X1FDAi%NsuN>Gif*27%69HyoMHHhK~m7;Qf`v5X_1iGdZ5Gp2OIP;#J_o zU@jvhQ{QHN>^<~jVPiYt(5P@M&f9{Pk|&CspCx*Ysfiw9!DpB1ZS1Be)XHpMFFsUq z`t)qOYBh?le%Yf9>WD+7`@*T*<6J!)v}2dgw+Ens!&jz%jjo#~=I&q`bOXFy23l?r zv;yjMKl8feY`W`W#XA#i8IIJ^1*EtftMF*+a-rUVqj?#7nofgQ?@c8}>nk+TV?s%t zx37}w>&qh;hv|uzAMxqBUemUq@O-Aktj8%x&AlUfn+nP(*X~+b+`2Qw0NQ>z<4#DE z@zWUBboics>95@}YffHl-xPLgQ-}BUHtLz(;R(r zD44>IaA3zw=F^9{Y~bjj6CQWFp1uA1dL2iZLB+9M^5=($fralX37IeowzCnx1f@td zK^cm1R~<2Ho#6Tq^VbNaN9}eLJM>{S?HP~(gkoHO@A~^lVNR%Hd7*zS(huFV^PLer zy3K+Mi@I@?*jW0`mXBAy8DGNP%K5AeqOKp^O>Fr z?=!f|qtj9Z&kR(a+rATekL30Y#f8z^(91~A)ki(DU;Jwl?#^L%^d2Xnw9I`T+7pjg zmtC-ba;-kXQa-0$rZR`Xnl=!f=0>JvuntminLy2PGTfqwrac;LJu_|0-o{5CbtRKB zM(fJ=Xt&*FnmS(3#^n;lYuj%7MC#6_}{e!XUU@HK$ZwXZ6Y3S@Hwh6FA<6@W$c=`>&%6cIz>GGZe-B^B-^$jGIt@Fd6;*oup3*AO4)oYF)Tn(2WY7y&TWR5B2$+ zx`$)=n(q91I$1{bbiH@VS9f`6JMi6JpEnhalROYAE%x-K++W_7KQ&ts zmNM1v!u#vO9**8BHV-p)A5fXTJI$omR$a0IZ$u)R45Pm5NNt&5+90!WRe+s;TFbr`;LDJ?k@6kiX2;TG-Q*N%~kH777&hSj^G$K{+hVojaAdRDB4e{;|GGySSNE z(c!Lc&gu&V(4zX(&I&R~i6+yf?1g?8ZAZ zY)Ck*s?amMLPuAsVd?1wCF8e#vr#0pkdfGXh_bH6;H_pPMAx1Pja(rmT@qTY&XT%$ z%Y{yZFZ#6)n+?y94c2MnZWbzOcB;pXLt}Njqm8srm;Bn8R;(hT(3s(-lr<*zL*p2W z(j>-a*JG-=cG4n{qL*=h8UbA7f#&u21~}#MS|HsRV_5?T6Q+!Xe$y^a{fs>%0!0mNZtn|16qY#&& z1&B;mPtFQLEYM`*F+rRb#e-~?ml*gGniJN@)+EDn=xVr(4nOx(k%{P4Qdu3HPh zNX|~^F>#Rg)P9*xliSWh*vK@@s?0j{<=f-|zqP4aGx|e+V0t6e^zSDnB@632GhV+r zlxqP@ww1DNw3lxoT0N_)?J=KUC}5~x4f^y=?R_;}#o>GxQv&0hB2z>v?Ea{E6ellr z`Rd$V+LC61u+j4pyz8Kqn{|sMc<(AS!DAfO3Hr2_7U^^=+HXm`X5+&}TK4jN_Iy|A zIIn7uq0^6l43%PgFXSZIf}UZ3G!NGl)0}kfo^e)AWv)C~0AW5_1lsthfu(>|7J=}% zso6fco~`4{V5Ri(yv)zWY+j&+OzcG&-R#3Ex%mi-K)dtP*@BL|aG5wOUTfHRI^L;K zs=VL65-&xAt~OwTk^JZKetI6cASt8;nhR~Vd)=vjgiQy=S%`Q)UOFg?r4zKP6&szI zg{rw$?`YV54Mn@^=hHS%qN}4)r`txZMPn1(xO1iKvNqRvw7^2aA~Ht8`n2J+ZO~nk z-08*6ag))FOIgp3#e83R)aGP|kv!9s`t}GoDZQe0s&7Hlp68M>1c=Eifwf=P2 zA~BJp8)_uM^sF9QDxpiudaGvfM7l}3020|kR>5|Qjw&e71?kEqi3rp2)wO5&5nS1u zO+U{a>+yKR2-svY<+vV;_F>N0n>p9ZGEkj=iIH-tPNO4muqndyCQk2*o1k3v)3qA8 zRN`%Izz3DND)rS42)yYby@5aSBrWvZw3Mi})yN-7LQcE-m(gG3Lx;3}Df%toKyY(y0zt2$c9B=4_-`fprYI`!Z2G1j$?eiB+?eKpN8c>2?v+Q?r9xZ27@Y(&&feZ4nl99Tc+0g8 z3Jq5w=%E-7)fGArS?H@emDSUA`esmcao^X6^Xtq_7-2Ot)B819#v-L|v?MJr7h%S9 z?af%9CM~C2U)|UPx#$5UT=HS3?o7^hPA}^U@L<>}>YS%aP-M$RuGT_9JNqcK^r8Z3 zNnJQp;$-7M*7MK|NDpN^U0VH?!j@s1mGM+Tl0?}bwH^|8*Plew84k)S(`j-Fed($kC5 zN6F3z;`>o3L7C=JQ=*Ad3R_`5aV2z6=P8i*tY}sX6mX1gUZ?lsg1#R5@fPuC 
zTUvQvkDRu|-4iUa9@#<_Ts*tDd5cX-!h+3m%c%v`FOCb~-eW+0nPmMe zQQ<4+<)Jj<_{f4g@|6@_@s6*8`tI)?9KGxAV!zP9oo-g(yMo-NQ+0k`pj3utZ;O~Q z)-N-d8a`?*o$Ypixsb2?nn&V?>iOFuVOL`-X^&7`dii$sZp=(TY(THaYi3T5825xt zvpr`U2#;wL)sMULSdGS;nKc{j!SERF?=!XB`~7+Gl8xo+y+G8(bojmDg-+(S7S!{6)*lTwHQgc)remZ|h{CZJ0Tw*-C1q&0e^_ z!SGmn;pPNpZSSpX=ViJZffR4;D~$4dIC|0D?0b6NOhYzcnDE-VC@eVZi}BJpo|F|* zFgwb+Crp3JLGH)ZUaRT*mH&Lrh3MRC?xI_ZAn!+SxT{Nncr_RIZlk;`E^V@xMeON@ zp$hbrzZtN9jf)Qc;P^V`3af7!4NAp$Q~f?cXJH9c32rZ)3ih{nL=qzIJFu>W4t(DD{j~UcyUSunb9HuUd z2-Qxl>Y$ z0Ch)Sh_2Y63E*uzeZFjg0_e8FeY3~qeOTq&)IY~}Fb`^v%Vu+TC}8;X?zE}z^SnXm zak{X7;_E3zimo+R3V>L%%~PJCJD<1qA3D0Lpxv&qAn{5GX|RP;lt7VC1|N9M9{hys zn0c)MFW@(I@3hj!SE&}!#cm$$sM&1>WaDLh*4t^12I~3dgysp0F)EZN4{x4T8u zna|DfcwAY`W!}$s&+X*cFfAf=sCj;m(5`3}z`j7Av+j^l>qUTw<~oZU;6!qTx&YHV?vOM2TF6?nQ-$0=`;UrdhBl4{`jzI$BVZqdA*^eB?u z$u7*9V|)bJ;B(zh+sSM;+oBMgUI9F2yM5o!nxZxz%YNFH@ArFqX6^YJ$Nb%FJZn%u zJ|3gb#JQX7pTTuk`J3G#pDfm=Vg~Jhi|){-l-u~(37vWFK7(*n@ZrhMu6A)SU+o^< zAtKJ$fs_DTdpA2TAa2`e!nL@Z?ib)zT~P>(GY#;GJ^PHLW>ZDfvpQILDM#7S8ymex z2`jpJx_-(VB}iRRIV+%DHF6_=MPUpG zmuKzvzWT;baervu@Juq zvl7S&NZ9UmK#^T|?3g|V^ASJ6OEApn!=yAYC*kAT9lp=L!eCNHra49%cE)qbQ#kur zTK8zI*q|u_h?;ifmu#{l%t`FPl!T9#Ixr_;z!HWqRpB26uVgSRUcmr=#zt^r{53kX zC+ds@)la9o17aTMvv_`n4I2$)R5b}N0w8W|*+$@utai=b=tK)VQ)m=zx*_h8|O5H-7p@#!|RlLY|8gnPZHgBJRV+jNafr z4wZ>k^|?k8P1l-#eeM}cAjg)$NG2aFjImhd;fCuCgg4hc!# zYqS}IKH_@|?-8)^CMn2!lw#_67%MziEaAB?7&FK0tsDgO65E7-xsrtwjNwe_P;HEN zG5Q#F1TlHHOzm)tkEi$@-WMVyjRSlKEc0=1lJokoH&V<6-dpUI-Egl4;U#iE*!oOr zK{5Th9uF!{&;%i|hv+Vc-d~9YREPKLGiy^R7#gT;!+DBmd15RJJc3WsY1bM~qj)Ss zaSRB^L6LSuzyZ&HL8&|=@V1BVF(IJumg7yCt9mO~HH(fr=W`vvz*G>Tp&B2<+yG$D z2p|QMur&>tw-T|8JeEUZl%;4}L-qz7uo7?85|c>r)k0*8F+{#Ok7gD$e6%?-DTyd& zc8Sf;CXI^w;+a^hf(Ps-@PeIMcE z9^v=}V@<~YGp4yRj2XHS`xskKM0X#NjB68<^K!L`Wdm~|0)&b6wq=sxn1}hrTw=~R z?Jaum>(hs zkTo^FbHRWw8~diWKIfx%HZ$yzpSk*dJ(YA{?lG@CjJKTZ5mY;LWG{uR*dbMG7p>9z z9M6P*smxXIR930PS`MF7rpVA6iBs6CE&NGV4djJ@)^E1i>Hj>!18>=RX zU}TOAq&mmwt66hB!(xgPP()xqO#RrStSe(-KQsd5jq)>IvY29w(m~ctFem+IPRP0@ z`vkJ7!I*u<^fUHdcqK9m=g-VRrVZmvKEts$vOzvIm|GV6D)lh_!?{xi-^0ElGMvnR z?M_4sXmSlQm&~CMIaHeq&PEX$!DqYrbjGSuIg6D%@vvcx5S^#HEwE24<0*D;8%}Dc zJ1HsHL=pK8Q4psA&N|$9eQ23ZA#iDdz;;dcg1xaTUy$e#7(D?Bf|cQVvH%_!wNbDa zb)E5IwEM8tokj5`*g9Ymq(Y*AL(6-A9Lum}liGwc0Z>M<-JqFBU<(JS_2znNlyXAr z8XKaRW(}63ld^?u(>vz2VGWgODxPU9Q%k9xdZt&NsMrYOqw>kKEe89vp`dgG7Dg$o z66DjMFI}RSp)c8o;lq>ev=-w8-2mJ+(G?$=^^sL%EOX^S|Ecew!-&2E8xn|r2mW5) zk^8X~GFR-P(UDyZ$^A@>6Ao|a}v9Q&(FL1+C1=u4amu zQ5r_7+e$}gAJ9M!@0yx_;D-J^K8mj()Prl0_3;@G7J88bizN&DHW6Y4+)V-#qi4HN zA~Qdt@(Q9AjX(OSGfxVgvuX}x%`Vyvz%IFtpd%HD$wxFpLDVJ4VAIe;crJcC23xSr zNT@F-&(#$C@LWXp@Yo0F{RUeIas>OJtUM-Ae6{mh-dY{`W`!Jo6y#`0bK9Pdm7cJW z0-)J&3&w+u(=fw2V4;r(d(?s4K}R?xx29Gi;JHp>YSEA%3a4UbEb=_!z>GRP50Ytk zp7P`6JT@JCw+96`zOSWBrqJVaE*n9_yUda^{7=F8VfxJntx#$1^iN6?v^& z`EnkknSXe)vO1UTM8`RcO>5m%i{kP$xvSg+^!Lx%ZU(l0fTrTKZhhS%ui<;fU7(C* zdZ88?SJ>drNo`QmX$28d0)s#l!^8%6OHdAh`I6FMzF0gza>is`#?VtAUG?={IHL2h zF0RCffPKumJZ#{E*ixXjNGe{;)a|CJ4Q97mZ>`be-Y5phw|iJF4rCI?S+U7F7Vgo! 
znPj(qFscfFTdm=NUdFm4{ua^|;IiXk+Gba_eHQXB2O z=CeOwPv0RMFy6$)hMcrk!+4t<_Bwnome^L{aUjo&cObR{c|U3J8HqjdBg2Pv6%#o~ zfzc)2bL*r)T2K=xXZz~&PayKBIdtT0;=TEMlT$@UC~{jYmR zsl}Rt2F82A=fhmXHWNV+2)RW3Kj<%FBMF0lFRQwt``K4Mx=7OaT@*5YZRr`f-+jnN zTON%uFDwH&&m3W=yPC9lBEw%wnCA>W1I%}E>p^(B=(7m>vIKv<&5A%GqNh4pm*Wxk zRijx%SYllsG0(()a82w7GDm|>{n!oicm95SoF6&x@sXg2dj++Q;wN)rFDHLH~y;hjr zo2*xh$cHBhBVk?^Xt>Xic@cl0nU93^8!C!vHM+WyFJ9zM~KjVQ`OP_PT zvhZg-`oq*Mz@RSM>V^;D+pSUf<|12Zhph&l;Oj-qY`V}v(HC~DtBCVlkUuh_{7>v^zkpyLKTL-w$YpYi|x z$${_?_MgBc$v!PjMkWzHYtG$s6k0FB!LE$53bv{T=p?X!D4;quE0%N7N(iO6_^yO; zBJn$rs$m@_kdLTsHZ3AY?la4O1;QnTOz;1kmqF%XZT+?{hc)@j4vfM0@{uo?7c|R( zZu;#TPFeU4m?QLY`1s$0zxYNuNopLotx+(rH(A8!b+koNQ8WY}8MwGYWm3=?_ZUX- zG+If+8eFw-dK!ELhQ_!@MR%T(<}krdC~CE*u)tvlKrg5T1TK|f<1 ze%IgNvqB66?~Ay} zR|k@K-^5Ia9cPiX>MIMx9b7!4Re@OB-=fiLCW5vD&Mzyb;M})=52M2WWnXb0JMq_j z5MKqMr1hWP2V;$fc0+7VdvUqYVr>uibz5S?c~{jHOuj{%VlvN7i_%=GH6++0ix;)>vft zVabncpm`Y;+JIkwr-(&>JP9I(C`Sc*7#!!!U_;ANg7`aipJj9AbHc&idIO$;U?f ziPsT%&P2AqMRY)!QZ#4Z7?5QJPBv0pC#n6?*o$z*!kKpk2{AtCIajH;K&n z73-fDm9qrHa$`8`JeI+`y_DgLxG4V{cSIH%DE%Sp-av;7(kBCpFoE9T@!AREO_Vp& zIDyDpBG-anaqEv?kG#S#cdkL=uD{nJ(F6H6wt=E968rnw9tm^{;E#jzM@uYIzxODh|IDmEC)QD>2=gLh@$$@ zp5xP<00fYduS^%*)%&L0n5k#z6QrFI*xr`#)Y15Wj16laO*F#1sD*bP!@fME$%;>C zm@v#8fp3%VDTR|Hpe0 zefZC|2i`mNB-r?0+xk~Ln~f&7voF+J%pZq%0xGRnkylwB3)*MBFTpBVA^h(}CdfNg zrY%oF1R(t5*c;GdgT4Q!T{+Bmf<9cG!4JP{t^TX~zjF!x`dbii;`)hp zH~WA=r3#tHL=4zc-GJoogie3HZ?y|2TWy&%xiP4y4j>`%N_7%U)V&2+>rnNh5liO0 zd|Dv|8ypdd)PLKT(2)vI91q=E2)2QL;c7OXCh`TyiHQLkfU)z90pV`}G`fO*I%VKNA$Gh{p~nDo2oHWkPX*MyqSGYD z+!1>~3>`@g5Aj%Te@Q+z6=I#tDTbVfZw4QB$fTkTJ<=H|^DtnK4RIOa8*d_Y z*I>`7ko6ev9xu78H=|10bStsyjOJ06OI{u&c+^Y zwCDyh-V193Fh2E3+!Rq3ra8HBXSrya%}O@H>-Kh_}kL>Bk$kUPqt zlgWC9-SP2B!WrJZ!&>3sqkVF~gC8KJT6bXr8!)DYhzXc0vq<2J1q=`mMhKCYO1aNGZJKXQFC$s6qF5S|E`7fKwi zPv!)2|1awf4242}8vBr&zt>&wO@$PC5$9t5SR$i{9z=eV-E=-DB?2fAK5G#w-vo#% z#+;wg4xNRqHQo{(gv{6YO(St$!@orM=px<6HF8Io+uv(c^w6P5M>iGp^+lBLJVC4; zvda?gj6?JARaukkc?!SzFVBA+ z7tNH13UoDuQ=I1K9ZXcDv)2yukf3H3e(I>v0h&k{Jt1~;azy-Yu$zC$uHUxHpLs>( zC9IEq`77Ul^|ODlRulOFZ%ahK12nS@@ENaBUqDiq}Dd2VpVf`Jg8{ zGVX}`K8T%&JeU9Vf4+9s*QXw0I7Frp8~E3pl<*UOh8WMs_WJmoRdL@G44Uoq2?*$! zAQGt)68aRG6V!8|$o6O;r+tFWWbToq_|eJSq!iHgr!X7jB_Rv4kmRL^4K=0>HJZK*cdJJUkg1*=A?%n zY>~fz`5m|Z?FanCw25t*$ehNl9V?@#DBmM}2wT@ilZGik2v>gtc!>tAN_q0*74n71 zjmF4rw1xnaa*JL#A`_KY%4cv**lci!^WSrYxT7LQ;zO~)x|=Hk&bbNHeZf>12M1`W ztl8`US5c|;12KMk{PnbgoU2DL8)f+$dGEJ%0q(OgP0ZopmKA0w^H&H_5Z3KpT zhT_!9xkhBij}_Zr_WMio+a_i#gpZiy&Qb-f*s|mzqQW5XVI?$t5r9Tgybt_3vfqGz z?KT*Gu)C6Pe3JUwzT`QHT}$H3B=1P<$UppppBUv33;Dg*e)|F!^$xuJ1paCQq#rO4 zCTZX6REWt~lF*vK!L&DYdwCrFN@W~Rb!20zxr)iSm4s;9M(YK3R=!*fvK3i7(=1jk za<@5f6gQp&YigNdrBW(MWSUBOp5wejAbV zL!Jn60K~tA6gE5v<)G6Cd=qRr2EA*xbmqTA$eGkG+PyKkTg+Z(bvRtU<&- zLTwW7K&FHz6w#V63{;lh2huS(0I!04GTWgQ5c8&Uc%^oTf(ShmDg-JXbI{A8wKz z^&84Y(_pWJpn78hy_+9VrVN{ZC067F3LNWEg>FK(*?q7mxIa27ePfZ|&%no!Jp7Qu_>9>%zQAKIPD}=J3c5{5 z(7qT!41FKY$vOgtL{JQ6=`W_wC`BuG;s;=y@tskUql6VQGfI@`B_s!b==tit?>=h;AD`ygSKeof@4t_=D-lO&ix9xnblaE zN4{l`6L8=q@BuNuuy0H3n?TwwFXLB?GyvKTgF+MczY$!--TH-#fL}P=-~GXQeC35t z@|v)b5$l1SsM64Zm61om8*SPkGc1P00Pv>g zaZ2t7IgI99UJCJ@Vg64Zj|old|Zp@qNBOrn5! 
zQQSDYj(E007y#7yGdikcTuIkc1Gd0_7hghfa}I1*$R2`W8Sp`_ z*E=YZ1%vNKf@iBz=x&LQTq&=*9+3SaS8z@iW}N%+CyY0!Mf2Sl3?P5{OS?}V4t6@M zxQ}kE<h8!+r z(%}1hkk|izgfSc97y}OTlP4qbMbL|Wi% zW;Mk13Mo-Aj|ruwg(rdLUP9alxKMDoDKFT?z^$Nibi8<>f1yexCmxhH@{LwalI;ld zzQ=1zIgtGb90=rOHqo>2w;c&w5ag5hJNNrn3?In>|MHJA=;dK7a2)eZ#-Q{nfDId# zOKyhOda~-^=ZPvfRNN4}k=_kH6l?a%d&?Cv-_oL(qayhu*SYZvY+b<10Jo+2d9mdb zb!TU_47><(e-gwVS!o8J+}Hw@Hcq=F>%<{>v9rM@vpqtuiWKeGmsok`lnq_?1XP_w`cb7r+^;-6_O)*Yg9d%Py(@5AZapw3cT)+ zbl}9no~hLqP0qV*bRf(bmLbYk!bhcg@FrEje?)PvF{>25?})RM_bZFvd>TU03e1k^ zkQ4nYK4zrf^~GY{4E7zt>=C@gPwaZ|U3MQCHDDnI+ys&J@OOx8hs^_7@0cey5i-R} zu;$1*f$uwwmu$)$0lWpi3Kgx6sA7MEl*yYgz)cyw#kwWryd^%rxFzhhqtEg!aOH?O zf5As~5O-l3~U?_76p-si0AWLbiU3L7O5p8W?72{!3)AL5t(_T9+ZO^hMu z`^o9x`vcbQD|@fB$t80}gAB4dNOK~?e_8Jha!!m1Dko9~?kj2qY=z^3h((;V2DX~S zTsLBIM3gVQxs`EK(fE_-6P`zI7V-w0HuP#slU?>^}G>3b1b01(-#f$aiI9GDIF2e^$_Y z=#ZD_(7GT@p@gji%#tN&g0-4HTRP*K7QAb8=8Tu{6$tiKaX8rPz~rf1t3b&@+rs^U z%mBx7NaFLbs}$t5iJU-O_$MFs&vCX5(+qNbkmT``Bi6=u%^d6lEBFc#dtUB{O#(s;Qig*q zI^>C$3~CFJzs2v6Cr&8f-v_-z{QU`R@eJz(vDXmwC_l1pzea41-7~OVf&{JSsNr7L z1-2FbrikT?3&hxf5hnK;FVGB&cx0dZAP$KwSdM!vmz!J%)Ms|ULj24N$FBsK`&iDl7e0=}w; z&Fo$1=0C zbLbIZ(^hxw>*M<+$VS`>f2{_>2@{wZ;!})qR0@UiLGti3SKE;XOalj9kC-9r`{+|l zV{H!WYM7@PSyxa&1poN4*Z+v$eeoooLv)D)@EM5}4RIlZ9s7vcee^WR!;lgt9{63P z76-iZRVML(ArC`x8Nk@KmjA&%d}SuVl()ZdB7a?{n42F=4~b#@f6L#Y0UGjMwML$s z%mEnH-OihfEtuwkU(xd-R?u{h0R_y?N*2G@eT%hyv$&*sW8a=+4!vFQ{kNrnG@b2f8(^llDcsj@ zzQq}8jsOoH^dP}Ie-85w8$>~F3Ye>n(|pST;6%K}+A#*>UW!9x7P#D3!_ zu$$m_LEo~zdEgNv)H&~stClVgWH-hUlq@h%Z?F*rv=s9~rbYF|r9z!UeSxgub?}yY z?j*J+tBZife?$zrKmZrjF@82e{)p41_Qq!GnVVIF4Gi)zA0wZ(XJX*9f8v1|_R^r;LbNJV|Zk4?B4t8vk90M*iAbT?{!&>3$ zMC9tFjMNXCDf%{}YcKa6;LkRAG>c{5lQC?EBL{m)e`tY0FBRIQ>dt8eO=c-mV7sH6 z3F9zF@e63AhxJYDCFG6T=97OzBJC5eB3NCrrpdlf@g88M9M(0F#eb~*6+1^WM);oz zVjjTrT2kXMZGr|w6sk~H4=o{BPU!SqanfMxAm^WgjRwjsq7Ed7@mb>q;(0P`IpXjE zI_I%Ze+KIp)MbCfCBJe9wr6Ji&dGoLf*r=>m;XcR06uZauWtB-bN#`#*%g0@He22} zK*ouz>x)2vWkLh;Jg{IQHt<3%(n<5dox>=op)3}zyYktzQ8gzNjn+Vo3i#-dcmcMG z?8`G)+A@(Lbp_CE$n|Wvl0hkNAS9OLSeC41e>LimiVcf+B9j773_JrF|B&Q!ql!bR zEBreWFC5}t7<*!ic|UcGKYR95cY>JVrydblYZ6}`a^i@&!_FFTzZerZvrm?xX&m=A zWREcda2ttC3iUX~leDZd`NBj!3;!pq(i8um?UK6FQ61=ExN=nU@XrmObFB ze^^++Hew$rp<*_e8KO7#@Ygv^JQ%G8{5A9lnJ>hS2mK68I8UAr`e9h(B$i0t=ZnvE z$omj`bbo=JNa8XDVu-5D8E9_;`vv=VXNh|ljOo6qQVsqMuW#Uw$)l=)qVDJnittYI z7hl@~v1Y|b{0emxB*3D8YygcNhI~EVe~0Ajo56k|u}vRqNbaVf#fOVFwi~gx-Mv2c~vduaKd5*LXMB zJx!cJW|KTJ$%p19a$CP*nEe^ve-r&?0Sf$2;e@?66{l78^EpV;a*cLBZ9 z5_{-72Ph8zu_cE%0?D-_&kLFH!NWL^8KD0JZj|6hAg3XBnYhqETY|bOf1O7<8UyVM zi3cWU)Z>z5fz9kBQXh$TdBM&_+)urUNgy}^86-zIHLGkwTo|6I=Vlcz*E1)5JRUHR7J*njN2k z34SeWiIw``Bm=7!C1igDe<0<Z>r%xQ+um>KBILjyCN>-td4} z#d~g&H9_w8@y~=4k)K08;EP*9K48Fs{1vxC#{Q>HHJr8Lf9=U66In-m9N6uD;(85j7UgS;57 zJ=kW(2g`-r?$m3}d|H9b_h@Lsssol5SbX61Z0p7p)kURNE#s?j5d{W&9(@xm4%{iw z@8r6Jd~`8yf0JJ_{!t+gmQ4^85;$}rL>(EcI`9c>)XSmV4%cB}2X2v%ApRx<+^@Pg z#9}|sMR0~CNW$0&T5B-0#au;^ouC9F{m4U*@^4{ck?(7N%)3kSVR%1aTQGhv8hH{5 zIQj6RJ2Z!W%Z`UM7IH{L4us6P;kLv05A~>6|HJryf7gGE9tMBpGLhdTGJpXkipLnJ zXA&NK8(tj-n4^|ykdj<^IgJ_&)S>c)kGOgXosP!DpSm)NFlcxy4#gA0dWR4G&6{FB zp@xU#eF*0TO0R$73t%vWjh?6o9}+o%G#ZTey$We(pRF%vJ9Q9B3Ol*?@BOLn`K+6tKW zx`ZD9+Ju-x&aNT#1Rx8>eu4Q&;yez?#vdSSqI{i==oV61-f7hTA1TGU4|6TwJ} z0~(teoVC)h&Y>TQyVTaWpyk8Jz0BGemjzc&P208XSzVgeK zgdG2_EsiT*dYYWfq5*G+aRcvGDCdzuaAbQlPgI?`%UPy{oJpydq_+A8uLV97;3Kh$ z@&2f5{g?PiV)vOlkd^|g)C^e7%MCMsL0mA=>LI6|AyCiA>x$%Nu>~|T$L)|ml}Mfp zfAMz27GdX*csp=(=nIspmT-a2Mtlx9eiFYVHM*zC<8fYSWAMYS2o7$%+=>=PurUB( zELgu0(X$%0>XtGFJF_f}X^>nurpd-EW|Gv;%z%^NRR(qG*ayHRK_uUy`xq30A+Cq| 
z8P@ryx~AXUAgGi1Rm1sHYmfS-KWe$ee}CY3LFoTtX`MfI1#7HYdu5BKlW03~X*$Mncu}e7m2774CD>w_0yG9HaF@hl$!_eamFz#@ofOqu(%0zcc)PoAk`}Kr6 zXw;+1e3q``@p3Y(B#6>s{?Ta@#kf- zL7ZDeMGD~wG5U%#^9r7a;0?cbf223C0mzM@hRZ^|^Ltrxh|RpApOKpuGBhQ^#skMH zo$0QyL|tFb!9N>APX?Aq@^GxyJw|jNf?^63B5(%Ab;1%E$`FX};Li^DX2b@7Z^m;& zj_QBnn^Av4@(Uk7i10e_WN^tE_qC zgUm7TTHpLRuM@cqD-QRId1Ai}xc_1Oed=?ET5X~H;f)w%0I9tho`=*WiY$wA^<}nF zV_rXS=rPy7m)l&8Szbi0`HQK>dSD?R1#W}pz2J;t$DT#z82%-D*?TM>b|#y6KJsX& zp9Pk1z?~2s4EztCcfehCf6C@te@$XJzhmM*akQ`hPU0v-&hJxqK;#wi-~Nof0SB+3 z4is(B98V{4p*{1|`L2fEiGAK_kiab@>0&Lwm@JkY61*i9U8c-UE41;KzQ~-2rohe8g|Ajv=>(>wUOw2nH(y zU(8T@Py_A@T@M>q(24EAT7_Wu;Au?O2djUAngYK^ZIuZ77d7y}NYmhM>f$h^G<5CJ zvF1SJ7XpuEWV0N(f2Ra?7ev{aSDbxnO%N}E>{JjJBJ~vXW8)hk7?dxHO(6z^xKJbT z5%xU;TVm4+2i0NCxMkQJevTlPJZlTN#L*6?Y3Vt)5%(8>VFT`XDwMa)#_|&rBV+ncwVp#9X}}5ms*V4QBXoI)TFtd0+ASB^h9R|uG-g%X zBeLD5VWZ?Gut4svvUZ$Do2XnpA+80!i_q{e_|CvrI z)Rx4^|H6G7c$u(9l{><4rIxNocVOMT!i(k7Zv{$uY8|kUkVB+)9@R$j#u8k|{_u+! z34$>MhYdmVCw~7w!=u2KK;x+)K2Lo255K?$gie-#0xOEhQ^FZF_~DS1&?DH}4m04y zH_#nxe?~?vsyyPiz}WrE>;1_q1EUf8VDPVze;2tg-h8mfjNP~PY6m>p1of7%0?<`3 ztvbmj926@_W>9;RgEj>E18wJ!8!3TRLNFV2hXY@SPP3Cn+z$0>1CNL|+->HPbSBBU+t~pRp4wn$dj6_4e ze_)-WYk=R#6hNh*h(n^`9RivJn;rWdbT<_Pf@Z|WDHOGX{TcCTJ7kbwL_Wn}#RMkN z8Nz`a$A>j>YVg@0pAp+e)Ct7x-_PJS!2KW_EkG6US)e}OAFhYme0+eB`DAW2Kv3EN z{sMPtaQwQ0U8FlW&iTat%?s3d;kZn3e=r3C@_jr%oKlic$Nf_ppUnq^6gduF9*kjv zdJ}w~W^jut1iV|R89qde0`H4VY|`L*ngAgWb|&1PC)ZbF`2Vnf9RWBCAsvqd^87#V zlQKN&H7r&q@htEwioj*S)`#A7!COJz7aWZ^t}>ey;%0)5;2VqP3m`QlpACc^e*i5d z$8kNtIYsb}kk1O^hd3^dVQ;g03y2^fG=c>AE2-gNs@F`6OI!GixDHI-n0&u&c=(YF zo)7U&i_9mEN96n$c|Q1t_g>z4wU`e)t$!=Dq0KSVJi*Ml75vV?#PKm)>P8iS%Z2&n{( zwjsm*;fo}EwE+q7rYCjuKZ44N3LoW@ILrY8)$wB#_3y#Gh3?(tJL}Vcf2BPPc)ZU} z8VF>`#fa}UJOut9B8zw#iy0mUAP$@%5S%aaZ)?IQx@*`4BhyU9a1}AGUJT@Q9&;_> zr9<82%tFoPJmD|R=7dN9Up1Qxvp{tr-c~`V08G2X{ZYS$^^1}L3pE3Xz}*SL1?daJ zQf3UoIGv@3%(LN(W)m`Ve{}vxHi#@3KmcqiSa4)k<1zlw<8d|~$zysu1}Lu|zd+wL zUd+a~_v!_#@A@*H?#JPBou2_BG zU&i-Qiv;E#XNjpSMZIPsHz+eSb9pryf)f$A<$XUxX4y|)p918!KF?`n8?@EVkyfbZ>u1O-@Kz96v0k1r1yHA@0eme(4001m6`7S&8RFqy`O08cRf+PX`&Dn4~q@`v*Xe!FS^A0N)QB zj3bNh1N$~y&r9Ix58pF}@A0^QGK_O}5%4ZIE07p+y=1r^e{)Nu4&6f0w*=1s8u@1u z_+^CXJEtDtw7_CzL=~*gEand1#dUeXI5)`=)WYPw|H}e;^PFR*vB#D?i z;Su-+#v0Ef`FL)&sM!c(sYaMbQRi3@&mpLRSm1xzny|P}oLdhB4JhTHIOG~)f^#MP zJEmx{h1iOss5*pd^0hn4E&~ zgXzQp8Ow#2eZvG8vjFqlRah4VilvA5++Zw`#3QH-e{$WdK?W*{9#CdeR`|ZBhK{Q$ zR4n*=l{-d&#D>1>tH%2Jr@dh4Sh+Udk#TA=&RFACiONke%MdA&tOSg}!<;eLZ;1Sb zVDUSUcMBXcuWT6k7mTTwqiOBWGnoJWnogsV>p+tJWsJ!BbGvtKe!D>l!F%vd&abYvD_j)&oh3 z%t!1|2!{30;eB=JF02(YeilkSz%Gzuji4R9fN@1+^!I17ele%`-f#`2{QRD*|22=M zsd)b*kwgEyc5p9XG_db1lxX7nWNrjD2?DYXac{_Ua0-#R#XK_T98Ge`pY_={8EVLVVOdgDhdBq+=Mn#WN{E0Dud52vCO6Xx+J5|K{w1v zK*mdSYRqSh`9;6xZrVtsek=7JI3D3R|2baaxb->SbuDWT$739qKF28>kB8%O*0mfv z96#f@IvnR}$o}8+551B&6FWq<=Y`Klf1*FgzD0lwYXhF)cVCJZR`H_^BVIBf$0_P6f9rIdfTBrjv%DtKlEn=0DTuZ?FA(MgkLl@~59u|4O9PbBGw%?T9W<5*ragG;HJ1 z?+bg9$Z70lM6^S(9Of$I%yh%nf5ZG3Q2oEwphZi7Dk8tIM`iCF+)jcI+1@~RX2ZFt z0f*dp{8!I6K}>4GULfyn4r~2;zUTkVdtfXav|sz%_xv1(SvmAKfXyku$ELx?1-ws{ zV*IC&ohwC%|NZ-84_g>7oIle!dbh>%A&`=6$vFR8J%Q&DOfpwstxYgDe+7}DVA}E$ zJ!56;Z?bnGlP+3T#JSx;UW+MgsX-Uxm_j2ig?tQ|SF8_)39wJecZ5MFdY|vqkk|N~ zZguF>lhN2K%qq*t{fG|whwnJd3*V73|9H<3zZZ~>(Hf329txt&#P-1W*@2FFN<_Y; zr6H(^w+}pZ(3@OAbThtBf5kcJoPn`HJ9@~Tf3?PlZpIq>*fBPmQd`*fJ|6}kCxIP- zF+B{QMWWLJ?8SdE_6|BL5St^zz1-x0J)l_>uAc>XmhW=}Wc^0OUi=qxfNbLM92?ke zM7G1GU_2tPiCyt8o*Ux7B^GoL*IL5+Gi(?zBZ<(VuuV&%S3YwEdjQ}v zJ;Pdstx10W`xqHp51*}H=du_Z*74PkULxFg2@_7i;PG8sf4|R-7s41^bSLhQ@i2 zTmN`o`n3M}JmN$A?e%cqxipLop4pnDjx>Sogl7qy1zNNS9+9sWY{LyWbpPR8OPUbd 
z@~`LGkgc2UrIGg;_De;^=0AI%7uK4Ah#QgdtoR+n&;I+Fe|O9i4;%Dnoz%<|9KQx+ z3iDF>?DzeNE=^#MkUIDOE_;~Boxj~H!~Rf|yB5NpWMK=!S=9wK%tv9np^MO+-8YOt4_VL$zwgn(;)W_bx=DHAA`Jq;B-BS z0a(L%&<`ZP@;UD?oCm!_&f9S~5AXgt5AQ~}Y~dT>yc5ntUhjKe*I|rUk^GEk27VSl zKVzV9&(F^p`T6_%&-n`ZIr^Oc`T2AH=jYG)pPxJO9C&xKkEMjX7b)Q<&jC9K&jI~R z&fB9We=GFf_dMt@^8Vg%Uj2E0aX9brd4KkE9{qX$&v|dMhPGV&TW-jh8-$3^kq|5{ z(w=&*aA3z$%3bQ+MI|@f9d`@cZqY^-{&^uS$>^+9-f{4bI**1 zYiBrrcvs@9F(aH0-7*E*_j5jEC(h*%kHNKn&Q-8BE#ddM}On3^4vUZykT58`E&kvOrSDw?O*3+ObR_`pc`xU+jev)hQz8m67xXbFfUUB#sF) z*d%#9;8?&9jtAYY=+AL&;E{PIvN0a-5SJak|NT=i;9n(iR}=w~Q-9*I!)KVme~7dG zK8~pAK$8FS8B{zCDdPS4EFgr+;yHglBYqF8;_uH$asB1E$y|^R`F(xx30OSx9Xx|j zJ1X4f47!IQUVkOvR@BuZ`h#`QAa6-3Z^3X*zPTa1kl@k8$YAY~nTFFRc_@trotcEv z0#Xks8u%Xh?BO$;lmD^h6H@m%e|$z319_PL8}}7R()y44k~lT;7C0aAzg8``|d_{<3yW}(O-;?KyEO?O;K=q_A6Tn=!PE9`Jxh3*kI1l-q z5K~Lei*Y`l3-<@!jYRSpM5pmSuplf1Vex*T+c^*KO`Zp|2Wa6?e%}i`f794!peh^g zPiWsH1Nj!@MUbGxb09gqBIkp7g(l;O=frp>{0{FkJXiF6F1#1! z1)s^d;Jq=9NKv7S-1jrae{;D0mSJbe&oICVQQ#u=Z7c^UQt);s1A;8VaL!I1IU@BegPUmV;>-OMb7&?7v>p=o8f!N@8M8_ ze-HN=xMA|i3nbHfnV-$$g$f0xl+!yv^G7+>%tHYD{| z5PT`THas_(AH3gZA7MYCWiIJ6g4UKZvacW)kOizs5dl)O2*IEiCiD`yIrJ0B%aeyd zu~vfphlhb4-H`XkLxIl$9v3_$(kBlc%0V5PFaar_CwWMscO{Y^McW0mLzyK!IK~X* zefgA3Dr_2we=%$F5J$!=m>F;&!2SToDjS55Xk;p4V@HUG}B4JW7J( zO5!uT*eO${H*{XRp%(WRO>yqT2a7>jAAlwCBpjQtfBpXFnv?*WhWsJ*TjMe2?SK8< zrS$oDou1w8^gsR2|L6ZkI+Xq|=Y4$oKfXU*vaNAYWGw!ekP%yJHT=wA@#H0kN}xj5fKWx}E(_uG>j zM}3H4Rhd$I-WltO)}J&TJn+u>aD8PrhoX;BfAH)jM|W~izi_AZqpz&Be&CH8`UnEx z%h&7oYCMj)N%KO_V*!z}?5^;kw+3`zj{XZ}GC4X+*FHt#`yzg<7uxh4-o#gVT%PAs zdA}}=<4r3OM`)LA5bn!R>@r&=&V&wDM@-oj}H ze13mtCRI%aSqv=y!zx>guh}Zh@jJU%F z^o+~h%v0x7Yypb|l5BKX-ZOf(*^vbpJ;8hqN;B?kKLTr^S-bO*>JCK>Ubodaf1gZ? z@$x8dp2aIfNBcTcvfkXj&!g@Rp)5vMQe3@VLrnsr63`I{W-RHkI-R(@3*&M8KFQOj zT`HD`r)xJlos8w+jBetAj6Qz=t9XS0hNY*A9_DnbwcL$GzmZA8ZXc6=YxtV46_eS; zJ}hNfS$WC*4PKVxESNvufre(%e~;6|d@oMZ^?iJLrQ23HzZ$u^tPh-9oX-n%Ji8w@ z=x4u}*01eL-_B6gv6A!gMu?IZ9i2ZHQeurteT&jpKBQOgS@Mc{f6Av@q>2Sc z)EX&aXt&FvnxECOP_?smIk@daDNp0`-Kv7a z$Xyi+r<|?EqFs zykNd@^hFk{)zUtIf70BaJ-e8z+*LZ7uTRfOi`I8a4a7SqDJV~z^U|TPYac;>Suv}9 zJxP!AaM8LPmhJJ;iK|_Pt-eA#flK<#!^`F|d$^)~d_R-iPmfQU-f*vXG~XS~?kS=S zils*C^6;KKE@eP#5AE^LURtZ8-PilX33KuJ^0!u497aYEe{GfBb+Q_ z?zAF!vjdkd*0b~>0_hTNJ%@{DD+n=xtGDQdJuh?Pypvze4vn&s3>_3k+9GO-_3U!- z_lNj)3&yFj*oU@!acNn8n0ngt+gzO7TVH$-ggPL+ncs=Z$;{KAbQg>dz;x- zrBYytRxbCO&`i9$yf|5gvcA*aJl@O{EmI!dR2=Qln%|$~{p#4-liA@~ua*y`n)KCv zs<=^oMYA|b!D`$iNu>@zn%Nxbk@TD5^)RKZI8>4KZl@2qk>(wUjlzR@wub9 zn$$IK=-}up`^ik0UX}DURvc84=B|$i+>V>z$&E^7+7%nfBw>RxovNRBQ|@xhxB7jv z2(pp>e~h%PVQ<$}>jz$x3wcuY7TUUZhoxT6X3GE;(`4+>SC<(-?iO{PUXSSDBlOpE z#-G)hpHQBzAhP3l=v8Xd6G z;25}c2<%;yt*mJ?(Y%}9DK4cab9G*5#q>P6F^rlAI2h=&)or&f=e0kGIT=(okMW^}e{p2hXP^xz&g*%{t6;xZ(gCZ*2I{|C%mt8TXKd=d8M?_T)WGp{95+^Qg+yz%a!{-D!`#@jMl1)_s>se0d+6e|k|o zn3-Kow7I4T_46I20fP9l}99w5DwmA3N#SVC`UUIs}-w1yXFUPUx zr$UQ&W5z#jcIk4nO=KRvK}I8IO7(mQWdH~iTX>Sx^E2$qb5?PSn?4oAWMYXY&pRGaRkCu{&uyF(nOkc816}m>e>2z6E1$a0 z?^Kmd=rPwn!sl&ePzUu9Xj;U!*?L~AL%#1Q`urTv4!UvG!o}oRGTL^3JnHN8V8h9J zM84}<&)Z_-9(Da5+(-0YoR|wPQer;5kA(HBrMtPex-Q;Z+0kuHzI$mWMdsV44t52J znu?Kd-Io)+IPR$DZt@78f3SoLr(CpsZ^UCp<9AOnM3AT{3scc75Wn=&o!ZaygSZ-r zu2~#e|8aq#^b8$vYHyNL@R$}s_>40CzH4{#1uN*PvMN%_3?EP z-WIbxy6C@a#Al&JTcF$ca&c=a>>6t{Zr8JY5CL6VJ6%qnL~mAGFL{%~aOuH@k_zYS@Ur)>(G5Rs zZ>LM!Pn_G8J@D5_e?1i_Z%Xeb;q1jmw_9)MX+WP(hgA;lHm>@Dqt8y3bc6T_Y_yGVC0x1+4Kyz4u% zwRyjsi;vP@e)FhK@171e)MFBP)4Y{VH&#=lxTrHe-_Q4oe+bj^pfZQsQdq5eDatu+ zcNniXShf4>zFjjWrPqFUFb|h{i_rYs~?GynePdtS(6$slNkUB}>~2 zEBng$AxX~%_P!?27x!|O`4eRtap^O=nzfARUUQhCvk_wIwAx|@!^TlQjP`igmUy6cQHNXz6Kx+&NB?4C*EyCLmU 
zEe2!Z@#sGuOJ}LD;@$;OCD#|v`7@u)-t4Mc$J~DIw(9%3Qw#H)X`A^Af~KC)m)e6O zOV6g57mITFT)VH4K`oDQgQ_wN&?(-9;&DB>uBYMLe?0H{{^^X;ROQv|4EC|aRwI=f zZ|L%&mc6sut@}XI>5X$Zdueh$>-2naf1@!i*tKYTOCj@Ch-Wjdxr=sPF18`OtVHVL zO-X0dC+O?9jtbk)@0)yXO@dcSd44^0qhrV2X1eCh9@`ud(s$&n4A1hC3phF^hqLTw z!eo2{f5(L|d*|8cxtrap5fPLq7G*c~#e zy(FmG94(h@+dl4rSYazFZef9X(r zus{XQd3S8L2R!9?9d<8yG3Ji`^&;%vyNTf+lxR1J4`QB#-4&`mDmeHuvv>r^Qp?po4DgpHm5|1HC`q%T9q`_gG#dL)}FG#admomVK&aY&|RjdbV^N-Al!b0ThTXt70;L zl}5V0n$_iayM|%Bf3^3wcVPD1`n~b4ql2x^7V~PoQ&k6+;4-0aM%W%AdVSJ&m-+Fr zvD{{27p;hB+@xL~j%%%-`uk_r#m4Da^|Qxr&bsFy@T@f3($Z8%4gF$2Q|b3^`f5k( zxOK(TeRLMUW@QxXrS^#LJHNFLwqxxM&zL%_@6$wcG~Wr1e`l1ZmkojWY@@9^*_+!v zv%g7;{bpwBBh-q(o7!`)etC*cCU0NfJ2hRJ(@@Qh)4gd=_eJYVGodKh9heXO%Hqx4 zR3DYeC0vAyTkYAvmL=LU_2-1r@xI<|kH@TJYAPYu%ixuxFn4Xi z@l&s_b2VDKe^xIbrWH%0`_j6d!&gCpHspG=kzKlke|uBJ^-oUe-M8c9ycYg1!v1U7 zRc+fA1^*@AGRO!(4tU51yg-2PAid!cAV3Cz`1&mBbFO{%zUSnE4Wy`;ogHaBzU`x*}NV}iDCiU|x4A1Xmni%J7yblku)L%v^2NETf zf1YcP?pb&b{hBi6cITDy=8$gy&D$|WCqM5K(ey4$9-qctuS4hNWzI)wZ2Ao|41qqtn z$KO^QWikr2zuym0?`=D`gpN#s0wdoHe}jCdHqG<1FgfwO=z|cA;i2mV#V-rf8Y?y~ zzOl^^jORXeW2~q6vl*Mlz>_a$p1B$y<48T0sNa{1!x7pwP;<~`^TK=U23wa~@hKE8 zYs+S*bkoyH4;~pctG?N-b^LrlIAXo+m}kg@(5|Jg%oWs)$HDbaO}VqeM+>(0e@)rs z(h?HtD!yp${d$VsNR(!7Qe%4i=&q}a^Y)-42=R&Q1+0`K%xpsH~+P~>O z{%6aZ=L6@z`*J_MO~Ck*DHgU^#kPHWhMw3p$xQojVec;5eDZBxgwV#9n0JXL$+1b_ z7~LLk+?$}y&&Es*BqYJ%n3a#qig-Fo>_i`XlTL)reQR%xp*Uq?qm|fhe`_Y}$AeIu z-wo*H!#9VX6!Z~VB_xxm%SXUU2%;LFnBny(lNgb+HHLm?Osv>U5`IW%h1*f8evJH_ z?ccY`e+=y#=K0oAOk1(=VsY=07ddcisH!X?S4IP3jcXdG;<5i!?_}&tpYjUHpr{u? zK@1jYP#Eo|?`NKT0~w4ge^VFda84JA+3yYE@_6c=xy|=!65I=pJG@MFzd0?g^nIhN z95jV9l>{5X)hGzvwDq|B;HJRPse^i$Qs@*s{rC_X@8?{amJbVd>hRPujofXIfXC_m z-u1?t#@#*fBVQ$2%qeEdw?(p+mM9*nnl_U4+lllPUDi}+n1Cehe_q^xrHh6K8Zzlt zm*oI0K+?avdwddE^wl=b1q7b%~_Xo^=vC4#0Gq>3vHPGOK(%$sm}3QJpyMrPv>~+wO$;WgHNGa zlRs^XHs+nueRVi6@}thY>VJ!&B7u1eV}=u-f0O<(cJi6oZ&j)bSf)*&EyGy*YMZVf zX<4uLKoj!#?DsT`teUG+T+TD>jVV9fM4oT$lfEe+v%;Qf?g8<$(_x5(M(0{)c!h1x zK~1f7;AvgRrtT9r$`D)KaA5%PdVcA)X_vJuIwEQ5{7S29=c^XIDt{*j@rdaa^7}bN zE<25qz6`12jp-D=}Q{DU05_at34VqEU(IEIW z{|bFiTp4!iw)TE5Lw|?|bsYQe00bxVotz+hWK~SlFqJhO(z3{@F}f$(W%E1fa`D%7 zgV^Bad2x-rs*h4>Cp|LJga>9FYE)qfm$UPz_M16(2Y<=*Y0 zGZ22_%4C_j$~P4GhPrpXBN~qxsP#Q;2|^y0=Vmi3p%G=#@{DMSG+znDPFh2vi8~SL+A%EL2W=iVx!_$L0q^ zmZJ22nC;kZ7ACk>wG2kR8dL&5eqPL^yq1TM+*dbB;Zhi-6?jMFRkrTBKN1_$X_R=YuPaF9sUi(JA^?!BXec6w{pFde#-eGp|d6Qu1s_XpB zqRVG*XqT!OFR^9hEIp~rIh_ecg?U;2IKBtUGv?`HC%9*}amdEa;Y}Zu)918Zj?tb{ z?xJl%dn^9l-My%XzMr zwSSxR{c1hmjjLR5{oA;{tv9^eDzD)1g!KL~ab$_~=d{1om3i?FHF5bs0yjk*X_&Kq zOLiOa%rxA?awaYU(-~)YG`+Le*Vty~PaobzE?X z8V>D?4Zg_{gwfqey)L4Be=Nmg{$9^K4jWSRr1QFGH@0&w59cJ4lJS-h0fNq!<;)dz zRjZ|fj}$dZOCs&%k12o2klE3yoY&}l?%%t;gbB=W!E!3B{>{42>~pTi{$tUZwtv|< zp-42eZP&qcIker|b(jD%4`u)T3k@!SMg(m$UsYjV?%T9_=;;k*4d&}XNdt=o!Zk; z-Xk4*PMJv_0e7Jk(UP}nxi!@yFxg1gzS0u7&*+52uJn;6$EG&JYpR8>uqH zty&$ja$&5c#R9J*3y5sn>0YFh5X5f3`0t3s5JlLWl^g?( z{BVBmm{3f%UJm@-1EKt;(aoYjxZ$;&-UHVc{mZ0ZPh$?85>}ve=zkl*6MGrL{YSSt zO*Sr|zblVrdMm2hBxL=tO;4$U1K|cS*q=xp>0++FD>yg7=TH+<&XUE`^dwI7W6139 z*O5$vuns<;QG0RqK=Y;^=TeqcvN5xG-sWo_sma@e=x*$R8m=w#mJ#DwGQ!GiRufewKH*JOq{TF}+TmbI<4- zIGiz;H!ucrw8}B3_4|C?f)dsfmxR5^&w@Lq(f3kt^Pzc?-Fdxkq8&^~n)V!<@xB`+ z?0!=XWcWii)=orxbp?!6R&5poK5wa=N*1m7?m_3P|hHZe(PQ)bk+(s~ZtDePp4hWeDpEIp@* zsjF>|hU6!N^9WJk+zziCh5w<=uEb&YR>2pVfU3JvQ~cd*qI)?$^yX=f`!W~E&#Ny) zrTLD!I8Neb+kb$l_F$jJE29M>#0lb?)JY7Yh{@SJQMUa=fVb+iytP8C@%|xM#@qghi4Gv9Io-Do%bh40oHf@6i ziy`n8zlYKeojhg4sQda(?PDcDl70UO-YTu>MdUCGze^0lPpm_uQ$O?$3hpWm2bct` z^eDO7{&k+23Tg(IFXolXAU}d}F1VMG`d^U>0Qyk(_Mqvocq4Z2h?9_{QE5O%24qx+o)U7kB 
zm3Ab$)1b+eGtCjO{*WfhZTBp{=)-~9hez$AO@G_KYCk|rq~^(9C|j?adE2-EJ)DkaE#&xZnk0&y=PnTkn zP02r=?q}z?`)=G9)FdrIf130PPV0MdlAsEpD1PV9XX{Nv`NI!W>D2n|yz_^-&tLR) z`!W!9|Z^3*{C-s#c z)fr&G!2~Qz7^FQUmf|a;`Yo`lEOQh;YQ}!ratN!SuYb!< zIqctIudS{xdi(ffXINCZvjG}UWwtWCIZi>5Mvqq_`(NA7PcoqvL`oUx%)ZEmnk(}h z8;`kU_TA&))07%6i)L)Sjn*w&%f$r;%7DcME-{041Zlx-`u3w1dlq^Qi4zoTQ+s=k=kF-G3`cDC*4NB@$oj ze0^GuU7xwt8IEjs!8dtrfHAbhTaxU_qbe@ho0dK$GS&TK^I3ILSVMF~_U_Jl=S_9* z;8*iCnG+llT=C%@q0Z{}C__6v$z5|JEUB6&OXi9-=-K^a2x8_lUcmC}#AIi$_IuxExtg~m>M|Fruk zRJT!YXPD|Fw%Fkc>%pf!KHxJMKWqEC>bDLvj%x~QvwgW?lG7y!y3dg$#)c~Lhl`B0 ze~MWG*h5EK!fDefQNu6qdsL>h6osnXNdT@dWP+P{;LhZwJ%2o1W3r(~f4jcYa?eXd z{q)|%K!EN7!qH_fZ{1={ImwtHF$tvV98H)Byd|A*=_smgchB73ix$8-FnkuJHxozQ z0Q;9cLHh3pEtzEv2S|2)nI_4^&4w(425yaj@}N67#W*=LXCp~ z+^n}d0!Lo&rhm`6QHm^@7;o|Qhi9b6>^Cpb)63<;X!Q3bU&qXs^AH{INIUFD6swm* zS%i(`_;ToF$%A@{2CGcly372V-mC-)a@ zK!L)dmfJPE#V(+kBUh77va$zpE1vhaN_|vxx;z*6N`JRsC^iSTE)I65i;QiH7EJd( zpP%uTx;;7ndF(tse{Cw|WWy%>hVZVHx0zH5+kN@Von(I#{kx6Hw|42~AA=qf5e0e z5jlJ9hJQ7D(m?UY`=Qd(B9LqI+3DSU%eFmrXkwl)b2ZJC>q9MsO@`Xsewx7C7k z3$Ii@FSlXJft#PQ&ged^9YgT;i+a<;MxVI)9q;mFBjt>?J-lr>e#NKUPNSH$fFHkc z$2HUs+3pF%432^Q-FGbPwD8-mBQ(w@Ls1FB`hT2`HnBF*!|Nk}S?Z=b&ehZDvbh10 z9WFVSQEM!N>|P&ly>mA|L?aa4g7cBqJqW7PH{xx*ZSk;38D!FYGQ@u3YXc1D;hQCtapT zD}Uwd^Zb~N@a9rwYt^y=(@Vp7jrk+G7dDKbtVc#Qw79vY-HtF0V-W?$DT}V{;Pe+) ztSWKZ)=P&0%Q2k@RbS4^38VULb*x65l+6)+fK6a4=ZEJQtz{nCJi@ za}hesini-uwtQ45qCHv4ZPA`UebxF4=6|S<9Fk?(3wXlLuo_u^xbt@^jrK%~gJ&q{ zrn_FC0P3flK>`ZiN1u{3n0Pi?6gJn5)b6IVri(CX7y^o0u9~1{Eqr$eHHAFGPjxVi zj$)#!P$5)a|by?qsO%>htFd_FE`1TVWU^>%&Vt?J! zXMKan6FkrsQL|$aTI3V{VI2`b6_lCMO9Ix8`kqq#`pxZtdT+xG*+@*=u`H^;( z_6%Lb&}KD^bMv-8ero&SU69*qH-8kP&B3Z}`+j^7jkKZeNQ-K?noD=)ByswanMz-X zE;A!=wqp~%@O<0w#KRz_79ingX8ivmm;KSc{$Ky^|IhzU2K(<^F!w)PFzLs0{D%wX z5q~BN|931fJNQC4AFM7ivrz4HO zJ1PfB&qSrE;wKLPJx=i8et#E3U_P&_imjb)ESOfoKJQ=gV!Y_#^tzP!ovsToEunAl z_a=YzPN;wy^TA2)${VCw-UxrMr$YQ6cDAY=R^2kETk9vn_4&K_&hM>MA>In+vK79F z<#>(n-%EjRhZjK5tWIiSJ?LVNGLv_xQGJ#WcNfUdt^cPyPV&si{*-FruH)BW1kneMx#61>0ezyJ$(oZex`}bKH-DvceVYi6_IBsG1p~~@2FC6m z;U(=Efeqy+`WUx^zm0d^=50?Qj@(SAgdM z%8vZHr>gfTzUgg>{SSb}#EIJzdp5>besAt%M-;GsXUc=v=6`6X!hqu-+q(gE+`-L&f`xFdTQg}V}NcoBnx0ZEOd zX~>ngm_6aXN`KgJkR;+KNBHY3O<8IHW-}|y^t=KQW!bYnTDY0f`q0RLZy$acy3{K> zydF3{oyC&P_f@rL7(YTwK7L#CZat9FHT&F~$kaBk`brK_Zu9i^5$(52@!9OvQrelW ze{G`k)D$qAeLM(16#M+U+d~y_Me>|?Bj>EA3~+68W`A`%x`fcsrz!t&iOTkLi?3rd zvbH{`pZ3T;KUNs5X5VM+2B%eXPL$j!3G^4v5eRTw^i{~Ryb^mTw!72Dbl2Aif4BpA zo8L%0g43{c>Pd-CMFF(tHtM7M`EWa&FCg!@=3CCcsT&2m?NF=fHg-;rnO&1*1RSSj z-Mu1kb$@f}U-p$F$vle5*vwId6U$S&`?rr{yhAQ>`XzZ>^(5StxGXnF9KX!hcWzI7 z->B5u$8_=Z)b>Iz-${kHMK)~#RAtmeZC0<{*TRrK>ekx<#{Mu-kF02&%DW;ES2*o2 z7$oBsrO)n@<@;XcqQcytkA+C(P2;C;vbu77zkdOKEr}lL=7sb7X>QImc+#g$b-U=V zEPQ#De(m};W!w11cTJfTbA5K6_wv*i9(S~XycwCVbIMM<6|$M*zVG$ep~3jiZr|e8`-~+W-R^0jz7K!dX;XtM|DPf>;wT z(SMKjZa6lyoNVE{O@HbwrJWvD-MT+$z{gcIb4Mjd)j%%cCy9Ufgn9m~KDYNl=Aey# zR}LNpe;CTkziT#w;wjxuW;xNPDffV1ZS3nF6Rx}+PnK{SuGO@Dmv>?8JWmd$_VB$` zm>8u1yD&np*e^%wmp#zgJD4E6i0kGYR)6Ss3H{CX3XLhRVK??Y5b|#?JD+IFb6AKT z(j%0%6KV7O9a%H!Hz`&E?+*^fLqV zL{@04yu|h1Mzif&%os6uMwQh5!0s`)(DHVh$ISfo&r=qLKn@hA4|#jQe{4VF^?$zt zXy=ts(5JE2uOBZ#k)1X(&b4ey-|i(`-Ahff4)>y2AL#{7R_C*~36aV>-{m#D;JO!q z3`dem#$rp?$DR^OmpBJNe!$w~rpD$4W;lI6)hW>K#Vd0aU)&Z0#ecoSS**WB`y%;w zxws`kpm-!vy&f`Q>$LnpYSY6(>3_8;4$ntw2Voobhmwfa&-LM-l0tAv$Xd}ov~l{# zy3hWcj+=NcYz}BXLofDTcf}d{OxVx8zntw0`ORIRzLm-M%7*+HPG*R2Mmbjw^c{(B zulXblVBvQw)tf6w1wk9>_Onq=24El19p}Jvdzf(evmW;8l<#-VZchamc7M4IchWxl zV#eT>?mqya-KP8bet9wG({IlEc_W{m-(nmy`$0s{NAFQ?l;?#_axZc{7SoeiChcRi 
zIv2+|!6t<9z$s&FH&Z_`ZFdvlFU8CI+d-f{JVimefAa~c=YFSQ3!z}YhFw5`hbAoT`N85^HUZ7v+ zpR&U!4I7MVpKgxEz_q6uiU4_;&(~5Mne}n%E=;}f!$Gmfa+@MHWwQiUZ zG}%n3(Y`#da?gH>>o=zJjVtRCU-j){j#c((HW|7sPPN;W21m?nBsOIM-wlmh zHE|eAr%Z0J@p{(vp&*DS#xChi-G@oB-&4?5_pU6E*t^`?k#PdQlDYYwPy_F4h+xA3 zD6tY%vA^Tc`>3x6HQb#eY(%9YU#cugA7Px`6)uKf6WVg(Nq?zG%WW%y#pD}=JD3TL04ahKU^h1S`6DX4JG znA@EY>O)D_3kNu@Z1}mztnfx|k@XyJQ4v(u4M-Sx zL8Yy9yEwORe(0Y@-XYe!x3sTNkJQ_o`rD_j%emX_>2R7(Agrn&)_&Rz)7J zsuQEk6@PZRV*$JdrE!)e$QnCdiVt{h?CDE>QLeJEAYWenR4;Pty*?o7; z=9!9;Ndf97Ve-TM@X2@#}n$Q4LF%sAGo9tbd)|X;IiFlD6aXZ2ES9L$6=5^NsQp z_;tK{lZ;(3X@NSc>?_+BLi?8b(~V*eg3-F<{ABg<)xPRYa-q6uSzJ3|U)SjMqT}299=BsI^$2Nv<)>x{A9+H^Yi)ja@l=HX zCV!>VM;SUUC8{rfn%A{PO@1!|2LdSS;wsWtw1*H}cjvim%wp}Bt0`H^h=oRV5COb| z$N9tEtZRi5dHdZQ*W;s_SJ{g63gXnfk8Zca^Vq&I(35Y;ayvNOv!;*G zCNWRe-1(M{oeEUjN}Z(e}8M?7Vhs z?!&?K^Z{-2J%t%SUaJG>%8Ik;n9YKgC~zOzQh%y}^xeY(s%^jhx7pv+`{z?2`?)MT(8gSBHXockI|%k`7lk@s zq@PA2ylx_t^42%&PM>#?-LH2ET`MW27*6W0rx)$8-uBrUDuoi3VSh89H@4r4Up803 zwKaW5$FFjxcOSicNdzw9^0CGyB6@i=#4tTB_cl1~PN2QW>1HS4Oo z>5EXl+?tPKhoGDtcYo!wV>4qt@M|3r(&VE9ZiZ{vA1TuF3Wp%_*LKMI9Pe*R=jK2P z{&u@NIh?G0)?kTk%1`j>)Eh(mKFi^vsPBhJQVWF^c1xZCQ!7Msji7 z!o)aFU#a*Z6_UOAE84=xCF`nlx!gWWA$s2zoySeL-#?b!iG{oxN{ET?!$#Rb#ABK6 zw0A6@#f$Ol@7LJLE)!m#{Qwxjdb!obO)oDlYftCyi)pod*{ao`DEdIdRH_~ zB&Bmjp6TJ`y^hjE>2}S{pE-i|C%P$j!C4En;&LvMZ5*U~>#37kep$+ZY?LJ_GfTeN z@$vZu;!WI4R%a~4cG#L|Lmc}ezF((94a~3=!KeGlG21KV6=IkWStnhiBN4>cdlnPs zgvC@LqJLjAi|C_zy(*!N>mPf{c2;*5#--7gG5|YUKoqpkQgl8zbW&ES59(a#R-09l zokbuR<#zm{2M)|U0ApH9p*p(Pkdf1%TmHiE!_-F)ks%VaR?PQ%VU_ylW3-A_Opjn* ztvxc7DJtabTSN~A=ltG2uQyE|8k0VKT}6!`8h;LY@^OFZ_l_Po?lBhzxH4PE6w{fr z4H;G7xoKv1lVg~RH^t?iXUnMdk7?AjJ5oN{*9P+Z6Met<&2}%m#6wPhl*dMm&m?3E zv?qx0P&rJa@J$l*j)AWQ}c22Tc)yRh)Zw?8K+gT_#Gmsnw6 zBY!U4feE>@qPyec>`Ocq`aZdM_Ek7pYq7CYdKiXq)-yHOn&}m)5Y6B*(_XUt=(Ir$O!YPaWSYP_biLVusQEaDDKwtAzXe9UaVJvhOqwcCEAP>7e9dfkS%PeavFF1ac9 zjvHP%y^^G60`$MF0Xi$mebxPr@eZ`l+i>Fl5`@k(GFVkPaI0~=ooo6NIc{s0-G7VV zD1}T7^^}~ZbUC*3>Kso;-#`2Q^qzb{mkt;W-FF-$_4q5u888Kf6oJM7wX34?_YG<+ zebBfLD(9x2yn3lQJ$EP-oMt-0_0*$8E6Ael;S<;I*X?X|@9f@YM2`re?BR;K2zkp> z4|oGqhh#kQ%h9Bc;0WB?p}|?Qg@0D0mVPFiR^|_1>z_T`ujBWNxYeIcJ~umZNS!vZ z(@7gVohi9GD?n-!Zig8L7V#MwcO3yoP?;gYJVk+@%;6xp_I&Vq_5yoX7^Mh@ElB6z zf`Q`FVe{EK;obAJQyYu4hG}v5w*9ljgu5J0qsixV-#nN7bnjZ_keNNA`G0|$xlaku zjoPSpdVW43-x?4ZHy6=WccK00%=r0%q=iMtqPyoW~1`!S~ z04kG7u_|8cF*9=zyF1wEQxjYgW^Z=peiuMIO;+NMT^Uv85v_^w}lzuoqvxQhx;c7Hkbdg+QsHAH1Y zxr<DO3a4oV~9&3lmZ5QBG2ieXSNufaWA#9m9c07Y!B{`ytU@Y_|1G z8Y0Fb_T{p3uEm!S&V*+Hvby1@3<+q0t495E7()#%5M70O?XRpVSl2hhgIxSxj4k((a+pt z(OwPK!Uu8{(|^2wCZ_z95bS4FV2$HL_7GAAyGj>eFSQhN)6i7%C^Tq(WAd*BEBc4a z=Quy{-tz(n<}IbR^w%3LcRjtsNPkYRey&ShI#oJOqMq77ZpUIU8x!l9JHU|Wl*5h_ zGX(>7%6odb!GE2N-QGEPYXkx)+%I|w-zoG*c_8aXD8L{4w=D!<@V_7GQ{7~hHl?7Y zgtha&?!w!Vy^GE=rNNuPB<5?;q|-hqbDj6=L+U?Dq?bl{^^pxl>wU1c4zWGEn*MS2 z3^Jg+RJJ^(7t=$dcj*gMM?Py8*zme@H?xw$-nK5*hJULd_!Qx+E0zj9Be4 z^4nB!x|ltKBN2_n?xq1(Gj1^Oc)g$5TqmT#g=h2C_*#}&k79nk;ir*I=}tc?xj8X& zL9R!VXFwG~}m!BGgstuT+oL1?| zh3kCwXzz4rRNNo@hAklA7#rmaNy%Ae(|d8+%i}TgRLwh|$U)A}(OoL(tUYY3i?%#` zKnljX`zq26l$DeJ3GbsD4>2h)XYa5>bMv}L!+)_xs=Rxx?kd%Tv_r!lI)yD2W=wkM zX7!XGnQrQ@cLqHhHaX!#MD@e0pB)a0^+* zLBGDh61Fs2)o!0IOy@?;eX`u*437;tCHF1=nD!dhQ}O1~0nAk0+?=Qpqk-rLRHeMB5@#!S`rRy#7zFyqjDY~EtZh9Uz57qKO~&k$EP+L+7t*MHmD zTZTpza)KIQND$CDiQ(aSB*~Kk2G4^bUU9Zy`KiJy4UO9;CZR`CzJgREGFd-v%|Zu> zV!?dbzsGb?`+I#jEcZ9?t$mx1vZWOOKlmH!u!z*-;(UoBpU_c1?jGiYOuEguThd^c z?%U5v?&*_=^F~^k^Wb{A9w@NK|Y~Qh&CEpxq^yIY4 z%ovY{&(X>O0gFxsl?3N~seb^j3+!eVvCT|c*X5f@q4(-GP-`+CtKt{T)m0aq!?_+c z;AdJzO*J3*=ER4uBs5krdMFaQiw9St*YjpGu$1DKHAsd|YC0H2u(v`Svb}q(YBGI+ 
zoqK-VlSU!c=h2*GkS!qDl0f*B@I9S4#hl9@4z?ZU+sor|=e5;NOMeO!EkW-6h)QS3 zDLBAI$G4I*Eh2w^R+`&>KxU8wYJi5OvfuKyEjN(l>9)h?ZH(hFzi;jJ4mATeN}f?P zXQX+<==dPbE@cJ|WPMka};rgT3-=k?&!$lkKf{ z6;m<8meG512!_`LVt?4!gY*`@YH)J>!~7L2Ep#p*6pP0f`ia8+(j`>+0&9B%3)ZLG zJ})%4`n5;ra4|oor;nM(9H(nY4ZQ<&dXL&7W^r+#H=lYwx0?^(NsB%}^m+mEn>K3c zJKZefnNlAj_J>y0)AYkI(sTg6d;Xd;6ksBSHDGV`Ib6{8s_$w4jDK{N3xfCn7;NaRw@V2ltsqR- zJYFbBJDG$XI?gpw42Vaf)nY_Z9g#b=Jf6e;EzGg7B^mx zJN#KmdY9y9-Y2^o_X^X9^j56e^UZXwwA}4HNNn|UBCJT$KwZ6%Gr1jmYZl3xCo1CG z+0oN1X7n?@N1lO7IQg!#7S z8Xz&7*1_AEiTPEOdmHrcqiL~cQi0JTh9JOB^b`ve95-!m`B%+2PZKHaV*c4QFEv)%k`L+?+1G$pq5mLUx&GEBYj?YU?_P|vKK3pKu;p>_b2*sMf+a%s zLScgD->W4(@GO*LNG_#8z$V!((r19BfiHP%cf+|P7e05X@081m9CyCc1U_TFY3rT} zP)+%Jf2yzUvMCm=2n7R-nzBp&yessa*&m;lus{$G>U;w5e$tvpJ+k7;XnU8EHQ4QC@Qb#LPku5}fq*^x2S) zbG%OTr#$M<$8zf_7vou)BU1LM&VR$pAA+Mdyf|>|cDFkO!;jC;W7NaOF}R~^g;oa| zsSA%qLBQ3iiM-qQJpJfJ9m817yX=MoB+a&o+4=H$z|hiAwp1WC+#Aq9m_zkA)D{tH zeF>=F&(kgq8Ly`{zP;dkg@Qy?)&r4gEL>6Vj0oXiefG!YL(|qdOkP_Mld&!UU z)S&6)MZy`X%+F<4y%s3E$pD3^#?pnIEk>*BT#YcPkCn~LM(<0hc>WY6p-~LvIv~2s zq!93?$}ugRGvnPgxH@E7G|6Fm-(9VUtgC*NYVPlYhZ2+<2J9&aAj&5Msnov9HNJfaBlXKi#PD9)$yClG)adF9y#;&ylvor><3UaP1&Q5is~AeZ5lFtN~HJO zz>P7t--Y&}gWi!!3Kn>qm&*(*gtw&-1$BS#OAX?`Y7HUZDvS2DKeFD6=~?+Chc$2I zGLX@yAugeLV}F>J5=$q`cnEhUrT5Nbj=h6JYLr`iGo*6F3cHEA=uqHvgder!MebHB~sy`stWUU5`P9q9i7y}Q0sW(OHmYM?TW^{ zXuLFPZo#m_$b;`X@v=iY*5AY`JB66zil5nVpr<1;nZY`9DII0=;jFpjWR^NpA1Ms1 zE59M7$~g!80VM97Ejz>&@!a0H{7IGoJFEfIH!coS%TVgRxV3u>PL~WdhPA0E<9M{d@nH!bAH@qg!9sK@j2nzBz4D<=#*l1rW5&-4np zV;}N-N7N8MV{J54=Iq8_mz%&Prm<@N@azC`mCqZ`(BUrL5A!h@h|6`ia1GmH6wx0@ z%6aS}NmKxnZ3=FHS;RAbolPaKPj>p%41h#w{A3_|A8(da)E21wwstvE_hi|2Dxk*g zsecbKcUcUhKbIrejw~Q3`Y9(w^VphwapeXc$;X{})!z2A^KnU;?Wkyux>g{q7?Gr& zW5O%ZWDtcb29>4PydWUXmi67c&vnx$CRg?D39N->84u>;W;FdaEYw(Ap&x4P4=y;k z%PK5%P(Hj9eNR_u3l*qClBPpfIaPmc=YPa~Ol46P$=74IB&h1bAxKMFIh?)v{m`@i zvfn|P#XTfCx_8m%P?j~y{nv8cD7(!aX032AFA&fhPdyQk-f<*k@FP7huGh759JxCvz&UCz7 z@4JQBYY;Ep3-1kaIc&&_a()S{pWkHG|8sycXWLq*=V7aIJ6@MH!k`H_JJQ#$Y`WjK z2 z*-ktzfyH8AWyaH1fiAgzoVI8Bz83l&JjXZM_EOT*Nk^SN?r9~%pdmv|+PWp8D$#xB zY8S{Eb*3SdF7l7|04c;Kc`9jKyx%5}oR29iLgke3_pEtZ0KiflWh{)d0e>5r`JOhd zUgQM#eFCFJU4wJCB{uMm)^&!szC%-kg}T)9lzggrGa{H7=49K0MitaHvYxPBAZ<{k z&NCYF3^`pMR7Mh1L?%YuRuMo6P#}r!d#VOQ{~(RSN%QAnXYO^69?#c0rfiu0(4O*O z)R%_%rYohJVoIH`>-Y%>et+I;(HX`Em*VZ~`_iC%WjN-CC9+=Er~90aiP+`RyBnx^ zbBheap67#gF}b5R4=ZRfG_uio|BPZM^EZP;ZdjBn3d>MzqH*0&bj@1M{mgA_*oCnB zJHuO3dQ~1xx-3nXukgguQMZNu_57ITb9OCXuOdHK&QsY-R2bZJNk3-sgozM5SCM)W zgx%}(&~(s!b$D?+jb&xV%afxXjv2@e`i)b2*B@fe37g$4BVpL29lMry=Rqiodp8wnH|Ch%j-C$Fq22Z=KIMsPvBz5KUqwO>vXi85+CmpkfTMnT@y{D?R~Rh6q`Ydk)O|RHQC@{=WA~|g7Vf#$yN7m%GI8D~ ztxi;RVW{R{QC+le@G_21?{?b2rKd!+3=@*4XW%AJvf?UpWPd$s-`Ve#x-GNe_Ps-^ zCxHUi+)dZ-k%5Q694pEb?bop*S3dPB2?Ev_=?~VP*(4NrZV=gpaAc&2?d9d;$cJcP zuvLrmCo4yB_yCv*?Syv6*mp}8r|G?B%Ngxu-E!nQ2@MXk_#%b;qY-@N zll-}PSd|C&rnYhNe6-FjdmoAh0t$k|9Y-jbx5(=1Qi7WAq}~Og+oi)~(tLeih|oh?1^u7Ahw2&>x2+J~M?8pZcHUbkJT zJLW6(LWWe$Q9oTe%aV2i$3_Ad=oezE5oZ!o4fE{R)D_UutK+oLP$CsC5s52AIrmh@ zDw8Zdz@gnm<|yK|>z!v`uiN=`oo}VMlCE9DkKTlSK6@|41zHw(%y5E{8(*G+kul-L zktEZ8^nW}!yO_*_6y4@R4tBGTczLrM_1E`4+Ux}roq6L^{&sl#7>*rVHnYdQenV2p zLho5Q$9|*lp5=8n7WzISyRZHnxUGG|Lotyx{T5l{jdkMT+JJ;>Ra0jwvVjv z%zyjr`L#ZFH9vVbZGZc^h9{g1?otug{TB4r!)s-bokCd zXC4+&2i-iOZ=d(=Xst?j-JuU+E=^?EDJyz!UmS&%cN9ftPjs6VJ4TM~XFEiHZcvwM; zP+~W_DI$3dMIYMn?_r^{%j!P(DNiC5(5*-V9KJW1J~XdvZQ9%|UJ!g;=x&0f#r$iA06dEKeD2!!^`m45zc8WB);inNynF%Oq~njm??{e$4imLG z$yH^Z6rE=v)!!e-?Y&1fAtPk|>~T?{6se3NBN;_mS>b-|k*%zZt5g&j$sX6r2=`KQ z?Q38A;&O+({{0`E^XNP}4?gFd&w0P!uNLxEb=9qb=wpkP+XHf|Jue*!*u))6LxiT) 
z^17~GujYtgF!*oCZl)|kanCR-jJ0f6lSe}MhmY$^W<~8%G<~#Fa;!N}WrMSA9UetO zmz-Z_lXM|*QTDaAh0uQ1`fyzZGT$+R^ud!$Ir&bZ{Rg2-FE6L{;JVbiIpjsY9llz) zW}yDuZRIb?uBau44-4rreqqV{oT52%?P(J$-}&9rfy+(>Nq39!K|JI)!?d)~3UJ#; zOJx{3@ripvy1#MzV!}(nhsHm-lXhB4%~GokU2{`AUTG(f>KZqE6g5k}fAd6_peTJ( zjy7-KPRKv6O4R}}DVw!hJZrfmcg9)D$vyjDPW@6@$I1K>^bd=q2K^t-FN>hdn1`V9 zIpaZ?oK&YjIeHZJc}o=H$Wu5h=BHuKB=uZR#>>a-{^eB98qY7607OBa_00z#ha6r? zc5I_YGTf4#`8Oph9mnH7EsJ>74pq`Tje&W*`S}gY@xE{INIUP%-TvaiiiFoG`xQEb zDw~*##cdAmr7W?_3%0GEAlsAX$xd(m7vK1l`0wD1ouoPpu1pR`feL*Ge!Q@JS4sX) zF@t#C9B%T9v6rrL!lwrZ4y`AlcEC;MMK?p+|wPB+9Bd;$L z-##CE?QJw)mhicUXHz>^99X`f8i=b5nTzVL@#`yZ3ra8uP*()hhP>^wTb<_{zTRrP zF8KLe?z!p6?N1?bFQUV{-QFWx&-UI4{3JCk(QKqOM!mfM#gVjQ9;M*6w4a4L)XZK5G)$Hahwx(ZmRrAMcJQs+6UURiKYO}r)DYlHv zZ&Uc(=_g{pDDohND2`vfeCZ#y$H=3wfN-@p5C+msur61VLRs1M9|uKz-mT$~S__#) zo0eF!tG+7Cx~+lsyYL++2Syj8-LJYmHU4&L;#^;E8_rq44+K^G^JiqpF$9iKO80*Z z=Y1bf36&RF;h;A@m)PHVcs)v@?zT|!uh8

XEa@)~$VJj$3A{VaK?YE&;Nvw51{6 z07e{zm^hfKajRQUdNcQj@rV42rI+fRf3-WSB12^Dsyyy`r5Al3#1yM7!*OGl)+vV9 zo_GvUUTJikI0B)bpzf{3uO3gtjy^Zovg|c`oVYS32~|U{-1s38lECRUc@w96VL|T_ zIK-TM(fr&n$YbvUbB+v)p&Um`+jG0zO_A?Luip3Yo-$@N%6;-;YHlxGcfe1SKVPuf zjJ*zt{maQ}Dts7vn|5~&Mk_xa_BBy_wDH2lTN^j)ov{*t8JxfWEx0w#o1!53Epy|? z*Tpva&}N;JN9B`E+-U3wbuvme)E;pv7!&lT{)aQvanNc0z_jR|t7KD{#3EbuZXSQ~x#6lu69 zU76l~*O0IQ-2XGWIPVtbcEdZQ$3n7rL?< zd~p2RP^MeX7PNzu{YHe)^-8T%aTF-`vEkb9*(F}BY(*R*AzyXljOrJ=>h2%$4!i6$sfQ=ZThisE})tdRvAxT%`)7(bZ>nASVKh9oN z14tU@K$&91>jcw8c86oobw&~-nVpe{LZX~D?6I084d{}@iE=(uRW72CSt+C5y*`s_ zXB5~HC|@{vDZ)@dlrPJ7+0{86;cNZdoUy(tb$;q(qG81{Rm4nfi_n3g!5(LE>~@Bw z9REI`3ps@Z(`Ld9sa>Sf_s8}xe(VEx*s}kIJiImULT8VE=S<_G$Z35DkVy6~z7GSR ziUltXox1^$sq>yOZeRYOu1cipvhb`)lDyn9Cs(UZtr~z9qWU)6{8o2KQeXs$9786mIvL_ji|VghlD0SU zJ9^JG_&C^*Y0c|5FHd?10xJ#5>61l#{zJp$a~ulGa;sx2?)ul0xQ`ukuy^ijk;iiH!yrCI+j^rz`ZarM$Jn%s= zP(571Ing0oRw}#P;aKj9j}aHEqK>kS{5jruoABjF^DSuM^FN;N-<79188tERvAG4+ zHIEdfGOh5P_}aGvQuo(a?lN=M!dCkeuf1bjE7xs+x`M-id-1Wd!yvAZUnU*|dTyxCp&n z&lu0{HT`+VPAaGeHnzwJ{wtz6C@ELfB;kAAA?J`sHBS*Ry*0qfLCN#@*8h7}gT;qS zpjL}Nfz~5R{xmoxCn304<@>XG*iuFWENAMWfZkL- zlbyf2bRc^);Nq7i1o_8%;OtPs?%V5xED zu#F3dIs+kgkr795%5cYjC1W6jU$@E~RnsPdkTN(*BaPLR9?>%W&|BJ^T0M5R>+Y7f(*~k5<1xf2J?PL!o95nSu>GWm_q7hTcP@N}JEO0PYu>Y2QDb{7emLB2 zTLU;s4+Q8}o>TX`T<&A&(LU%M|@7hQe~0pNnGPh}`$ z)F@Jw*##142#;J8wlKO(JtrNX7uk z|9)!BYNTdSj3(?hb3Mio`{wMwRw&~5iYbO?_;C#5&}M70cPTS~;>50l_85Iw*TmOE zvEKFT##G8dtE#jY7Kv+Ye*By=^K3v~ljflYkoz{N)!y}}jbI{1srgI>zcR^|4uG!{mE>yhBGAezbA=)XZp-{<2pdKAQmyGGapSPn~XaFuw} zQ`TDdS5O#^4Zr8*V-5xP`(6&}k7OBnNktAOAUD`i2Tk#myCui32Wr&z?IbnEpOt>w zVx~r;uh-?z>p!dGDD~fZ1`gc(XcF(I}#>;$2B5Nt4@sgDuMF(^Z z2fr)l`Ta7+?#9JMn9<2=C{cc8zyB+jk2%teAfUOx42UHFAi3N z<#mEOb}Xe_{)PC=>bt3T>F9MMF4x#D7;m1N1x@WZ>}`NTc2SmQ%c_(XLz9@dv1$( zoFp~i`0;fU)_2-^%t(H5o`@e8Y~mdt4it~&3hECAUSL3{xlgUALEjx+G1VUqOSeR?K-ya_fF0yZAw1jq!+kb2vThxhDC)U%`sT+fE5C2z;e>n1fMJu&EBUH--! 
z_y&?#*?$Abc|;c2yUKP?KeNOcJESn{kkb#v^_t9GqQB% z)$^Gm5Tlt3H>~f>lu_a8z*$n$S$G}Il%1?X%A9CVc1rgtJbcb61iMnd{}lggcx=Cy zGc9RQcI~bf-N%-{7t!F@Uax)Hn+&;j>;Mwe`mh|T$8OK!t6O37C8F(2&(`>W041r{ z+~CxQ)d&h?Z+wL@Rf_UWt272U zJ4Sa>6lO?{ZYcV^6-GInKoZ`R-oCtU6ZkT; zSA4UKmQo!VLrKTCsi zfG6L1ev=DQI^-AW_@3q9_s`<5al`#sChUi}TleMR(36nBo$NIl*AHLZ@K|tiFW0X9 zf?L_mO7Ovjm11yvoCdq^JrAT86n|#xZ$|2WbJO~qcSK}`0$N3p=8zjiA=2MDo#BBjH<|H!(&LFyDql#!;J-GShK63zl1C2F5Q z$Vb3j3J|o3zC|Qb`cBN@f!?+xXU=H7yW_u#!;@=LuKYqYFuzZ}8i=Sc`&L@9yOR)8 zOT1rQe9{@;#rRh1>oMPv%#8KNNG+929U{~(n;6z|L$sZK9=J)O{UmhgTwU?6ECCpUpE~Gy zuhhDhUHuRYv9%zP15isqwj(qu9duH^Y@xP;)Fy;O{)K`(oiWEiy4V5A$=XKP-?dx6 zfx0yW<0$7Ad-RHuN2lWp1&v}%6SJpaKApMUOz>%8&u6?z@{+QSm%u08iK`Ghp2VZX zvzK<8wY*UM8J4<@gX^w_IF_IQyF=7Dh3^grNU}(U3E;<)Jlqi3_^z-($zes63N?(nR?Z7^HLo=4oB zi8A(shYDtEUL&Ky(%i(Fd#ZW~lr^Lpniw-f4GW`e#%%wkp=?nRktepUi@;Jp>^-Pe zr2nI!P{$H%_q;FGYFxki`|S-_c(@lDx;KG9N!OP-ggir?xCc@ieeUz(q9^Cr1RA>9&F69yGwP$<1H$*Xjz;FoIq67r*b3kRQFdL<6D*90!EEbd~Hh4zgk zuNlO_EwZ_~egRXy!MkbQZcpPkHZmjto&#Abd>q>REbJQOc#g0GTti0jI`g zi`&OS3o?21yOD?edT|^zgw6|&8$ipdn=v_x5BAiB`(+^&6*|Ty znOx$;FBZ9!2Annfbj)`8fGwbG2ddNkx^woiG+Ycpxy^>66(_>J-!XiN)`eFEJO0dz z$PTH9O=WK$OMLu)(xXD%?V1F;>J+CtwJh`~#1JCxLe zRCC`0;7A<(+cO{<>UKK`vmWqmyXFt3wYG3TY`5pxAA-wrKQz)jHx8w9;%!}9_@FC# z`;Crgw4h@tOHd|K-z&GeD$Z(nTZQQ5tyfhp8vGeJ`3~%rQzePiKL~QY>)~~L`W0&F z5;e4r`WN&@E?$IRPXl>Xditq{mVz4B69mv?an{^)%g-D^cB(kIai=b4Gbx7oT0p!N zzoiEM^^Zk4fpzv5mjeY-`I59NUrN<{$!?$MMfmm3zqLKuNL(n8Kbd4E)?g^@FNXh> zK>7n&s>32O#)3i)hXBn#p~oln^4d!POp5@N`W&S;d^$SkEhTSEc=Accdrk(20e3dl zPT#A|2XN}Y%WqY)eShk54PO3*0+Zn}o%(`ivTtt`+&`zDm~1rm1`7`bEVVaiDhYmYcuXe0Hp(^N1$D zqD=n`Lj{<*psW0H+ZFHsa;^z>?gC3fBUg|9`WmveA{Gz%+w5iNI$X@Wkb!jeVdhnf zJXQ}k4sP&n3(TxSU*Vxkt;gZWX|@!a!-XKT9t!Ai0UERD=C+Pr8~}Q{XjiZkqStI( zbTLR0J>n28X`^$;BjN94&-!?BBJAd(o`BR$kdLne%~h6+P^29!OLW*hYwAD7w9zJiGoC_KjQL&jG=^TU1P(p4%*Fk3$c@rRuRslnU7yrwC zv(J;fqF%Q8vtV4zI!H6J?xt1t`0YXA#=i}5GiR((r3Y|)G5{8njnu!JKRZo7|I_B# z({9d<0*i<1U%az;G3)4T$COQr|J0x0bv><=pNz?}m41oW zTZpPKaQ{a}pVE)M)+HQ6SQ>VpNa=p5Ue&6?KQANA1* z9t<2k%}KJG%^scX&2lILe>-dy>swkl9ewc)yxyCo@;J5ZoYVf|uzGz-lP?V-8ZjXS ztT~!68eJNZg|&ZWjbYj5KNo^0;JKqCt-i-w$^6T{1@eSTd~;L5JC#}S!u)4%r}Ekt z7FU0WEKZq5PrKivi(&8;8osDA;p~mL?s7|DP$P!7uaL!BqnKC%K^_^v$eFewM$};t{@fF?pBB2`*|Hkr?dIVc zB9C8h$ejB*Xyh?`<@ZM=XVXhg9)o2m9|8^mY=l+|LBmYKVHkBqc+O!G2{%6}d$hDj z?>EsDTlU#H33?tg{_*G61HlXPn@yh?=c9Bct#&H9Hv_<4FeLndy!RkN@q{ufuy3zR zM=FeT0YcVUYJU&e6?uwubjV1G|M!W2yR)-zXT7$%;XsG={%|u>j5ku)=G4R|mhu4L zjWq5o_$D_wxn>$y;^*3tqgxU~NiJSsrYr@m0VfhjLLv{Hzu059OpQLFf7<7GV{Lzn z=dW)Cxfq*NdZ#_7{S+f|OZ)r%E&b(gT-U(k&cy-M0r*L@!}p|(;VrgX|DlNc0p(oI zGa$;tQ-0fI2yKt6?>`>EbuX3BOBc5Q?Bjd)ouvO}vDHXH_i*@AAAmJTT)N8-9ozn8 zDDN9baw*1qwxd9Rz5WHPgu0p1qXBFg&xjq1?LSb5_)r6fLTevxXC?d;|1-y9`kMJ~ zHlDVaqNVlq%0*tCEtS0)Q~A}hK$z{PSq~4h8J~6~pMWhmkpYP{ES9>o-a`aJ&YE4M z-4aRMAwKxMeO%6~G<~8Pm>bKbUg9Q2uA}8#w4YL;tIP@ZfqKY z-w0@~YA;+GX-`)iM>0KrfVI#^aed8KXZwJaUzMpa{-uxR*cVi?y}f~HPm{SH_9S4$ zGK8UVHP1O?tx0U{#p`2lh6y0@1az{AN+5axw<p@XjK>x*;6uSAZ ze&57b7R0oFAhJt4i+_mTAM6()-Z^EAw%HKokr%z!@)OMM#{svh{wnSnb{}PIqTq03 zO`oJm&YWm(j6aT-1Gs*!jF(1UWDdy4{_Qjo#uVfcd86=mSc{gE5tNd7H@}D(Ng6;D zQvGUI|K0Lo-krYgz-uspA4NGtI5n7TximFjBdX9nu@!;kn7wpbQ~tzP-11XtZAkXS z%queYXjVd2U{;80pSg(n=xTVdIOcGUA#3nHFG^GQW+G||u=h?rkj_f_l2`;XoNKb5 zAIE%5I2{Wwuf~Gb4Jd=>A0pw@Ru`6&5T`fbbE|V$`Y#{D_bH;!>*RFe!oaPn>Nb=? 
zquAo$c5e2{6Ktb%j(AkUU5Ay4cbX`Z5y$zBfe>N0h^7lSU(O~V+>)kDKIr@%1iucy z)1IP1r@?Oqv@T;2wz*sSJkjeWv)tke<7okl%orp5AsN`LKXRdcY&8qW9@0c%QQa$; ziNurF^tl!BniJgl^HXCtkz0IT@UMO)n>lC7TtC2TV32+;+zg4 z1RBQA{XWQbe{j83Gi}xNcLt|KM6cC&Wbi35AQ~7)aP`4UAhbHvvWbvjNoLOd2{%G6 z936DL;6pm(r_jD(sO`1wdD3Dlw>6}zgX1RZkW_;#P z0-476TUw_qYTo@rVh&7eaV#B<;sIG0+K`ZF-?VcSMC-NE(UX(%HU!H!f$@K11 zrk}?v7l=h>w^Y&eCju>3GDL=Q9;Iz6(#dMo>o)oQGVy^(I)i69S6V5vnRBWP+eqk| z2}t9gxJo3XPvU|AZ&YMoFo?-j+1mp?3y78quGRUfjavLrB0h<`Qdx@78t0FabeoSQ zF9Ai!qIFH9CoEaQ-x2GyPh(#`q){1|5FBet8Z?Q96-t{pzu|$M&*4Zb;SsgNu=?4( za9GUqfdE5G2gFfyQupik%4Vi40^6;8rlffai?8{}{N(UmGl#mktcqamIKZ42AY^b0 zUcxcsv=TVBNSY)rhAN8mi}mq#U_29ItQhUmofz}(`L3aQeE!o<)q70dtb4(K#Vf+u zINtoa`n{mNiak7^UWa?PkG{cMaY{?+HR>QJQGT0y*%!;KJy=f->ZU9sjNN!~GL6Ii zkxl7Xcl7}8E;`Z>2%}rB;SL2(nxIGNAkSCj{M4RoB!x|l)S`5?C8x^rY6CAxrH4^V zl^tM-VjiT1Za$|MQrhaf)`;d7_{)fOBZ3|X?vL|oa#|JkY5UiWf#4-v7|dr7+>eS6 z3b7FhA%LP?YnvTSgo=V?W1H0Qh@hfI?QAFTc0$|OgxV+QfL6i@uzWrBFx?Wib(SaI z=n6Z2%<&;b>)dEYX;Eqq2AcJ+l?GwYbuHHXn87oV1ug-$PM!FO#D4j-OXQ-qXg?JW z1XR-LEms$B3D)4NsG#X-{(X5v2`4f3*LO$!?uYe1GRD@3R;eH6J&c_Wc#-W6i;BTW z2jDBHD*?=QW4SP5zB}?o$`U>=EFhY#q-mmXja0*3!9c}f& z9>bJ{=?^`XXVP5IL-?%H&PUimkv;$(@>IwFjuR%T96z@R2I(o{d!Qtpea4Icalaun z3{1?tWBd{>?_MV!{DqdExs<#5-BXCJe#J9n<3FF%8`*>)x-Rv7S*(~FG4+^6>PU@a z5BZoE0A|F3VdAUh(q8ag!WK z)NT!(FIOGAu0$ zUz2Q55HDm}Dq>IVb-Y%4dvRT79nUtkJHxfd9`SzW$2&JohBF!*>8reqc3fCY)`;T( z+@cePM zKiSoQR3jAPwhwRo)jV~V`SKTSl+i(BC}&jRNcilqvNC(BG?4XhjJ7iePRNz|mHl*x zGd7ByyaFcD{ueqMV1eu=5jznXTeEvzbuo>HAb9($Lo=Ph?8`&_3k_GU0=(;{^(+N@ zbOrTOcetj+9S*@Is1BlC@2IMkt}gnLTHWf{(1@w;VpVj?4E+2TRB_Fb8as2U*Jnmv ztJ-R(=lE4GoJGwldg95uuHx(3u{an+RR;Wc}hP`%M`9EwuBE(VH9tv4n=HUuc+xpTHckN$Xm4$gVj znT%Ak*7;HNd0#b#1VSn-=gO`4HA<MculVd zOI3ieA(?Td#EQfm@m-zqE&Sbf;qS~eQl9l6rPq{dUkOEMKDsJ0*wtF{#@LstAC`Fu z&t10#IY|*^x$(s-{l?39gP%{v`H)4H$Y_yg6z}Nma1{X7$b)i=zNhBbs~}X<0#9FSc3cmjf$9i8*FC2UJ-ob}eYLJCxT8Xt6+5d>?;4roSCj|Rbk65MWQv6rT1dR~? 
z8Ix7FWWr-U*p!XE@w}MdnV)<5^3g5k%{7d;A)oeiZJrk8urS7=@p1A8xruYZlyi~r z0G9ys&}pD6<=whrhzo7ckjuS|32g(FzdNckn(;f+B4N1h`eXynl`GQZ3>V35#yC2qvbb^VrEt!Gmi2U%f*y;xx%xS(FX$l>q3Cx`r{RD^6Z5E zAzj7=^(%K2m7emVS=1rDax(rc>!3uMV{}ka*)yOmBlRpx(eA3@Nr+JhLj?H6FuYX) z2$m12J=+hd=>1Qy_s4zRIIDXcwMi3%{P;{w2QkZtUMX|vkNv#d7tpl}7}9VuEuthS z0z)Frgpfth7!oV%6caPf>MI2MBJbu!p4pWj(+?$_c^N@P3m2JJALoXkUaFkX#5SoL zoB@8+(^hQ15r>1^53uq$ebR&Mqs`v|U=e#EWZNSdm~fdC=tL?g5qd&@6tar^03L4y zi4oZ}|7NVkc5aH|ACT-lU+8#Q$reIW93r!OU+`AjX+CuKz7>7gJV$tUV1IT{g#pdh zATbo^A6Jbt!BYIy_#rTjA+{CX)J-=8L|n7*A7YQj?9NJM(PwNU4poZkf!AGD#kW)L z67sWCHwSh68$Y_fVihH8tp)ii7muX+uot6mfWPEy{b z6Q%1Lb|t#fQbRE~a8|@wmA>5QDEv9gwO_1`^bmjEap>(e6^HSA_u_sSv-*ePtAznSDS^havfksrbVi z&4<5s4qc?AB@0_$PRTBh=EseTKV^>kt9OV_cTD>+Z6pOHvwxH^DukQ`7cDq$^284q zk)I;T zNbP7WD@9`M6SL@(Tp_?+&l~z;$d=N7H3mpQ5hyQ`&VTE38zH6EPaV4+4_$!mr4-%} z8@d$CbLjvgHhs#HC?IbME&VM>DyBd26DPIf2^?aTd4k|l4AM|VpsC}mgml;2%0u@b^YXzF!#P`1!hH+ zas(zo@{1+u<6vmq{BZce@to0rDKt#1K||bYGSFS$xMIq=5fmGVY(M@ccXS8N&`IDg z!FO|)9v{yE;{50ANUq&NRS)H9;py?=5%xm!Okdn%hmkLiM@g%y?6e$Z{xwI|y^5iX zHyGO-hZx1ur7jFRdh1!0I@^mzb_=4fJS+KdY`_p zSyLuWk0>JCgGQ2gG}tezkTj%A{z^+YyfG<+mqMQbnUH>y52}O(Td4XBQdGUxVoLek>;!eUDS(3zuDa?71#1LP z7SWeK8M++3w;J6aJC)?90g9h|L5iQm_|+_K=tl!YAK~s{KQ!+V_?U#|LSlsn~X| z(N6!XQUu~0@G^7I*|9d!2p%90BWDm#dMHFG7lD)52GGe5<_$tUrk8;b05^1@lq$IL zs;zI=Lr>2Fm?*>OQp^r={Dn)0w78$-90bATO@ z60=|5wGCSPBWSrQ=e6K+6Fv__Su4tyUJLZqaRM;b6+l%B0}Ba;#e!Eh&<(>V}m z{RMpw9t+pX@_G=`b<{1CO<;*`Qj3R?o&TUoX9NW z+VQjM5je|$xSF}qC^Mw5iz;cimvknMR?xy67tFy!{jq*&N8X@-cgGTZevTVbJ;SIy zFARE#7n$43auX2GHm&&O&doM{$q+TF8ThAAw99b&_jphfI-J#de6hzVnX8GKf_kHQ zA?KA(7QI<1RvRZJ;CJW8Q?2Di5FHDx9UBM$s5@#dQe=q?-+&M>$c1}Y+KQ8f_vU-l z`JVu4g>l8>cx<}{eT3N7D<6~fM&tuH*%Lf$nH(w!0~Em7>La8(Ayjc-i2tza{YTXB z!*R-d2y0JrgOLmB*g8uX3R8p?Acd6^)T zb%djCz3;K~T3tS$uW|bmd*Cf8?!qCdO{;ACd@G3Q(ZCYPpR>J>Z=k}>he#>A;bw^M zp3T4l=Ozpk^4*>2GI%{NgYd!LK23M0PXyfp2X=78aeC}S(#qR@StQN+XmKT1p=a!D zdAXKz30hd$t3~XhOKtMpPtt@}bkcKc!jEg^yHUWq)yo_28=L6txx%KAzdtI_`v<=X zLi62;wH*B>GUnN9G@a=Xck?2}KnB@L-?Kgd46+$sId*Gu*&dsC`apGOGdRX+zc4Bw z5_F|w(+5gp=H7AZ@FnJ9v31k?kv|((ZWqgN@khuark^xUSgeh0BRY-%INHNNgdqrqQkB#^TMtX9E7$X*%h+lR7R%SVrAPN3UeE&X;@3d-H%q&s<=k&!45bCCsmEiV>s^>s`d4N== z*~3&8(dtV#X9HKuhiZjNlf}V%M$J69>+)iJRmedGp0n~{XzCeKB9(4=h#qP^3CFUUQIn+ zNpSP!`u7q(JAs`;oq?G4RQd+n6@cf)GV3=qbQ{VCv7G64h|v8>2^3wHf@2cbch;N?GdUdG?F#@=4_Ik=6y_aovn6 z(ar*eWzHB4T#ZxN)gNDW-{oUaBB@l;JFB({L@}=<5KtLOQc|`HJnAfR&NMP|XSl8r zz%h7Iu_d{pU;5rI9{afs(4!Dck#>U2taZXLp)CS`f7o7ZF#mQ9q5W9o;9W(3LU`lN zb07bkx$TrJsXKA2dNRE^P`1nyRx!?rYsD(4c|4A3On7ZU&qGEO*M~Rl)S@C-`PI|n8*wYBO(Gav^F^w z;*CRl<6@HT1#$ST8A(P6qSN)$8v7>>hW{e`9w5AvP{YK^M^{(t(J&+M)ISw@X}ujY z2pGnrSnri1k$s^>hi=1Mhh8x#3B1u(kRFcnL;q;FcwJjNW%(Y<>9_2tlTNreoTv?^ zU*sCL*XRvkxCk6i4sW+c+VE~W3a)2ek-9Tz{>LC>e3?mm#kO;P+tEep2_TLW--G8U zAx+XyShgSVX96fnD9P}Uf6w5k^)Pn*N|qRM{VqAOX&)EydVLKZ#PIt7ALSAjL?Roz zuC$$ml=dHEqg;r+Sn3XtF_m>a$$w69k;^JYB;_&;z=Tce)5FIo<%J$9MkF5K&#`fc zw^#y-iL>V<4yF7BsUy+$v2-~7#G@Qf$u*(dA7IP%tCrB1mOzi_(+=_1xJT1kjQek4 z)itX0#>e=`sV&ELe=@u9U6PM{!XyM3(fS+l$>&j5x}->_1oE>kDI@bPDW4Vcouzo( zJ$*o&8l4RZa>47HwANEY#1Gh+3JG|@;Z4YT2(bXUJ{b>$j3}+(@1ca*&~|GW_PtB< zmHpA>2>JVcE5(i251j3GDHDDAEb{7V&$Fo>;TiShc$;m&09GWAoy1}u!Wttk)HdF^ zM1b9x-HBA?zPQgi&|W{z7xqLpriY9H2n)i|7`eM_ZRHU@%kvVUTsE<$jOoqy%HU;z zs{2e^zkme-!*U7fYf$K%j(giIVI8;L>b$u0wVi`$DybjF%159z*LkfhHJ zeJy3p?70JqqRxd?bO(ZUw(Tue2|6KC69*(n9e;#mRMNklJ&m1U&|}qk3$WT3Mk<-p zLj1}*f6ZkEyu)a0&bg-WMSns6D_NW;{~AFs^0U7Ut+Pex$g`Pp#2^M9@KHBg7}0AK z@to1CUsUBa$&$Dn;F>_$(PeAR7o1L+=wUoMTBI?RoOfyk9cg@=8!kfFAGKt$)Jng} z1Po_(_&dpUCG+9WhtNR)`ld8rSBPdklKLWJwJ4B!FidkZ<%|=vteeYfO4Q_%GCWd? 
zX(H7ghuVxQ9tP%pQmiidcdbx=tinn3orAW}N(cx3 z19XU$^s2wj)8CG*^{EOOPL`_`yjYnGJwa$wh+FFWFu=%hi=grLtQkTMqy<~1N6OAC zAMag#%;k7weOyL9&!&Qx2bSO8hVHQsDQ~IdP-_`P-Vo+79ru#{2`dvITy=4;BDk~w z-IdnvUvmm`ei+JXC)ZGuZ5u&9t9?P#_F5SV`4vf6`(AXl328u_(<%U)NFoDWi1p$K zRMfh@(d-$aGY-?*^zCeM?ggC&&A;E2tR^Y)9tDGWJ_&Jj$<{_4Z^xL-dN%wf&87Uw zHl##{9nVqDaIYw&Jv0b4hC!{M01^ZaLQ$hB`cxDKmCz+6Nlu7L_Z2e|!3tuC3LPdW zI%&t(n0`^z1Z5A`py1)VDMF_^zut;$$=Aqjyq$T zx=q>w4C|hjX2;AwQ}K4AL2<`xLOO}ARCxO~$Qd7U_!Jz5hPR*MIsk~4=J}NO1wE;D zlTr}K5ZXtegE9Zv!$rGv4z>Sty|x^o7K6br{;h7vjK~6^z_@#3&{og0m>2kIWZ0@V zaZsGclF*W#M=qt-+}=fpQ1yi{f(Qx_qKbv-oJ2w4Kf7Yj2#1Cms6Vt{#FJUJX2-j{ z<#ftf=_H+mBszO3>#gYOg^=3Ujy#y&@>}0mtQ(++E)Mt*-+baVwO}1x{Cj;|pm!mB z>tDWDIHJ&7bqa#^IaLqd1O-`0m?-gmZrIY|2=RhF=D}~N4l@dra7A5KcpyoqaXZX9EF|9eu zvK&N$Nga0*Iy2F2UUZu83FvJsI{b+S4<88{=q6Ut3|+#BRWY4%pd2}PgxGu;w)Tc} zdMEkbLjA)GrG~?gn20RR7qd7&TSf!0J1>op#kqo8zLobm9~c^;lHzF8lm9)lg|*~5 z1P~LAIukk9i~yup4-6N{Lr3k={jk(l53}mc*JzWBVEn)v$sBD$7ovQPB_~ zcuI^;dBB#lx#1ci*ES7>8ExMdKH$7O9tQ3~9&i`|^e1F3+c{Dv@0|&1&+*ZL55xZe zH9*S0P|&{L=R&)E`v>py{eGUm$8-C`VBzoT&v&E(zSn1vG|HHKWFZf_nfnp#WY71% z&FzX53C7Q_{@^!red+)6S?^?+NH^SCeP_MZObx;P)t*UjyU z_xX-P%ug6SwBI=Y3(&DW&tJUHeEoUc&A*$+ZTa(_*<4ru(=X`9?|fJ1yda+QFeA6V z^ZsAn=U=Z27ZcC(pN`}2`{2F5$^2h=@PD&d z&48VGDSyCQ0cVWW>AQ4&MIkYyzWmBxs3L|sH7~`On!q<8sxsrO=b`jn@4j6*=d0&O z|F(bq@vfrg$yT(tpBSFMzx+>?Fn$9?nO8c7_fj=ri*; zV@1cq<{0-gr7Uzr3}y`!(_E7eBZgWM8$4{lN`V3c50NvVlz>bNFyJMEk^(ph2PCmk zy9+!Na4LxM3d~Rf;-tsS13O4Y_vJj?_E2F!^fxEdt_8b=v1Zr7^XM@jduo3jN50Jd zb$`4CM9j~tG$gBE=S`M}iZQ8znw%hclK6-_r((`k&z#ww4a`1S;aoBpv(R~z1! zaExS8Q}OGXgzzI<@?{m8^QY9`R8}7MXYu&-#sBa>{vZGAtg!MwpN?7azyJEw)#uND z(T-jb{cmn|ulvXU@;xv-KIh`UT;?mBe}Dcm7Ij%Y{>z3S{+HpN{g;2#Y?*~zR=-tS za)Oq=EUsVo1)Jz^l@12nT;9b6U$%|^)R_X7$^kt9mp~rFE;SN_)qyRnnA+x{e>7hS zSO(~tuRx^fW~uAtqO+~dPZBosG1%10;E6v^T z5?8sP`Lz{C-Ok+H2w&^<${Rw|peA8;N}tQs^%C5l`}_L6Pnvab=?i(gSO(N$Z%Q{V zMGX=u+NP_cr+wlQF-jjh+5NQKyMM`!z^HaF1>+;f>SESFe;%uxv~IV4 z9a_&?U-!^Ox<6EMe~=|WFow4Y@x^OqzD!RN4y3!dCyVO%V#C$q0d|3apx&K8YD6Np z%R;?rM3A>7rZzPVTH_y+STY8YT+3(h8WV&CQ z-YvlJ$|!|R=LZ#1hk=+q02vC8TXk4Io}Z*$M5mLS^#O^5qF#o9wWR9x;-Nu+=VD2T z55q97%5E<#v389NmGR|DtbdMq`{~~tXRTkm=Or76EvjLk;f-GRr9DCS6@=As5e2Ap zVm2G<+5s}TXoR6Wn(OAA38xggyPNpt2?v`V)BX1RBxTCf6Ykt8vabtgzpE)<=q1$X zzRRPa#@^wo+81$strtL#5=-IQ9NB`yEOaVzcQii3ErtTIDb~&F@_+Q@pt#!Jr@vRx z64e$+uY(y6y#RUzbppvSBXGo0pbEBy7@TTCc$!;hh)j{vD@R zG6fi(ies7cm-ON@Er0qWz4E$kwIi*_czOAe15e$37q39YbdLupq$g zmg~)0Os>ItM@Z>i8Ka7M$=AJKUh+k8=3jnGzMpL11a;=p)+V|1;Z`7GX_ah_o1XVL z17jG_>B?$vfR8{pw$;L;P4mJ8K@j~`t?!ESZnJj0ZkaB)Tz~B#EzHDi?q^foct)3| zZ@%p&XaNR_>OY`VHtsm^ScBf9d^4t`@cz9yyOAj0nn#m{MSsrRd@9ZM!nI8`Aw?%y z#?^u(bMoMzZIKR7T0rIIA?MY*dNr@BQ&N2>JEx(iX=l#|PeZHY(-F5`@mnK#a+F-K z-Ob&$Tjr>ff`7_?jzG?CduF_Nwn=z*WqW9gbK$5q1Zg{C^Q>OvkjgIkaEp7byM_I@Q{0%xg53}zwyX3_{FbdV?OO{)g>Xa zv9WM-*N?j+-Srizf?)~2XtnC&s~&$Gwio_S&(DXf&k$H z*5!mdez-KX%^vy7`^8w^bCSy2WmXZ7_2sjGB-lD70BI*aFKbIBN|Ubk#TGhY?3SoI zse-Jlr(fy;1oh&;DR)`h8#X~iECgqt^4ny!e7)zuD9>!S)z&|&^O#cZ>c6mEGKoqRh` zZNGo*o4pJ8b*JsNv9DYqqJj!)3h);e#IMR=lyxE56DYdI8)yl-at)zqZs3~t<9@%e zPQ(_}emn`w^F=Vk_j3_s<2I0F`i1gWv07aomVY3wvzrX6GvHx6&A;9RgD(QeTOK8@ zaSVqt3=@#=@$(_t@^;<4Q~&0(j!j^ao{XVCs=gBC3PX9;t{{^0!{#JTYTL}-YQ=I& zwuXdc6L~ovjLYq|ITF#skgT0`;$CEs$orkQHMR*vnU=ftww7b`RFEo=XN2N|MXnqEu{<(riSU+jQw6O?GMgks!#bwb@NQsFAR)z%5w0 zn|i;Y!N&{P$34BCBmPc&zGyENVIo_}+?y5Przz4r(8Mi^)F6m?Ys=+Cu!KN`b) zU2Ui5*nO9tY0U&KLTn}LWb{^>J#IvONtMD}F|Wimg)=2w4%N|_ygm{>s~eC4%45+G ze5F3tc7Y9^Y&&=0!;fxpoEksk-2CHgk73_)pk*^+6|`bg5$R;wYqV4I0Wlnz?0?-X z8^PY+BdWZ0(>9;t$42W;p7L51>Y+RkUHPV`+eg|?>qj_qlV<0n_{jA60n$72cE6jF 
z37Lstw+_~mnbL@58{L8uSCqs>d+TUDmfSJ_6?wK^$L(%^&B^9J#Q&7*x@ z4CY;QpR&3IlWT!5#%?#fQA=8jdoU^O_Bn4T{C}1#p|g!GJ``BqM}7hMv@Ct6ji4EcdVSYXSV8={3`a1c z3f$#!<&GP|y>NjaYxl-cvv@_8DPU^>d-VH0xJbn-1yrF}pulxoB=>|>n+F7d55D>6 z2$;EjOBP!J6}C)JOU6cd1l2o8z_KSFl!Ky5<+ib&L&1W;Qz=jo$A79$Q&|v(Fv% zIqv#l@v>U@*YS?U3;?}BcRO?4AvODH(xCY67?q~fs@4ZV2O%&=U7v-$UA^7Zbg+6b zexBEkAn$V>G?h|49)BRL>+T}C1M|#cV+_sPPLW zg6b!?P%Cl)R0t1E^sQW>H%58NJ|0c0jkQ~73XZ!cq#oUKcz?ItCk?R?ZE^1VH(B=L zVG#vXy#WHATXK!S-+PuOx}|Nc-w)G%)=nMlwqZyJ@Bszk?OHMQ^iSYhx9g}{d-Zkk zmPN7dQ61IX!fN%z(za0Z*Wgv}oBDJkl6>_W?WAM~++EL;6e2tePGa)Vcf-!t@7f^p zNEx;Zs$bpBc7JIOn}K?_^-`ek&|X}-NLe~$24U3bSzvnlw@=XDT!jaDsV7UGTtwc^ z5)$NT^q81#dK9)ds+FFh6*SZh+ob{$0QJj#B42t(E%Huqip^nz+UyVW0{+Hf!rC%J zKRI9CFX9+>h;k%Rb=vx3IMQ|*Iz zZY6(z2dP{anFnyO-Q2^yi65)+b7kty_KA$nb*dLD|5=ie2yK-7^{&pnX{w2OO!voY zbvc!R_)IsXwW7l~K5Atx<;Q1taihWZYB5c>gk(9KU9?3(8=n7^kKs|;h`mHSCw0~9 zZX~_W(SM|1n!RW#Ru=9WF-)m=AN#W;FX&c<4%RgG`Chkm?_PrNtW*sixEa;GSEq@5 zJjU&78`|B;Ay&yN7uL5cW{&v224ti2fz(t~)o6!GF#9c~Ug{mHOO~s*;2)j0Nvf0M zcKij%)ayPg(`KESmUVwU5+q1YjPAkPvu(cUV}Bj)%`4y4j=Hxqs`EB+;A|ciXnVrf zX>;7B$!qmGv(}=1Q{AIJda?Pm$|-yFmeM}GSg9ULMmW%;m^$7BvWn=!SCgd_uvy zp2-b2y*vA)yuA;&L^K(dMZ*caD}Pt=R6DOG+$tt!;-1ydK`-&@lDKa_uI4`ajgPr- z1q@+-sc)~WT(t;`foEcM)$3EGKRLRkq_@)^BW-;M`lrl3Y+c-u&iG&s>pOdp5M8P3 zNZ7igBcIo17`ey#zJ%tqze+pF&r2s4uN?-o`0?!V(64-_D^u|q6=`t4-+%kN>Xk$k z^4-L9x6{zvP806j*i>fjdl&pQz%k;7;IJo8F)tAw?A z>w>M|Db#)Cb5A9eqNVmcvU<;UyINJXDn*-k`Jg^;AQ7e}D4VaynFEh&qF4FKwU%4rYrrcM=4zW?JHPZ;Y2eJs=Pb zo_Lv^LZ$#-&%L~gwM$Q(_IOp>K&#B;?G|0Sul!}M$~Sd|GSAlsH5+o;4UHXHWGvi+ ztaUsRWXf)gsdh@!K2hoR9SrjAG38sTtBd31=9v9z4T;Q(lZ&8#D1V<7?`^H-u4>Px zZM|emRONB80obVu<*4f4B8`407UNph^jnX*{FB+byrX{#vn!{&V~x{&Dicg8 z@s)E^dfGf1RIiDLCX}3hTo_WFOcjO2%hAC>?;Q}dR zax3zQV!6(s2jS~Rb>j2ooGNl{P)d=5*6g|Ksq-`v-Ca!cu!z%uh92=XA@8a{1Oyp1 zQj+W==DaH3>V5m1CI3L3+P>pL(LEA_C$btubMjLmEThHR-*~$?w+i8g63D~GqR*Wf zxMtk7+kc!mquAWKyK9@>4?c&@?(NPdLatwoBx~H8S?1J}brRbrHqk}YCgE+RlN`S2 z`}PeH0ZZChq#=iS|IqeJLl$~${tEjKo?!WP@F`a0*wAR656JO8U~d zI<2-tV{R#J|CW$y*N*lE)N(Y1}E&qw}gs zVt-#(AA2%0w(lDaG8e51&aqA|Rw9I~eNA4k?sv~E(-$4N2qBzlV;eCib@$et{j)l` z;Lm%ZN}k>yU4%rQ<1IN|F54~PE+2CI+34%tVYMXPydT>I{c-zsUu` z1=BsVXc{3MEROpRxElt0+BZ=e+PlyDe1C;sc<@i52RbOZ)mPZXHF_iJB;|neYiqrx zb4;DmTv4H2X118Vv>$x9t0sqX6h@z96RA|4i|Han1+h#5$+o_|C8NEcl-w{LPv|Ei zdyxhFdf{uJ9*kEA3+3lthv4PIbj*8mSY$o)qwBPP5rqjQxO0kKLPvzVj9Nb+$$$U6 zh0jbemr=B$pl%bX7nR-{t8;yQZbv)f{H{49%j9;^Z%g2j&&3<0F?R3rP_IbdUg&&s zggHAv`}K7uKb-WC=VRUzT`?zCYT<7-s;W^QcADt|6ea!pxvbb{DzVTnnc5Ch<@qgA)h<^)Y zbza)*S7z!X^ad1A-<+5ARiT;=7@`&Yt*f?JnwwinJg&`6->rB=y#cA55L*^5c}|ZQ(uNnq8oG9^xGlDj?t)&7-SM?g?Z>G=0DPSASgJG9 zPh(qEGG#49;$D^N;&3SMoTH6Phu5#F^jb`+NFFunt-OgoT_vYxSAV3R%T;|v6BdW^ z5_|gYQ;Rfsl)AM4co6z`IKaZT>(xUvj(fIuDoGNY)#dQ;3#!TQhZR_jr0pR*>CiEt zAvZ@=@>YTLx?q1ih=G%xpYmM6lY&c5(;_;bcEjDw?$*lOty(HrsDQ13K8So3wFY`D zR2UBHcw3Gu`n8LKVt@aB&TLQT(2dr&{y;#+zCSz~x^t&yxmjln*|}ZsQdhG}ANpP2 zF3u^Tr<=!`(r@?T2=cGXVtw3opIuwngjC;Bb#*(16md~IK;vq+KBZW_7jMgZr^Dhj zJ*mkLtRqYH>C&c-zRPWHO#B2Fnk%Yd#q|xeKPq{=@Azjngs2{X$GN0T~sDsynHKm5DEGOo+ zMj@IP-YRBIvg;iLXT&JrR&UgNK9wu471TOAo%#Len69Zv0(+Hkvmk}yKGK^cLHM60 zq9GdABSM0&VSjCLI~41m0+DkD&4ae0gO$_Ru8kPcW(d?2r&s^h_jGR)GBlfu&1q7W zlP_8q@m1$C2o!GD%S*4`n|n_xg`sLBz0@4)g>s9r5if!qgyoLriZ$Kx%l!4c3y5}; z>Oo8D;@zYamArhToQavzH1ZK)3#5y`Skp2JeJjOQ)qi%M+tHRhIp?jyABCKvcUM_o zymF=I$N03RQqYC3J&iod7h)q>pWs4C?P`T+#j1TFiIr|kRPwY9leYp(UYcrraO*l* zoy3A7l6~r;23NMAWDYi%ES45)N+g-PaycZrc{o3ep*&kXf2eXU3_uXv7fY4O34b_V zfGIEbsDHxC!oI#-Z~Ws;gc4mPpX^|21S(gvKs1(+-RYjmEm*W3d2Pa?OdNGpxE>O;w-EZrv1m2k`zz!klZMHqZ}B3{$wWTolx5=fc!Fn7Lc 
zmyhuS1|ynE$hL$Wh_{a@x)JUR|Gn`WaxyQT{(oB9vHqqe^AH)^`w`64yIyKGuVB0G zm6cDPgUY%QQ9fvp&Pg;?Lz1`HlKpJ_O}0ZC1yCVosL_Q`3jotepj4A20OsHRYX_(Nm8g{`M&EqYZa8 zv>^I8MW&wV$y8IaxLvW2krVQ)CYzTepMSSCXamluLzzhTVLi7ik=a$;Kra{JOSrzx z)L%zmzRm2s4GIFN1o%C>!yX(g6-4%(lb$VBR3iX$DqZ)rymE%bbC)q?o|x6fSYKV` z^e_b;QC-Qm!ysLjFH?V$dvrbhoZ^1Mt#&%r*SB__!rg0yNX_Z8d1qYdE!M`t)qnZZ z?LkgUrKFat8$|1bd}&-XJIu~Yz30v!5lDkO_&>lew24I$e21^}o^GVqVY-phu-K%5 z{^RCf}d?9#R` zRy%mc(791T7ySLay9}ha>wh)OgY4L#x_n{qgl)8Ov|le?G`(Gck@F0;rr=d#VYnFg z>n^vZ_kN?uuk9G^w|XfxJyiXDtQ~ZhzRmSZS-r0PX}A3J3ena~wR72nAd#R`*uU42 zvbh9Rym?HZcpKO6=9%_owm)hTBH3df1&~d;m|N!=fi*#CZvdJ+^ncglxiuNCYUz}$ zx*SoErOU6L+hlqi?$+g|^&k6O@2WC+Jzp1@+C5Z}zby2NvVIK+vIQcb@>24M-$jt# z%?K8r%S8Qr*Yy~qSFF}MRn3d zaqV@nthTASugcfOe1Fpqgh4%+Y(2jQX0om}cjF`U&Cqr#dL3B`_^cNzVX-7$y6~9SIt5#UkA+bO(gMU8yu2)U z<-D1umN%VX9lydnvG>#VrOCYR$q^k=0PKXxP!buZyV&_&1bjtHkXI2 z_I{rtzdIde&=UD&suEa_w_YIbZU=RLqW9@;eM%k%OX<%8qU=p1FP`lNDc3&QW;s_Z z_L}}#>fwiRc7K=NycluXw$(>w&HS8Ku{4-yJCcv!)3 zp}clt^)X+|=ZSLr`(P!_c-VY)#F7uC^-Xwn5I5BO%|NPbkapYf%GYHFA(TarE%i2Q z=&CtIf*eA!^66hQkanCdcOywvwXdhGsqGr`ma8Zo3V(RcbXfC#(WUhp3VrqJSZ&|V znc74H#2+*ls=CisEUe<_zt&35Q#QX>0BT#FHT#sP#qI2!hN;~Sm;I(|mbxgXxBblp zwW12_bFXwDGb$n~l4%al6du^n>w)t4YtIp7|Slx&QDQ= zi!7b*hO<~~r%SM&_tf=kfpA43#l%s2?2e$`q)@eX*gKq+H%fE@HyU+Qx$ctQYG-{1 z(tkmFzQSP5T|2j0^-Q^j)_XyxTxiOeBSyM@sULr@GhUc&yg}D|dgEwx_>8PrJXGPjMJ=TU2Y=9Y5ONtdPD?&)_&-4CW{2bA{D}+gc&Dhh z5sq?NxH{>jMF{?`8qh%S6PFZEAQ1iDYynlA+?Dns4ivT}pJ9hHpN|PrWDkvc5xO|R zfr`t|8QO@~{VS0cyWM$-)MA@Ht;!Ko1JPNsINtTedK(gyq8T-1TU*pdrRl@CL4Q=I zO{4oONyaMB#kbCiMU842`%R?J3vyRS(cd-;@>77KIdEudINSt_(V9J-$Mjgr7?-a_ zrfpK%O7Wt5ePNvrE3Qk*r!_%72RogJ^H$tc1sx!@*5plQCX?$y5*sgj2dnmqJQHWV zZq566JqG*W0><>yxokHDZN<^$m4A-V)+_8?y4TXR*F`&zYx9z1ZY@EBjgJggJNw1% z*Td7RqRE%%0kuKGUXGiwn0T5!A|`ub0}#)C1^btifwHmc#W@t8S;mskg$G z=sq+rs;5iJtdhy$AAlB>Rx_qkrC;onx;bUW?tLyMH1EBRwL& z*KOi(SeAztbU7@wl=pI81c!m%B4VZ}tJ?M|L*jh3kMvU*4Ab@|$+xq-NH#m@7(-n3 zT33t}PYB8vfP46-ZUf%b!uYhAlHEh%rM^ zv9-0maH-A!Mh;z|s%jx^Sbvq%8iZq{_f3Nm(edEs+ulEZ-kpETwRbhX5B$5)J10Ro z#sO$5ZiO`Ab!F_?Q}dWOlWL}ec%e34wz$dRg0@!0b_&g$-`8nfrT*zcFX+Q+#crG} zi0{D8m`}%twZyx;8dIQSqmfieNcVnzC!XuZe8^|IqFFN8DAXm;=YLIjMXRrmzf0f6 zAzmIKcXE|G!6GJ2Hq!k9FjY1sMfxC7D`UC48QJoM|{<)c4qzF-++A?wfR3dry}e1;^p zNf+W0fQG*%NqIYfPksxa^zCFGvfThUG2}Ng1k-i`@tA^Ol=AJOdQRtyVyzdK4&o^u9wUgA|I=YKoL&-t0>$EmK8RxS&t zqSs_|*uHnyVsTm6Yi+FVi6*SwkCPU`0+kVak{~~mj79mgcGoHkg#`iF&mBaxL2XpA zGz*f{gc}ysdV2`Ar}}xh6Y;(a0kOE9pV=Y0e0J()6lme$RY%iwQT}>ch}0(DB?o&l zk@UL0lHF1j@_#*01<46Yzi;C?U7B0Y`j6x7?i+Nq(-Ue*JfTt(n3wIEUv=W`9gCw{ zrN$P;4+j84>-IgVbgb-G_uG3l4ZBl_^+X3ead|th*9W;-yn&le?)^F=Q7TU8%5wR9 zJnxHWD`P(x0{6?F%R+hi3Po@u&8mD#n~h}3WBt+Jte3@A0Bs}bQxNe*}?)A zfxF|0qjHQ3pZp~CZBUPwgP!T)^CPY%7gG5vl0Rur7(rM9H9}qHcyF6E)O+IZUBbv`-&5}#`3D#gM^pTX2`{i z5!BX-V0ze699>7V)^^E$LyX%nR}c`RZejOcOwz<&FP)fM7yGCtN0e2j)oxWk6>({W zQr-L6r>w?t4=ha!%2lXYSRq6b#yLo-!9uNDLi_h}9jjY{q3%%c?D{ZpUScp*V zDtp~XZ(4rl3@f6UEqyfY>{k+vzis^e*7h4f`KRHGtejbLcYtLOh55)2{Tx60Zn216PhcZA$V__g zpw9G6moU9?wy%l_fS`N$gyw8GiAIiy9Fk7`V}|5y7ySs7W_Z+IdsuDmektuG^wRS7L#E4>$|pt_Zv(p>)AYU`fS4VwkY*kS|1$4gMT~R zrnN*s_^!*gp=>(fnoqz@tlr%>e;`&O-`Fp!j;y|?Tya->D55J6vZAbU{g^7@8cR7{ z#+ehHC}Y1kM|U=qWpVgh)ylV|3NrsAx5W6T%IhT7FEDkZYRH_QxA#0RS3 z7!^c<;75ZxAtTh1te-Z{n6Ay4TC$&Ojc4jxT;OVDx9I?#m1J%|fFkEZ%~yTy)H&L! 
zD9oNL%3vcA-Re>FVIKI?Er0hP&UJXW@8fY5Z{8EntC^~?lQoVMImD5abjf2+%h0Me zK*;!*JaxUg-P?7&I0f|fDm;SYWqDc4hu1xT2$9AlkeyiYC=ZfRabhF0BV}ROhR?+| zPC6iTpu=Q%ClBv?KkI8s2F{FvB>N>3hzpJCZCt0fcY*+6B=s;Tite)#`oPe`kW@nRp?$9=_N9kHwgwI6o6EPv1MD&9w|rb1MEF?@XHt!VWDTF9UU1)xE=-@FLkb-jtHBa6|~ zS4c>r)M7zzP{bp}p8}iv8ZiU_zfgn1G$HEr+SB>AHh;h#D;T+dwtHwsZFXH} z#bM$D4J2GkZ#=$UTS-tK^3GUNlT1a4FOBNde>B!w=Zw%K<>_#j?k_9Q^m_}KBrEK} z1^pa@;d$PjF&?86J5fO)y-HytzF5P3ot|e;UPH9oY1g+;0}h8ff2V_cah^yiHONey z+67tdmW%VWIe+W*kS9y|%AeHz-ILE8fF<-+Td(UhDR$R8g4JVv>gD6RH6A-|xq4;V zX}_ZhQz_^i`SrCN<@JJk?a|vPw=0l@e^tO zNV;CnDW}7Y_kq)0#PuWk(2wc44%sd3Hc1-p4SfLU(|>*UmC;V;ez7cx$upMn_L_3X z-0G_*u2^f=cJ(BmiM99og>N8xxh*D;*@Y?K_B z#2GOY_*EIJ1rlQvH#;$f);)B`H*}wh1DNmm!asR+R_WCd2{dl0?T($D%ROl(PwU=S z`BkhPCV!F5rz)Qc+HCDCr^?QGPR8=l`}JrI$k};An$ag=v*VuRhN9;cu>}>zcT0pfa@FF?&0pU-!$3ZujAqw)3MgF=rK!vOMf|qUpmmQJ?~}Sk$*RJ9gQP zi`L8TeLrh;p6I*GBkXh$@09Cpv#TaG@QS#s%OK&1(;8G7d$LRTE_z<_$gEgTynD=b zaeqhzii-Hi)@cBu-9Dxb6nFCefnG$Fy+j=alkvON*@l2-D<|XTLmbPC$2I*?g3|#h z^G9>i2taMTfoyL9>ax-g6|C$Ys{iCAyh!fK;?~u(yN#<$b;lN)d`Hf(OgT&KHGn0; z+*RVKX{SsS)D3M&;f<2F?s{Q0`|I(=BY%v%VAWt_91d&#GIQ54tD4vaTk0v8 z`8Bq8wJbM4l>U(KVo(#wrlzfyfo14kBMWiqG= zfiNi{k9*P%uS+$LW3djLaJ(oh&7=WeS>9Mf$l6L+vHGrUlXiJpK6w|ahQ<5U?^a@e zx}L7>wK^nkCqjJ_>W=s9`n7fZ!+$^xL3mx{w`30{GR^=lE6-n|KJrQPx;gG@Z;mlkw?xYp2VhpoGW8IG;TER-m>F8?F=Wd*etScgNk%P7nFI zh(OMJ45r~4aCa2=bvH8?K{$R|zYjqYotmplZW6|sNWb|c_6?f~d#i%pg@4jkT_+mN zY9SnDfjFQvQ`~?SHa|5JiafOBl$er@;+mDVfp>Ic| znh7&_A_FRIiaXoA9&d8}-e z=bhdI{R>#e;UGy4QK6Kdy=|5N3NFF0t}dm%G*4VJYC(3Am#;Bg?Qi@kzn_i5ke4Lw z$)xtUTL*S|ETcm^F6a&=cOdMki)amUox@AY_bv05>+`~_?u51z*nj2L?T5qK+`r67~x+w{$g@#CH8Yvqm?_23-d zx_egM)N8WS21SA{sec=%9ruI``+|yrXM1iphf2Gj2W5AHZU7i{%2BtBn#fe)RRN!= zCYE?O@bXD=8)-C>Qt?x(B_0=T*C*O-17VH{3na)pxzBQWF?#n|C8%(6BZupcMzV#K z3~s#045wr1bwu@XSX)V^NGcvcYC@!fH<5O{`2&9wj?VqAWq-s*@FNdcAu+r+(MQQ@ zSpiwG?P@~gN7{M0v6n!wtd1m?u9;wgywAabRGrxj3)|U=hyILmQD#Btgw>j_tU+oc zB$r#E{kXeJkgT`NGD_c4!k$tqUp9>Xz(F9pVV3-%vtYkiG#@G9L?&pb-Etil+8 z+_5L@<&1Y_x3HiEOTcruh;u7EP%mk%j`=#A4q*;Z1wnrgt z%rUEcV&6AXzj)nDaG?dC2bZz=8U+v4X9_-s;+~q4Po)I<%S%10Z{4xmml?`+2hCnw z1%IC0+#S{5F!g{MoT^eT8N@NQ-3_A1NQ_AF`z&;6UU52$AgZ%ev!!sr8$lQ-T2JZ4 zG2}}hErn$=vZgBHh4{(lVG>&m@>4gqQ?0CQXMxHY3mt_=MmljROz zRN~?n`PJLJfTplbQ*!cXeHeXvY!KmpS&Qlu)sc-VZDzO}8VBC@78U!?(rb|Ks7j2f z&6(UOY50j^VSHIhU9UuJz)?(NyHt@BXTQl&#vTOfRq6dUf+_}+iczyStoISpVt@9A zMER#4_5HbcfOBmJA*tu)DVE~vIsMNCmScP2<+wbBYj$!%M$0&<)O* z<@mr#8XebD^NNAm>yENIJl4^D*E+`%c%H*{k(@hdkex1vZo|-vxEiOMw#`-ow2?0F zDo_XjH0Xn=hyY>4mzsKkFSdc=3|mMVvRnn2cMC^Z-93XS2I79;b$^_U7JtR>S6`Ys zOB(4+Avfc72tS{_aJ!wsvM?Q5pVb@-)ynC~(KpEl3a#ypINxtip+srVSRB^!G;PY= z!i3dWY-^Onknp7d$ho{8Jk^EKg`$Ml<9^6UYO_6s@+#WxH+f-PkQGfo9ksQNGA)LQ zCY{bNezjv7*}DS@=-aoz#WaAnfDQ zxz-7_>rkrkXhhE=Jb!8AyPIDt+aU#5`nf{rL)g~Wj8y0Srp)2xgoM7lNd3u+nf*3;gpg#vtK)^rxl z#4Fw3`D654iQso=HU|Ze;%v2SP=YPHYu@mlC}7&78ZW3q;(t`T!AdLN<4W$2M4_*| za+x<}J=NQ^iXDPA5kb1=QOpae?FXK0LQC{Keiq0nXuPy2IMgwx;>m34Y_-k7o6z-^ zE1kw&%Y~?sg)y3{8U)xaU2mcwjoNU63Q-5mo8k9W)+)B=GBj}PgYMq9+^H1l^+c_SdY2Jf z^@$zMaru%fh*X{WWt_aiJ#TDikV?_OE=b4wHd6ONaep7^bQCi&n1~ljdkWs$O;5L= z!+JU%k>j-Xbv>M3G6BH4rPMMbS9hAQ?t+q;muL7!AOc+TOHje^3#&h-(ea}0((w#J zYuBqgAvxR2??DFvE+o9P&-|I zE~%NBzJGi|29ZD+BfW_^<;j3Bq#wo8QhIj-d6Bo-<&bW-AU#DTApn^$*eF>CyiGJ@29HC`zpai)5F-C_M2K*n#^)ALoI_-k5iAQ`g-*O#VAgUKDDJ$Dg(vX!(z zG#^(>5~G7dPj_Z^iP-CIyVEnDw3ds-y)T)?n14T4RE(;jx3f=9kLgZRk7xxXGd-Y| zDVgMwj^n3yV)%<~S;g|TMUW%$$B$C7fQI$&&9oxw3PitG9~G$vGT|OcD`AfUiskji z#kcdlf;sw#$7;G-J;FDce`-)8q&=-=MnANsG{;&f$4wZ}dtF#A!1@o?s3zXhUMLXR 
zg@2Zf!}hU@0A9PD$xnL`Mvn|$_I4@R3W6ClrC-+A-|VNmaok##HM!eSuhd*rtD+O>r?lOx>K^5E$=U*$%JX)i-)p2PPe~@;XoFRF>)bi_qCth2 zabcy`#HmPmB6hmSX)4?)w%H`B3tcYF+kahs7kALIN~5CP`>y2N`W$)O2?DO_Bdm-M z3T$aZy>cIDh3k{Eqs$}=fhg+*R3eNu@I{Fn3u**uO3CVhQJ$vIx{b|iO?$~I%NE(k zp|NCMKNLCfd-8I7?7yf>u+h-S`0RQB91xVnp`g+{AlrpTDY#yNZU_PJf`% z3NMCmW8*W=fu{P{9d>JJO*i_c&-kf~ip^8szuzbGzS?ZtsREcRoeE02O-HOWyLgjh zLIz0+T~9EY(L~?uE(U3-BycqaLiy|a@_K$RR$!#My&{i#TeroN=2DDIohv~*1p!js zH~?ps?Ytt~P{niYlR<}cRee#Blz*d*E(X~NxC~>*U)scEo$3349mKR>pY~ll`#iu2 zze@*%YOd50*^BUmh*&GE7&6IGC}^eaBmq!EqD-XVuUl_eCV#Tm0RJ>hgSNUxjP$r~cd<^Kg#=PTja%O)_H>H0<)azv zipo#Jipo-HmGcNRP>(!+%YtO)J`m8?c4A<2$zpZhC0u`JiD|V!{K8r?sMwq$NXn?0 zf7v&q_ZA;hR{$nK<-kND3u&a@Erm4m&hNH^meu{{XwleU2X`Z8r+-QcwYQ4|;F^B< z&()Fg@8|m?yP*`K{!*y+4R_PoBXSbpT61w7(|I+j@JIeIGcq3OITWpaxl5PEsVxrEkbVX^SVze-o_6BCn~A|75wmZwy$0`` zxV*eF=pnVsN(>AM)oj~{qFC@ zV5b#*k}eSxDdAaN!;~K=9oZotBI3*_U)@IAJe0vZuGTh?D2oLkZv}sO^rdIO_{(xQ zH~Lo0pr#up%Jw~o$xF%ntBSCxVp*uR0RvqX<_Ae`9zH+;;`aaK?M-`>=eBLJ|B`PJ zim<_exmS_mzVAEiFA+B277Q4$O_cwhVXEqB-o5v^iLR9QoM%^2Hg49MYp&Ujk&=lq zdh#+-^xf7Q>^nL_6_bA!ID2sT=lcp;ek`Qzajx@^ z2Cxp#(_uayGpTLQ+kJFAGsE3B4sFn6sOoybwgYeGQ@mep_q~76J@=0$^7~OH#sq$p zm8!b?fQdsNU#C)GLg%J;UErAOHSiMmN4}!0+2+u%FW@daURLy~J7CSLm^JtPx{#yt z+8^L$NA#lEkVdwCk~Fbl?UmKbi+k3}F_JE!SxLk)rk=j)kzn&zz#iWZn;12LZ81Jh zPblv#`iumwG|YduzxDUh^_*JX15vc|(jIwVyY9~3h|~!cuS8%KRDe|*xr60q(VeSS zvrE$;ouG;3*|-Jg+3B%<`ZO%(9im|ebmq2bpFW3{#I4CvxR{2JSK9DaNN>Ns5VOX^ zD;hWUVQoTpVF%mpINGup0C7(%pT*!-&-t6b8U-je%M^e8dhfkto>F->7oacK&V1Ny z>f`oAK2^xV>`DS7gIS~NKtVsvWk<%(hY{URKzm-SUm0--7q*Zbv(&&eP~y$l5Aj8R zY-}p6O8b5vW`+AuvL(9tB;mT|?_N|&XnI06HoopSsyU%Eb3^6fiK+t~r2^VwzD~O} zxWf;^zT$rs;CL*w@gfczTQfEu@Z+M}9qAtN$!UAAZG_9Ge!LQcc_Iq_jP(6+q3?Y( zX9$I|USuod;%;;sC(IdU24SXWPkk zSGhx`RNQ5p6x9@*u8mmK43O)kpkd-F(cL_jOMli~)3d%0QB(^0gk5=gMNbA0kB_Ssza{g&ou3TR z?2&(fQs`ZF95sOVT;!rQpYk>s1yNm-o7EVT8ZWNCi1SHsCt_2`(?9Fe7`V|FSHd5|~!h;8=I{ynZEVxkJnT zWdv`fpsYR8xQ>0WirnLD+wRLvw@6E(0;tzBm+ZMYCYGSBi$i7jSwxwtf`Z0NB&H^S zmWcEmTn*=eH_9mL<;A|!`rZ;~kekiD*_s1+9{Sw7y~!uQ^d9rEcTmuDxU+x0eJA%3 zS0`ZG1*qycD68kg*NHKE-us&nul75q+)Cj9I#1NE+b5Ls2?fi}0Ns$VS6MF$gX9l0 z-98V9H`ixWy6b0>c%~O=HPhevu%p=HX_JM)Wfz~{oB7IcfQEVSr8VyM6a34o>q^}7JiFIdr zvia@d_(p`5!vyEAchSZiiwUc?-zEUzkx~hZ05XOJPTMW(1JhT_@ zX?wR4I09)VL3ACr2a{-u>TTU!@00}+dYgv00)t`Ji0K;*(u}hQb5~adH=%pn?&dN^ z(HQ6jSdF$@GV|xMP#loXc89C9vS_EUw{h}dd6b$4TODycp#v4th#U5P#%}*{nkAZgefj} z$7VyCn3-$)5b#N`yZXrD=J_7vl_t!Sf?D>T9IGkgEa{883ABE;q;t0=t)jb~1;+ib|w7WaX{MOz8L9%!bz=i?x z^(vcHC+c9S8Pw9EhsddjiLl5Ade2M_Ub0z##} zRh$``AXrTruecheO8ch;GNo5ftfIll>OGN^r~uwc(fgjqhAK$XiqCB%x7#8=KJQt* zzAZosiDm=v4omfoKP>n473_4Ck`Vd4vu;|BiktP~nqS`o);csX&knhBQ?C#Air!VD zF;*fJ_xyk5H<@~F0Drv@cSg{nJPpaay$<6iI4&#DcMzS_(gjH8lRf)7jR?P%VI9H$ zzJn#!ZV$Tbyf!-|wPT?%wvx~$^-Ud#3qT5LF(qRjn7C?7x0VX3f6EXAA-0%}&hf|= zKYe}t2phT2YZ$#b~Sv)PTt0#8+!LZi- z%m#nrszW0$C)Bj>6rjcSmGBpCHg3I~*ZCr(ZQBk|!v0~E{@4G2>oD(PVd3agecn{M z_nQqGU%~Z0#eOKj{3fP0z;0mh)kJWA1weidY*7Jq2onpyk_Kw~#JCRi~&pW#q{~9U{GulIFNrlQ0^Dt5JVd#Akk`AG7tZ-+@0QUe3f3#&#h@08MgrW^hZEqF==FQacv{P-E5-pAWC6V_JC!kmT-6Ec_m~`w(iSaB=9(@Le;}H{2fcnQf_Q(; z;9m4&!6~Q*viGPO=uPG{^rM&ft*WvP7%62h%D2Gc#6*AY2dMkAdREw&t=l0#w>zNH z7hp&b6~7gV@+`CS`8ID#3%!T)VNTogHO1aqjQ4tu>aAV12U5-M_;?t6vA8|MV<>8S zw8d6;AM9!+#g4Go*`4&n8!%cG=Y3GclbbfWy{F&TS1@Dxl@eKJjLCoT;j62Y zPN4Dwk%A#IyX5ZPfxpK!2Y8?@(J#rF8%5r?VQYCwETQQ z>l`W48-m^|+onA>)%(<6`t8C#r#`(pZy58LCdhrH!%#JcH^BPURZWC`V%?s-l_jUH zJ09R57fhNt4h5A7s(ER_pY(qlyWbuQ2}I5c%D|D6N^f#ZhY}q}M}7N@)0c6)A7MAk zGpif`E{ZDmoHOC2D=}cF3D9IXYPEEDa9?XTMdfTyHIVAlhxg_hBi4OG8eXWbvG=Uc 
z>J8vM>;g>?duhK&8~S}(9X+CZH$_c?nHC7T9*80j@F;+Ph7CV?t?oMhlE1+hm+t{u5<2F8dTQ~V930R)c{0DvEWp9-m!WZ zl-ocCmsoULpG2c>yT5QEdAxE`M;b;16ya7m<%6$rhwA}FwJHzv%FG%yBPyR20DHud z+Z8jA*Rn!*5P9zw)ZBl`r4n&@13vK`YSO)zMzX7sI0vK!ArC{eVxC-l>X6)2lyzZQ z^kR*MC-e5=g6Q6`PR=*CK=KX*Cku)tNQC8=*X8rQLVeFx@ZUlm*sC#<#^c=v5YNSI zv_za;wA+1M2)+g6s?|BWf~-`pH!%~~8hIe=N}o$>=LrBW@Q!~ygNx733#0&J%UV(u z7hLapwlLmLk>E}t3pf-HkZj)WxsH+}VX#uEBPmuzS`!Z_P`WK{LVHn;yKd&!0ODfD zP;XaC+}%Wk^vyH1j|FL0JWJ{wZ~&3`5|)t7A2sD+`+ z;dNX44AHo1eGGpNP2$EtK2v4-vC*E*(Ry`7FyLP$-U(td5&^5tvI)Uml~Ido!bg z35cLR&-H)XyLgVbz0vb@Za+9DJ)bN?Ys2_butC^R`L@>%yBjL1&pmJrlm&&FqF(6+ z)DKZW#o0^#B@WJOJDM}KN(0sYHCy#c(4U6vjJPP@3_B)Y-AXCgD?kU*t@E3FBjsM6 zHh17W#^Iw3g2Tfo4n$%qYoyfx^df)K2V9Tho#gc#0zL+ zW3{l2wBwvd{dTshORh<fC?J~wCIv?f^V|ULX78`DVP|2P+Yug;B1@x3iII0fwj5Nt)9!H$Od;m9ww+ z84$1!MSj|ghsVD52w))sRP7G9GhSVkJ3tc3$@X?JhGlE)SGND;ifX5uV`*N>xfbV( zbrlwigDb-zA?@~!5(%L?U95}AcR+vIbgPxjzS^wW@^6hKEm<6RezDHk&1nyWG5xu) z>zThK=Ai7D--`Fp2|i+P$??UNfc+R>;z7FHQ3t!7pS>wmCmJ}S8K<;yX;gnPIj_-5 zdyttPB^I?P4VU=h#{<}LT2Qp6voMdJsFxGGxG!n*D$WhI<(*}1ECF2kn%MDsr^_eiy86mdjA8!ohExMiC8@xh2I9FygpHXC!DM#ZZ zwj0TwEz>iPcEJjCz*=I)kqLiv43RMnQX})v^Y=_jA&v{}jd5{>* zs)fSltaW4dBxIeRBjo;!2QXpBZRg?{W}7Ui%%xjpwAm z`V4YiO{Aoc^*!4GrDoXOVq*X${+XJy7-_c^QB9*NDF+OQ-K+~9zOjEiug*z!JnTqR zY1sR0x#FF-4aWJaQI&>MU#~Nlvh*_0j8&E8qFB-EiCSj2(pEZ42fohS*#T?{@k}j* z$SgDWB6^$Vg`o{Ll@e>)Eh-Lzfsf^kG0H9=Tf z#-Nto{c^U%swB{=_W*x_Qn=p$;upB;uqE`GsB84xl;e6CB>vnifPojBInPw@WY(a0buYw~mn~udxeG+ZA8x(imyq-=})MKk! zLA2RzQ{xbFPVma$%i8H-^R9RV)@+J95NqP(u+-uVA{R&c@Jphcd3i(`B7Z*+tdeml z=%g)l*2y~81G|5N%;u+lYwHX8w0nybAkZ7<9F9e}Z&9;G(%oTeYV=z%c^2M`xQsba z_e}IzaH8Nh{_p?jsr#3fy2s;JN8NX87ANHD;04I zF!_JX1FANNTHyFAFFglSw$pOL=yHfjTS<**&A_OF+xz^WgzAv1JV;$ot;_&NoMTG? z)efUEZu zYt#3T5HtC2c>yNB_enMVGe0RF$_dh(t&pDs(f1c0%ejXfGePxpOujA9b zr|i@u+$++1bIZJk8KX6bR1jw zyhp=VzNWrsJXAMf{DM(ACa{ocvCLYT2UvrsN-wk)E4IN~7h$lg$VbYOU@S@TAw=Q#&1z(E{=;p!*|A>|x5#*x~fX={HFUOXGT zxPft59w@mExK=np+33iyram&{J*ed{R5QiOAV^b_ReTh_jH7k+ujDuydgJKo}pu@rq9p~b1~0q zWfw!wKve*@Pvmo5pqziT+&pL#`BuWqO#VG2cF@_J<5*NElz;-}B?f%(1L*Eu8afoL zH_kRdtA-6dmdh@9H8{7ldP)_AZsqw(t@a&!pZE?L^)Y{j`UVFCN@mzzG4MP)xdbYv z)A4#wx$H=+45C+<8kQRS(87XYt%8E{E|8KZh(egHlOkZ5x43@>Lkp-hW%TWs@uaW7 zZ@Od@1^VhhA9nyw>)T0R`JZv0*8i8jnsgM#c+v&XQP3}tgCBkMJx{;)GU$}=@BF1} zCjIbpzWgmmIQGZ>^|k-~(sQbhXK=pHhwM$y@U5Fz?8R?&dX5qQ<{e>=H1ev+Rre+E zLbNQ#4~4y*%nW}WLFK-A*0d3su9ZsNN>O+H+fnxi;!m^Ynf+h z!()C|IOwj)q5>!UX8y5vI)f>C&W9CA*2-hcG0Ld*!nZB*nM16{f2>9RYt8?$7yV-{ zgTMEbKHh(Q%Ma%DOFq8j;Ol$8=5+e!JFtl`pI`Tz&Z)ll&EK}s=Q_CVw8#Cnn|64% zv(jffvB*Zlk3q&;a{{`Rw4O4~AVc!J9{YZe@}l~Ps!A1zV<3aY-<-UV*w|8>Ocq!m z&N$$0^Oz0n176K*V2kE28Du~hXI&XZFroH>7IGHj9)ZRhA~P5uIdFE6w^%#}C@cqpZbcta zTzr3k4TR!00uZDIY%l1+Jdf0}&6T|7)X@;I-x;2ijk@dXW%uYKiVu03WF311>}L>> zX3UmCXE!sSNV`Ft09?4TY0mLdXN*Z+zH|cS`rD?R*8gukKiMf?b9?^Q@tC9E_RWvo z`#rzY{7rMr{v{jJ{7-hppEl8t4e}#9s3(8@B^%RuZ)0KiK&1ZcDZ!5pnPk#pe(qaN z*Zh)GJo}F(X20!pe9v}y*YU7^RB5bFw$K3r#Mzn>2`QnsXbWx*vvjR185$I}-Sl0&auwaUf#(|Dh(M5l=)^n?V2Mh^HOo>+g? 
z7s-#GuwxP65L%7PIwlADS>8)pNn1P%o2m$w*vEMR@&O@XVGFrTA6x_<1wAZLI${fI zw(hR6xx?Igmxq1NGBr$yPG;R6_OGDgS##L2o_Yr9l-H9Mm#0jDdlr*Ui9L;>BpaBd zzC%eGV$~_+8G><;@*G%Qxy2$d#SedN?u}c4%VSMsJJ{<6-lKj@dPy3=n(TIKIS+6g zh|jLmTl@jGADUx;fhaNYiFau5fMIX)h)GXc(3d-D;)0I2o0MnJ=OF5Elty%G+`EG< zU&H@X7~Y;8Y}7U%oe z@AD>|_ic0hwxyxNKwcRK_&((ImyCYh>-}pjoUvzDf7;^TvN*~9Uv>~kw0}NJ?6#mJlj2&w=CpazOn=#(#%0OdjG@mEsFB*p@iQ-yyM!*||^??G2{ zXW|1tL9QNZp9=;j^y)Z>mZ0E;(0%PbPH3m}c;8^d!gcP|;V(T0`*zxYzWlAv-WM-12AF%y zFEU6!Hqp19`qFQnB4dAl`YnHd%Eff;;>+h(e(Al*-omjTn<>x!+6%w`Er09KKkQ*V z+wXIyJ^V{As=wvz$A?2q0sDm(rQ|!Qn59HFB`X=59h+_bR zBjTzUZHt5=d$GgyTBYW#>&X-W-2(nv>}79*odF$JkK|tEjYpC z@CIRowiw6QsCFfBg25Vbfx5e(9hu+netdi^1=oF~(f{;df2@*w23RtxG=F znc|V;#}@mg2d4SSmcRCy@4W|p*$iv=%cuW-j$e9Y`X>U*3zf^vphW@p5wy`xd(X7) zu~&aFS3RHhWyosLV%XRSIbZZ1$#evB&pAZlOY)Jbhj0H2e$9W! z2m0D0%Ist7{%L#sBd+qbjKgZdc+%l{QhZQ z`^e97z$}Vm{yYQ1)Z%{IZ9BY<;Ew@)IXHgUoSBpM}s9b02go zeuXez9vB$!PfQPMUocVREtK7;gYSREbFu_BucmrND#3RVSR>>ZVyEQ<_8|wku@#GW zi6W1ghsCvm%Uqp=#1zCPzY6YCKBnVdTkm~L%dnD*xk?~f(nGq62##gy^px`jO4$pRt@j@*RKMK41IQk(uI&kVEK=<)1OZj~@KkT#)Cja z+IMzS4CrHbeXWBX;)|E*IiMqc{)VLVm#zKFmt?F#LjxGn1#5RuST28LT)oasCb;h8 zfz)*;!`x;SvjK|W)uBrPzPPH;r07bWG6 zi)ya}OqI#?g1!lmJrxSMbapbq0BHcQVPh_!e)HsZvN@N|N8lf z;e6|j?>O;qJ@oOTCOyRd<*y<3_Z@GB?}Hp0;?3~U{}Dr&bl`L@R`XxB_wtYHAWr@* zi@$6g$Z%;od$~Tnd2l zm^ak?h@^^Gf_`>d8Rc)LD7fXJ0VYu2iZe$~?fATt&RdpF*Q zGfny-_|gx*=YN`a>|sFr|FI#z=LNCsk8SFIVuytJ`&ji4UGVSsz@GW|IUk=b*J=0_ z@EM|HUnhT1kg&NCi-3*d0MEvcR;vf{cgV^p0_RP-qSJYmM%ylp_zLhf98`sIknoSc zj7jv{o5)<%sBBEcDA0BQ!w*QtSYuhvc2|Iv%A8$1+!ArPe99@cux}kCF&V(cf%$IA zo#wIYk-|=`^Fsva4B^SpEfDB!r$#lhQs_fmN{D}5$_T~^4VXh5>49ekj*;ko;I&r|6%+QkNBmhKeqip*5@bw`ib9t zC^j~q7uV?wTH`Nn=M#&@I#X`}c$X+XR(WEe{j1YTGxI>5V z!GeD>jAJ2qSP`jEAiXyxh6Niu{>U|33I}-Oyt5E5hb{_Wdm?}JCQQx~cUqNE*Z`4( zoAz_amcI>B)X;Md4Js_`*({*wU>o)$m8!@CsR3lqiF1>P3`1<;YMS6dV&pwoR>G0J z`K)08`1X1Psu9@#cy2`H%o1z?@Z+#XH35J5L#RBRcd+)E9J_x$F#W?YgXHP6h+*Xv z{JZv;kmSaLUI!G^1^EM<+D-ey$B&C%bCeHf97@+sMn#@jkNj$J1%xyFgDLI^nV<9& ziyYWj&gCDulYh;tO+SCuNBGM(_=nB)@f)T+^VgpGZLj^rOh4xvKkJtjO)#g?4r_88`W=L=ttG{9?!`bh!GQ*7pFBm z_#C(q^j>3z7X%P<*ptBlq_aECtr35lRlwYPb38tK2xX4%I)$?4_85a zkx6MDvdW9Sg8GL{#KkRieU;X`^SO7{=%R7-ir(Yd_JDolsQ86!AqI#z1;-7oD}gRn zCz2;(tq*NLx4T$$h|%hBpfuv(CS#iNt%jX(%?@VDp zVF^n&+-Rio8~qEYJ%Gxff;arq9vnDMCPFw7g9M_4OQqr^9tA%3WUha`Wd!&;8G$fd z>MW0k%d$o+yGQKCQ52JNpuhUJ;0F;qI@tf5K=p)&Qe+x8^fp_MyAv5Z$orG*Kn`AI zXh>&p z*B>#Q-!bZsJv8a~f9QXcKaS(yuey&vY_Q++{5wYaMy}*bf1t?qkKD_rh6S?liKqXL zbAR~&-+JStE57r6AHDPAC;rIgw@l^&y3R~xQ zUEcJ!_xBzrE2Z)stN6(J$8Y?1^#+Ku{mv8qRhKvEHt4>O&O~02i?QELvAfCN{oaRb zW22!?WPg9A8->eY<8IL_8EXvcnGWRD6Obmx`LNdoR0wda?xXEjU}k%iXUZe`V$k5B zq1MRpifo@#u28}GbLqYUwT%q8F!)=4#zaJhBA`p?P%E4ev1!;ct0zJZcWdZREPEI= zjsV>fVsmJS*POuAx%dq#E67o%Pd=aOc7^VAhCY8{N8)mVWED;ygqq_KQmMSp2DK9s3kT1pxWcF_pF_~#U z!g~I(Q@_UcpEW;U{`?=gWX$D&dZXX-@sBw5f5^xEu1orsjj3jG+GqYl-NDyBb%w15 zpACPpO5Z7ivkR12zd1Rvi?68c4f0EiG@cT;O?5uhH&0_y?v zllfDNhP?WeLswDtb(BbSF4hH)Jj)C`xd4W9MFV!C<^p($6CAWmXR;d#+R$Nbs)+T% zh8>KXU*t>Jr&6?WP$y&{(S@nPJ|v?S4zYg+>_?6uhDdVyPa6Fvu3M;`E^{7^lFPC78 zp5kad+(#z!S@xQv#xn?9`6EnO;RLgQ^BxENTxN z#5{Rwqt(Gn8{5W<1ay$sI(mPnr}|RFMA6)wD=+n(?P0&8;e`eq>Xfg;KFm0xTEmV5 z3ZUo`Ci0O2YRmSB13Xfe09}@$ITkR~a9!-3h6@s|liv=C6C=rmhG))Y^hGF{s55k$ zY)VQPSjwnpJ*U8g>CmJpfgR{34*gWd&QA4Nn;!3sc_|&t3(*1=7Il9O!(^w?+{YfD z^yRNwmtS-7=i2%{_Rsj+cg*W=U;d|#@{hdn_rGsDaq>&Qa}m?E{#a}OG4K2xA5L3& z?4WZ7wCGQAiG+PwmxeDM0v~;uuyKKu3L8ZLxLx-Q9mJ=cRhEohX#&#J6yhWNBne~A zOTfKz%l;r1^i97cy^()oU&?a;Up`#cRo;k-bEm9;k&fR`PRqVmXLaaxy$mr4$jkhd zJEpJ;3x!s6I&;cVA0D_nqMnKs5Em*OcXxg7$t@GtJ(d!3U8udw(Ddc*z&?uF88385 
zE!-+VY;92&%pt}K8SOcVJQ!fIeQD(i5MclW&ja1?+wkGEbufQqas8Y);SiI>qIFN( z9JnVC9%3gIiu*@CI1+N@0N?Kb>mED63G(3HCwV{x2iB?bkq5**{;buT*4Pwp`}usF za)!U`_MiHx$u|GgWZ`@?+JE-cufI<=`p@3^sZXD5qd)BbuUOJ|z0}`6BCKpzcUlEG8cJ|gz+v=DX@Y-*28Rn%qUN2AP0P=EV^ITA zsAG+s!sZONHyCYvcLk^-s@>V6%AMviRCN8obQOJUG&PzbS9 z2?k;-8WevRVhw0OX0k)VyOz?|Q;dVf9%$JlPaeoAT(i z?Vf40UQGMN1#$?8#IDDXLJon$_|Z#$#+hJuf9$!Bj`^+Y|E#t8yWVfYcKAaF{lu+4 zafxr+{3B=JAN}RWA`gaK&>wcik4*iFQT|JeY_fkH{)%x-_uQwtkMhfvAT$WoCbOhBRb1tkygK_sUzV-_RE_Z%M$w!_tXxnM#!$hPdz zV#P6g>}{wo3wh5)ul3aTBHHL}Lp|3=FF}9AOn~{`Puig0h{*Zwd-%>dS}uj-mawVc-vb{EtbG{nmN8@a1p(&a=Zu&CY+Y ztxz)|s47>JdufUzI1j`Sb>x;5+iNKlWd}OumCGs0eAkExD7*l?daQ8{xaA)i`PBX) z=4kuFHAjt#i`a4GVh=)06x1&8r`^uz`<=uPPiqJq@W7%3MhMk~@~~LNu$#I;O$%a< zXkW^aYepR?Uv@DMYypi`4(Mx&cLaZxYmjvy<)|V{L>l4|GU_&A69VKnHu-^BAb7wv zoKsmW&hL5>)o2jYM#Uj%3Vv_$*|{b>25f03e}Z1J!D%@bpy z{qqL}=+g?BGu0H^U@7av*!O?+-#2m*ep($%=h}l=5Q6Ol3-qqZfcbxLGNB&9x(T>A zz3Tf7?`9^^1CE^niS&T1JxVqG!Q?QRA?$GUKYmjSGW==g@yV{I>8-=q%%>moU;mrW zH?tx46Xx+DD}*EeX)*o$hp&A4DM8QpAz}PnVl)_~x6oWkb7%AqVO)P7#8{9g;dwV zVPr?YPl!i&k|hI<1Z&EF;rboJUYIUsEXM%ik@5XX)BEZt)L-E`4mG{cyT|){jl)0x z140vG{Emc~n7U3i#D9N3^o>ZVw;bw1(L#<2hiR?-*2>krW#YbK~{js?D&phO~ z9@EW3NJmrW z21-0)&kB(A@R?I1Y4y36gv|UxNf8O?8S&gRf>F8fj;1oVSWq5d$?7^p6Os}%lx1Hj z9_X)AhSw1(?Es@MI^Y%GK)a1o_mws<$agz;p@EIoGqo8UIj%fNB4@`7af0tmW&NbE*yaI;(VqyD(9~FjEnCGN`?1wA}LI|0)2(k_wXz%H}=f< z?8U&0l~JgWJ&GK{_;oI7&}&6UCzlSRy}6AV%848ztlSiM)?to&8ch_cX~f~X?o*3! z-vApEs&$yi?eY$R_=uBwOKcKFN9||;ZW#jo&^prwBEEkQzx{JP0H(o1#rK&0^F0(; zN8B{dH(*9{J=d^+LBijs%rvgYotae8nZ@TCe3DBGHi^PI?a-InF(7>gN>|Xp$`YQR z@8Y_KGXN40NFUSp<@!-PG4nXja26H?cc+Yi)f|VIJMWQ19>yBa1N>&NNP4|Ejb&3~ z9)nI10?2=e)5MX3p9ek=%bp!g4owEAUFZsBx<D)w& z*yq4FB=1Jn=V!-=Iezb14-`>467D0X;r;PE&%wak;GX9oEKt?-D1xQ|q)A;w7#l}4 zQB)+{rBTA~^{=^ue95}bsLuF=4uYAxU^)Cvnf`yK5QymlcM_SsNp8yI&afB@7J+%xi9eG`k#8P8h90=ncs@7Z=8`bHrdB3O!wZdy~qmwa&} zI+iMT%tpl}?=J98oK7ioY42J*yIHT;<~2I~#-RKnUuYxN{U)4YmB6JbnM z&n$o3U(Z1N*VjaXV!96Vxz6t~n&cPNC4!6Z#oVIj%W3^%o4Fe%6c6u)cn-I#l(4=j z9*&PAY8OAK;#i}jrWF}DGhXs%>b;tbD_g<46I<2Jn-nN z#WfM|LZ=_bj{|r|UY2Ho=g0eFT;W;&`s_~fi#eV2;B<{L{Q5iOAL$Z#dVd$!!G3@C zFS3Jsfqn$*jzRSbrrea6+nyyE&m7Xh?qUyfa>eC8yc#tZP|3jl;- ze%;`X`^35~aIk@JyvA|kb6hINfn|R`$5R{^zmE4f9)FI@9*z&6;{Z=Uu)jwfSH6zZ zI9`8_BNsHi`}h1qM$arbM)FDl)*u6B3uJtzb=&Ev!@&6ctjj1C9zle^VOM7}OIb4Jh%3uKZjJ?}yEGdcP9yhpY$Qk2COoN|E`J@L#utv`G(H$#^gm3;0hz+k(|qFh7xqbv zM}hS`?M;~D&1dh$7y>C3ItS+N z+N?fn5_)=C%N>l|D4OO9ogNE;lE%;cjDN`B0IVEwk}L6!D(^e;Q*J0mAfc2-bvpNr zbD=k|R{r0ePvE`D@AoFB`}~}bz2V=#2j&m&{h#0S>o_CczmY%%XQsT%0Sp#@p$0z7 zj>kR)W8H-;{a5cl=?;t+&d)l%$Y?wtHO8zD%Ag@o@bg@3uL}6zT-@LV)OMKX#kJ} z{`LTFe5C0ms>=CM``mBZgZ}Y<9l(go3XxTlZGrklu5)?@W2`$PYz#!VCOZbvu(<2N zk>D{Yex6zPnjnoktzYf4H{<)bccBmo-Z_C@RsWrg;h1p$v13^5iwW#FHZY518eaxk zP8aqi_{lzdTFM|h`Y&T&U`>J`Fw&&kVN<})fWH3N760bBH?a56tMRdOBnS61@Do8HPDoMNlKk~NB6JZC9mp_I z2XYiJ)lTaUu|ekfJ)f1$PjW;2aSrUDNp~Y_;Fj1crf0+Yf=tb@jv=eeP9dlt@e9IsjtZ>bhG|4&UZRe0EP$AVM z!hJw-4f{?ACjIAl|8kxuk+5z5dEVqh{KxA}&xdy@L#bmAJRq2V(?Ep1$PbBr7qHhm z(620yab9b9#=p;H(vNNV_qhVj-Dnj0p84^!UOr>q35OR*(4in@*h6?U_9A`)YoyW-P(8pnPK{_`dKd?i#0Uat&^QCAT~H1 zk8e1x{SS`o)4lN=KJ@zZ8O@3eurU|C?@f9$5hr^>5%!aR9sPa(>G>$kXN&8x<|L2) zIKszZrRnzoMnCTO^}C1Po$tSw)9>GZ|2kiueh2yzd=+7Tvf)q-J^hZhCH!9Ccii*yJ8Yk+oagg<^>uze{a$~a|M?yE&vgFh_w(2J zpWiQ^=U`Z2dJa&#;k^VTtMMEX?4YTCMEJ@&W`?>C)YevRAI?N2`GoSOPXXh!LFW`KR`8prhE^)4cc#H!= zs_%1uLE?{V|2h}C5zq1K+>bvs|8pFR&vV13{)`JZ=_kfU9OW}EO7?aB_n4HQ=l*pr z)+?_4>sk$Wu3c zR`K}*a5sH^ItWWbZaWXbl1Z-$NPy|Uq=Pncb)g00wj1fW!YD=jb8ZA5QSCu85teIi z3dcDvQ%Yv=^*yK^`1FXoBMh?$g52Fbqq&N^`6BC9S{H2OqNg2HPs(Xq03Kv@aHp3=bUMOA4gaZ*sZ@m 
z1NY|>%>DT+FkFrjv|pd6)I7N7etnkyaol9iIeYwlJ&WNT9`ReAk!U6ZuqFe-+`WVN zEc%lVQ{0lvfiJ{drzb%b%!E7yBcFMHV4A0hC*!{=r#ix(G>4ACR`$X z2C{n2R?|;x0XYNwz|Uul7ar~Z!+ixj{Ga#Dr~B&D`LV-3r}K~w#(jhd|F2;HDt3fC zPM^c+vzRlx9!U9;f$_(40k`k~g8b>ZlAa|QY?Zjq$Otj&6mdOJEj&%^|Pae)PaVEjZgz@De`@jYT1*F7_h zKlN)o4>`sS;|}@2_Y4^A|8w(y{xzSU_y0T>-t+TZpK+KzPxFHDP8_)Y(|CP<#ya_2 z-$oNHBjY`LJP+mzf_i)kG3y?>sM<9MDN z&%sj@v<|kS7T=rX3oLV=xIdo%Gd`dBTYTi`Q>lyQhF_J)YyUuK(q}#=l$_8wvjYU&`<2eek{?dHqk%hx73s7(ZbehtIl* z(Y*EjUd!ox$UmBKrt`jko(uDf$l~XF1jFM{j^khJ1oF```UD|@`~9{4@ELOV^_ z`I%pip57b#KtDYv&U0}7qz7nRXFC5cy@J-D&vRq_^?%CoixKEauRtV!@^kzgeIMWX zxqtuP2m55ogH3uHN*fFyX9gXO5CX`1J3*mfeG$_*zNU4FaUG|B^$qzak&gp&2sRNO z3?$S3gq_u%`tFzEbC0eX)QtQGgxkmcd3=o5K9c|a^Y`un%E|wBnJ&KVD5oH zq2*5^j14iis6C#4Qf~8U>j#eKzfT1I`VXoPjjO2Y$j=!778UNc5?X+Vz?H6n4>98w z5EG6OQH&?&7s|-pKIKrDD5PK>P*gc^UcOrRH0g-88>TZbpm@~ndcR6nO0kDxIYXmb zm6-4JwIT-_4bEONkEO-zI6ue^K!;J*88{gjluKq?j9yrOlb5?z0-8H1^~^+F$LaBy zKi%%M!~>nm-G!X*js3onSn#r`K+1V@1IRJbC!>+ecDtLCbMVel$tFF@_lLXS5pBP$ z%HVo72Q64^h%)*>p+cjs)-eK%ug^6gdcT3%R52oc4 z*?=7$D)yi`d+#ZMIQHaui-C3vn^U^-Ah0ub7aTdk&{zZBK_hzlwp*Koj}lpn$~QS6KdV6?n1!y+jU}b5fOD+uvZiOY-pr0SGvQJ#TxE}}*T zn=svf#`l|o?B-B(zh%gP*A&1voAhwI+BFh{msB_&In0Q(A;p1a=8BF=8N zx4^nTR`v;`mg%M*pF46~Ed2gO-RHVJQ|)`G4H71JDxSTv4`O#{B!%jJXLE>?`+B!z=nofOuC5bq5S|n@7_NIht>lGx?fmwC97vQk zZa~M*YSt~{kC94X*^th<&`n)(8>@cbThNDu*P?mkBZ-YGkY20VeO6i|8gvQsX%Jxt z!m=(j2c9lTFhoQ*X7Ng6K{)SAip#u;DbG>EN2!(8G1p$t@SeD{lRp6(AMDpa zOrQzYJy#~XVh>mK8N{2=PG(nsu$ofk3rrL}AnUj9#clIKO%i!{Smi8UvF!4(Sguq? zDhd0%A}OtJ#%L*Udp65%wVpQNvN4=hb00c)tMUtSQEk-~7%zo5wX`FOA7j)x`Rwb8 zA-t~q^o@px2a|c79>+AcE6wN7Bi3yzay)D$IaESq3r6=i)WpT{lOd*ml@)OR#5J^~?oy=O$yM`yrzH`*;Sv*neS=bJpH z3UpVkZcFtnf#on7=4XN)_3{oL`jt**FO!(9r4Z;P7uHIfn9>i!cC*YV<5-j0&e_#c zU*ERr`?jOkO1VmpH6!Y1Um}=H*qqu}T1Ym@9fB8b<^{%p0N z*3XS>rt|mY9;xboE(n47OLiV2tYe@E)ZlW1GF(vN@a&hT2WtgS%c9UzJrs{eaDOkY>oZ4>M9>-DPTe_fb6MW5 zOf7N`D|xrpdKcc#HObfW*gvC$+}bRw=k~Fmsj+{&1D%&TE~6%+)5SaJQ|cChB^yvR z+3|VRK{lp;UuUAa)Y*MY52wp3*btVuX~Q(lZUpK859to3?y& zGu-#Xc|})}*}T2^@`2nOukK4H*j3^8f;O>MV{{x+$@LR5wKtz9>pgX^-@@T)cZrK32d4 z=3HEmJ(w-0?xp7=FOc4outA)q7Ws&!8?(0CF1Ev6YE#ciuUi6e1`8NF@g?LJS>I82 zMv_4sAJr*mEpD7;@;L3LR*|p~OYjj@?izLJF%Om|9sFTBwP%}-2FB-X=amKiUGcBM zzX$$*Rrq%g9^;0!nzOXk4MU3Jy~zd>C9q|#!zxMj{J3@6rFawkJ81}uU9@@-peH%pt;J$AVxVWGc{Z;-^ugcL&Ycnc`#3jb73Go2V{t*G^Q4FC137Npo5j+9 zM1KT7o~ay<^6NQAmGEkIrkCBx2YGCK?+awZYKzcpo^ubGJ)5HBV5bFlY>+R^Y zs>jutT8Mf#cfq)v(b-{sgE<<^I}+f3E5|fDo$RL_$=*Wg7N^T%5oMjRZ*sKmEfS(a zIcN!9GIy{B_Wjm0;}~O>0(J*L(p|xSKAF9G zZcV*vC-6`6mHm{m$X|BVdzPLX1ACIP2)D#N3?(BFjrg&tMd!CKUi2%# zQu8v%CmSp4U9iARZ#;8QI=$^8*ljUoT^|>0ZortirWS%1bN=nbL0k=`x_S z9gL+<)ZCNx+;aNsJ+e2Obhay>PCCD)$um3LPS0(;IGOYP^1QpvqqaDI=}p$EbMRG` zo-;QoFc2M;?1pr-dyX>+&8LgS%L;WppL6T=Uvs(&dX>{S;9p-kV|ek zr0??BT+efnex8*5U4Cp&lx@wt)TxDiL4*_cmH&2&cuAgo>sS~Yd7*WwdTOZ4oT3qN zi=NBoRxG%LH0`;_zkPOpeXnmzV|Py0H#aQ@FB2E1Buxkvc!~5I0sea>u`4;JckB5z z2+l-M@~WR`;bJWvb>%MK-^t9+-d0a0#DL1>Jbt?rX|u+(@gz>G z!q!vWxJuMA7aOZ`Te5p6cbhlPkK4#qg0 z=IVA_A8b-Myt>&d;W|67Mv$VP5s$y$LtP{6?po|_&D)=2$IB9rL>IYLFHwx4u7~xw z5(vG#WZT>Mq?X6?PEOc+t~J~Ka73Ps6lOAc*FwKrJzZ7YR*#9R{^c_}7J2QW*#%6Y zExTcEaYj(?@0qxNmUo#FXXFkY8z%*Id*oK&_s4Nn$V3{y4yVU6_l3fJUa}`_tA*gE zqD}dSbU?z=UT(W?f3fSWs|D@pd6e{lW=UJ#>k4`EW>&f;(g8_WX`N}1yC*>OmC@^9W%&C`Df?UZaPQ8? 
zwVLts;Otj_S>{`7#(pH`ox#Gtz1XwI#r87rmwoc;U2`i3bf-3)M|NjDc(U`2em@=# zv%J5LF?KVX?1t_gbhrLRUe1wMpVPOwPF&Slo}0BG9v}FN7=kOlscvEXn1RdGe)pPi z+S;?z-Qw|RreI%|?jA&|maN$5CQGWad_KsBmUzp5`)r(_7p<=ieK(lxvABanahoi6 z+hoNKX)?yyGJh81hKl3mJ%zOUbMo|FFEi`0O^2$x%|qu1I{NM`qmeLUn)50`!xwqs z@6i(xn_<4oMu7h}9{IAj1Nm5PUaolDJZIXhaLvs&leg8S-#r#tb6tX@w_m=so||#U zn|Ppq&BJW-xID?-5-v~Aymes2J=*r|_I11y$((-8hAO(h8o*T& z4dD0okFY-h9y6KG1d|tnV6!l?`;oeTJiz9KIwXfJ*lW5fx(mCeDf6^ht^vb0d(Y0@ zQApbMo)Pa=l^;y?qHH%H-yb(1-fb`JA|LZGD;6MGYnQWA6K3M2n6GY1e~!<#VxN>Z zon7U&re{m&hkkJy&@CifG2Gj0VXYnqV?Gqm(|7{S#i^8!L%G`Ss~x%TSG_HN3d`sJX3r%fKF(i{VY%Q|+EWXPZoYr* z4&mykr^>KO=KJ^ZQY_u5+ZeRAY`ojzs-;=ylrQ#b3;y1C=$UOTR*ySFsHsqagBlIs z?)!N_l;pkf9#TRzvmhUr58$PLomYrM$vfAhE*a?3uT_*t(e`=2Ef)iV5zp9u&)`ES zk85yyUgrnxvTb+Ud#{V>jOsU!qC|f$Xl!Zea%68yxmatfqq5YJ-T4Ac(xF)&J5LNV zvUB9)UiF{BZEK5lPs2=fbn*=5WcgO?%<+7U_}lTIxD~Y*PrLfP^%mKGwciSMO5g9^ zdTpLL-{5bE!CtP!28-#jZ)YzM*%tTf@+y~|F%!K<|Lkx5VX@Qi@73tZiFYSJAqmcW z)*9t_)az)|(FU1y+-~t$zquKJv2`LWo9?}Gudn^~Ua4twmDP3kdX^ik>-!6Fzms=t z=xb*!)OAUfeJdZ=sF$>V0J?s-^aKNfNC)8;k2&6seYew&1=x+$1n;W@^!ILdFh#dt!Ix;FQ<-KAcN40 zAXKxx`AVhaNDchfOPQU&O-LQ%$1_==7JJF{)AQ|VACAk@;#}TdYF+Mmst6og?`KzS zHE!>Zx2i>R&hu*ogppu7$nth`8g_GgzME+x0iXh;8#oDk*$SZhB3geWZ5FiGl)D70 zIm(jv6iII7qwofQF7j)yoDXO-%UJE`x?mj23~enbnp9}gh=LdnEvT=#eYLzsP*Ie6 z*(fuSHE(=S4CvepG$saW5USHY)sA#ah z_+eyMe)*`{n|gGMnchEEMEc4}?PcFo=1GqPdQ;?aL}7-Zow29Z-dQ`DGMpV2e7VkB ze2}C1rs5+ypO4xu+bV0&yf)=QyeSv299IjvzZxB=RrlA4wQ`@vtM*yai%v@Y7|_zK z*l5pfPaB(m(X2CgAW~_~Jv42sS4fK#MebuDgQMJ$9tQ>hLjl(5nFcU?b!NfkdG>AK zZ#*`Z66~pNUCwnlh?KUDq{qtLn~T}Nx^d}V%A~z)%O;GPrQa__O00LGpKV`+p zY4VGH%+p#kBlcC@ufw}=&PK37qu=AbmGh-WUM~b!=%X)3ylY%QDf~D9(7IsWH*n-e z;}Ly-S4+wT4QQYoiLQ_t@YQn)AQ_ESDJp=ZR&Kz(ZnPjy#>$A%8Nhh)Td=4JV&N^Q zXiF7?`I)yv;#_YR#C*v;$4nERl*T?!a~APAcW&~x!<|}+R(p`2*xPIwjHyqWLf!}B zv2=GkH8aD~grTzW7OUfZ^CZQkw>+$_1O&l<&0deoxw{c9NR^jJIVI<{xVh;MOMf1& zrfSZ*$d0x$@LS$|O)no5U8T>bLYXh|M$V=DNXP6JE&FlwdgU}q{VLw9bA}|0eR>{l z)iz%|y6&pz?t25~X)BMOpkSrcc7D*w95}tP3Xq;Xi0S7yrWRIsNs|Jf-_9PuZXa@H=o z%%cN-OD_C)f7O>`F;@D{1WX&?hjzjqw#ROWX9faOSFbOYz*?z~F`}bKz-C%a{#wO1 z8#&n9HMomGx=n7E=bRm+C}@}zmn*G$u`g9zB_As z&pAQw?5R-hf)-?+MzJ^dH@!B-a%9|-aB+7|u?!Y*4Rc@)5Js|^zt5Z2eclxRSnaCx zurzgi`9Vc*A4~2*+q2DbSt`cbd10}gGxf6D>#19BP>sD5!FlN1vMQPt8(xdu+C20& zs{iVCF2AV#V~27?_lfL(@1ClTE?d$lgx?&=2cN&6?tDYRrvm1}xS1!R7Tc>Tcv5~s zSC2#7uU|T_cy_^k3GAn!7=rzF5#Q=Xe19R$NJjf*c(TN$TA2ei>YGc!?`WSKa+)6c z;hmW)6lb9@V71h4{_Xr+JyQTka6w`9g0TxXZlv8j2FY>5{9 z$?tnK2Vu_&Hts5EyY0-f|N#Sxd8LC7sOGd*L@bT}r#x#$D#a zX7#d-H}a{QqaELmDIJkJL{8VRTLFLwSQX(h%+%7i*J-*xnGgG(<@TI^&MOcpoi>G6 zmu^)j&skEtuKP%TL4Q7DPmYnWoqIT6j5r5{|6x3lHgHiJ+sL)baz-!BU)nn_L7!Wz z9Rd1&&|K)U=MvV&6lPUYOV{qLT90KXW!w6E*tn-Uc~fuc0ie6S4`Z)S#S_J9PijE} z3=%c*gmzshS8gRrCt|Ea@LgS~J)PGXdp$09=(Jak-kZ^Xf=h1L^M|zPlTbgrmIE=n zJW7FmA~uhv?vS4u;AhvDBs)ICl-!FZyMG)4PdP=2tVJhKha8(rb8a+l-j!FI=lkC9 zjaH5J(apaWz(?53Z|~-i94>6>k#mPUEmoPGBSM3HO|9uRPF4o#rSWb#2#6IPceSiU z_ETKWLFvtZ4Rb!CW}@NFDP~@GC#!#<&lv@3E0q$L!=6pzM}P{Za`vP|LZ6TF44eJ& z9X&!?St|#ktFHh<2JLw7YKPlHz4hKz-`T<2+pdWvqIv66+7i_}1LM|922cE9r-4EH zHF&$X2}W#QDZ`TAsV8OEoY3O#NCU8t%?>HJXTno|>JfI$o;rb^f5Gp>&R+|UH6SjM z1C$&0K()VR6tnA0CDgT9I791Qfo!o6g?4FQvNlw8kd_#Qu#k+P292wGIV1 zrW3J~$9bdn)bsw(>+za^Ki>S)ialDQJZC=?kySOr*t<&eCBlewU^g?^u(lOYF91i#gTjxA%FYbA^2**Tu z3Snm#2<<)0?7B+it9FX^c4NRQNu(L29NzJNaeq<7(^_~gUoODgnR_n5qUMI(nA2$$~j%hf{7q7-j*eBac@F|c3XPKvS*pRg~SHZ3A zJ%CLjgnCup!kG~i(kYE+#q*(ar+zs%kIwq`ieWEqR~zT~R^s_F_t;g&wlfvU|6&Gz zp0>qytB&_8=~X~fG>Ynl3Jk|RTG7$oZ)f6>e^KRQvzoWDFKw4ZrAR`#K?)>!g7ORH zGpiu5&!;$JW3_PS5YRhi=7XMv52WX5=d#fgx`j8X-27e+gojlFaa+GMoAPz&YlM0T 
zM?o)YuPDaBcnFu(eb!un3$nS&#_48%xJrC{q}%oDbz+Ab^mq{ME}=d%?|>bQ_188$ zgihL?U12yLwLwONm3ktDXKF5ALw~HQ>Uma$efEZ1x* z+uYt32X3DzD&t!7o9a}qmE?9UMlxMI>;s&R6N%cHEqEURlE3cHZ_8f8@SJbtSEj~e zEhTbljI=L;ZA-k*4+mQ3cZkq`o#wj>KV0AGdK9`y+CX;qkKPF zzIlPTdi#`qt@_khZr89xT_NGti$bK{TYAO5Xf8YUILUp^?x(AloagRba|T~-or|-z zE~T`TE}}pl5rLia`iYt&QM*F;zUqgqsTnMUS7s$f;%P>eO;H^i{c)x$j;_omu`XbfW11OSo#)OK zKBLlw5fFhjqDhA(=PEkAzO86N51L-}IPJDL)Wqh6$o^&5=Kf1sJDaUTK8BM8M9udu z0c`s+v0QubS`-?od_ZR=x8v5Vz$ zW%>1q%39||Ubl!YMv_H57Dju;+1^^WqxAn_@87yq)v|4E_y=)+0Sc&Gay|)iM!Zv$ zgNUdoAiw^rXpAvOX3mu>_ulI*WtZ~pSu%1&goHw;(+{oHR-b2{-^gYiorB}c(G7dX zc@;J+v(!SFwoSf+-Fe?>@@Bo9wduK3D)PMC2D_*u7j`QrPcK3YPa_baKmyvls>PDT zTV&XPXLlxgYQEEd5a%<;H+?bV@@HIDr@cI<7gw`!0p5ycntZ0Kn@oi1>EPYp)csgc z3O8I>KX<}cyi{In;Ku1&4QF7P8xO#qz_8tqtCpPkb9g1#yNz;!%!v_HwdL;g0h?H_ zlWfu?x~9vCV|J+DC;IkSZ#D5GADE4HI&H_a?K|7Wbjq1ts>Z%VJ4ev+LjP(`f0H2Fbq{lbblZmcE7j{dFQlSjVYb=d|?Cw z3CHHwnd-BDH9>{!=6cVik+?j9^U{=$&kXBWV(-s8xcd^ygH;G+xv(5K^M+!*2~3W8 zana#D=IzDYjl|3zZ^7IPo;SwD?S#Ir=oIO-SsYSJXftuSAQsDIzdj_+`Sc)_hH0tBeKl3iI(lh|y5&o;DT7^!qs6`qBaYmG#P(|NYiL@`ktjaDDi{pq-W&4hb@ zoeil6-0s_K-t%|5mNsj$pJ{ED6E~VR-u1%JNJ$-@ik7VvgP@PAoxsIpO1ySwM_~zT zOR5HcvLyFycP$P}7wIg@$<&dA#H<=RZWf>Nkbo5J#W?0FzYziKOQ1s99x&)e=SQEf zVenTZrnZ5pGP!%s$ybHk3Yi^4^}QvRGPk`fN(TvxaT_(->qtBH{jMiZnslOtsFqesN2A^_>=i8~D_lhVH(h7HiZlLT{eo=qx>2{M z7sLgp(On@|kCo6}3U}^nE2_nr?Wq;4oZ7LFZWoLrtw`q~-4VBk!S8MVfS>Sr zxF^qy8yi8EuGIFk=ljiUhpOLuvx}z^h%=n=~OdoU&(xq zuc`48bL;L}BSi$7E7?{D7t-AAj4l@w4<_iT`modP=02Kpo7;T9nKLW4+vEfMq~g`c z4wC=OH7l5>YUYOq5a-J~l-c#Qyw>?N`1m20pGlZkR4;wSor^X`;4FUSmrOK&XV=)i zIiqq5`YzmIX(*T8F> zjG6%cGkCK?;gx*jcL}0AD#2%rXynF~zc!HE!-uhNrJ&y!{0i>9xfj4!TAt-0D`btP z@)>H76W!-aUxYL5fx`$dxIog-j zRHH-;h+b4B*x)fFOhNRLW<1P;YmdlZ!!`JYX^1J8H{Vn4* zXTZLU@O3NamBaV8Br#rpoxz$Hex-Od&F~=XtWX3?Z_sTTo>5A;jjceRp_$j>@9<1GVh>6S#c1_FU9j9bp%^Jypddb z2d;UZd|@9|4GhTkhpR8^+o0Lhc~dyzIUlRbI6#V5oI}mGleX37Yyhraj}RUn-nvNPKe#pfe&=cBY|0=8lcnE zO+>@DP`=iGffqsBbzk!Aa3S6UOWi9%7M=qL;|!ZXUwcUG)vHk+vxU4Xayf?=26GTz z&G9^hhoRUp-JC66s{um)!$QxP#ahwMgvpdVC}t{{sFb`#rPGtIog+*^%l_Wvh;r-I zz@6Gob6%ISLpl$piT>tNmwhSEG2MNz(QTT6L$ zkkusF;db?cmxNlWhM&;Ub|v8tGG8#70eO~fUWK3 zxT<1*z((Xg^=JCZY@We-|5i+E3)Q;>8Cht1io3V&LgukMP}iOG2Kd1%X?*z{Y}ReGqH#ywi5m(n4v5=d!w zPqFcRzKZFk$O+2HW)=d)ES(tm|5FVM-kGImkzu=XxsNm=c$%lfF|5||TSJ`o?3+rm zC~CE$_szXZphO_D^Gyo`tk`Z02-z~CHQ9q>DU^6OB`A0EP&I3JZ0>=7AZ+r00g;M- z!t+_#9(t4|Skd`ptl1rXF^uagdR{y)so^xyKHldF76UJY8WQ^A7#YJ5nLF8>@nBBl@149oBfPwgyUY&SmOo7IpRR+JiX2gDW#0 zUb@&iRu6UL>YkaLHe033t7mQMxLn31Hi-RpbuelDb&7YPdTQ594)rDHgY|ZQ9Fcqt z&)#z_wd|2)?&{&DEuA&Q`kM!=R1@=kfE>f?>0M;2i~!BaywIPceMP;wX&v7AU3{VySk z$!wR52F~1V!$qHLu-vVb#@fb#q?u`Ka>4`^t)kS*)09 zkQ#>G!riQj3`0FZ)P6CNhkT+=r&eZ>qd59(DGl6_C8}Wwm=1efY*C^pod&wjgzV1d8$mQH2INVxmE`>DzqZVy|V#lu@3Gn4jzxr2;40S+Q- zLtkb)+Q&KLX4nTxFSg2 z-c^4Lx9)>o`_7RxH&Z}=Qz=x|pzh9n0M*YuWSbkF@BBGCvpzuq&mX-#JF3e}-Fi=YOO3GMxCBbDULKZA!1daH1x}rM6IsIp1R%hN zs$5Z(Tv8$J3K?mK(x;t8p-$&sKnEV#*4^62Wz&_Ex{H(DtCGi~x?^7#OQikUJ05mT zRQMn$)z=v2g|QgjF0aAM9Uml=qfozlj9;($#W(gzH^WLE+JJi@FTM4SyF;i$2erGT zR6D&Qoa92XRkJ&Pb@aAZ%RL;uJHu|8$0(2fm15g|Un0YOJ-{^b)PT>tI`i~=;}d80 z0x^$tUlhWax%uq*R9Of4djy{ac0uMmmvJu35WI2f;0V9P6Ih~Lr-W*mBXX7?-BnK< zt)3w;^k$Uqba#*nx63_S-5+8%awL=nSKlL$(BAL2O10pBmdC}$ahC0U$?^Vn59b`L zKb@X#VXIyifM(d|ErD43jY2(XZ@t~{pdQO3ELQuytTgMwTNph+PGjotp$)KlOh;4_ z4b2iB$kavy?E_imHDFV2)~6auarp<(qo=#I@YQ_l;cs>3DS5OGcT`=r&+C4Fn&qTT zEb@Is>jU0@Z+?gsK+*wZ<1a#gN7?dcJv#Y5eDcoU^=XN&6(eWy zj#VbdQoI?vh51V2UA?Q=CyS`!_CPPl&HWs!EfN!d<~pIH`#?p}&UE&(8?vB&KrO^l zeVOSDIVnHhsJBb8c`^6L{s~FroXu~K>ITW_?A^{iBoLJD;-9E`*q(OkQXY#UDYH9rxG4}pav($>u5GvzF|QVf&JI-xkrvAiXbyzd&6v!k 
z=$5Uwp+nA%`S$#*<|a2fr<1(87ZQYP^g2Em(R=9#+Rb?!Q82Sr)jKnA^Yfgr(zs=# z85hzG?>FWFVcc-8uZ|dN#XD+f78~_IAj}$nK8Xe}8mxSHGF4Xh>w55XvYU5#9Cjxz z+nRe!0CPISe0h8=S*w5TnVXMroIgK%+f`y@yxcI&K0Dd86pS19{yMB0_jQ*@E`W}( zy_>@%5kP-wf#Qa7&y+*gjxdivp={1)?_As>@xEoLhF^{l&laqy~nMP z4>BBZ`4`FeMJ>=*GSMxiL9vPVU9&|lrt znmDXD=|0Ci{?dqSlab@`;VagoJi}Xm%N^G$Q@#~3uK{1&0Jr3wPr=47q{aFI+vUn! zT5Cd>L+ZW4h5v2g`=s{x7YWGo`C9?qm9_`% zQV(?OeJseQTL&b{UWJvhs;ll3;s>GpoSa>M6dNCf27cVZFB>~NuQ_ZzCRKQ2`e<;G zre6{Vl{+WfqnV#h)${be7Vxn!O2c`FA+QeQ;r2F=frw^I=PjAjd6!drOa>H! zp`R3gNe0?OR)K~Vfn-Ugy9mR!JOc_|hV|@A9eTB1@JRDB7GHd9ssCuF z={W`(^Lu}$>)R?q_`Swo=!!35W4iud`0anfIRAxC|JA-fXy_LY43HgoFTo8J$&X5e zFyYHB!}@A4!AwINAS3aseN-JhNGLIekkf!8ooID(X#jD5G=O>kX`67HPxk@{vl%G$ zQ~UU8`E4JdP)1<+2N~D~iqSr81K>`{9K}A_NI+2tg?mzjO093LstN52C@)zX`Gdyq z4-IOyHJDoio9e-}1S54S8o&Tgepm>+Vd&9iAdQiZ{o-w5$h)1|1lL>fUaFE^g-~`S z$^&*f32?4|UZNrji|gDHWN#ji1p>5gW10eReYXh*2Q;{Af6)uB5%DE(Y@jDZ z2{aX-vET=bMC0EHmHQhr`Gdj*@&qekAPzA!K73J^aGe!qvI)Prbo<6aOj_W58A}o1 zj*6<;yy2C9eooIAFc`9!7E}o&99Dfm1O~1Fn=}o7x8)>_FsC2%J?;$!!vG-5&~Gw& z&6g>{zw2f~58&KOL9`}3>-QNN5?Pu0$aH?r@*#9f_JcAiEZ_H48~cYChzdOKKz<-| zeBCBKuz03e=6;ZyTS3q#RGI|KGJ!mXVgP_Zf4`Ig?eNsUGQ@xpCw$`vV}^G5AW|mW zIQr{vWZ<`5eJ`e93Z&)g27Hc)a2sNwwlC_=)V}%LGH!;^_^$n?f``EG+yF0 z{{;-9u_yP57f5Osd~SMV8IwhC%Qe=$~h`T&m%T!$S%Egk}!O1)w5 zIiOyhdVu^}N_;Hw)yfcQ7*W9$+z1Upo^0A>9CJotuT(FCSUa{-fETx)Q={_8V8Rza}pg87v! zMw2BLf36Mv;AfmnqYu6pp8<>TGv?j;gMh|$VGQ?vq5Y<7#P|W_7#dO$QMCtr)Vl(S zTZWFfg?j{OY+iqr#1_`SAp(Dvt-b4vuTorLeU68GE%-2zcf-<#2mdoyBS~ynEHCb6 zKN2uPZbd061kB)Ok4zJcz<pd0z6^(LSElhCb`y4tRe|csx4W!mWdioQEVRY8H=8e zM7ModaJcdVZuUvMu>Sel;1)0;wIG+yZTR#tll33`F@aGZ@LDjTF41p1IlwuMiZ9QA ze^TlD!;1}@_iay`T&X;ri}1&}Omf-(<~*iz{rhuUul_g}AW)}s z!E{>nmh0t=;N(nWx~Ie6pG!BLr^C;6e+@Vyysq!yiuFNP3b}Kg+HLBu@|XaimjZd0 zPI#xE@l-B>Fz3++fM%Y?T^;Lz--yv^Eua9^`xioQbUPT#av+A@;3Ibvf#?PzE>von zwC7Wi5@g7|f;k|;9@4uVB;n=f-rb>*&&jDq8WHEeoQZfEb9$B8D(}S$c3Df_e|V`Q zgLQ?xhz1v-S9Bo~Mb8*J$;&iXy67p5tNX9Ji7^~&Nl=MU0ufWw4r4O6#gYobtK3;( zo}Aq6cq2f8OOf(FXsZb4G@)K6O#d{G06B&+b4)?rQR*Gqn#^?-&Q=l<<%_EfbpF6w z)B6L1g!P*)sMW|WUyLA#X!b%@f62pzB_H7Y0Nq4pAe*?&gm&J4c6`{ez2?#uh(GL&f$D8t8y1}afs`+eA%6>VGXRYR;K974=5p~ zEZD++w*`Fr9jR{&-EZklWf6!))36bq9uJX+v|t6d5dLbx$_$o#7-_$2e^DR@1!&Yh z7~t?Tf7n4U8s{F-5iGtjCjD=nU22q=kY5$5i{-E;am`gu+<=@D)ALQ6??D0Qi z8x`x0Uru;a340q?87vzee;3!!xR@t*zEGWj094i^lbXfEc*@Z)R~}rV!Kb54sQ}x& zm_=&A>ROK4GdQ}!V-#%!cbyz0beE|uiy&Z_8&%T=2iR{%T*aC|g<1}b7oG#4Pr<~S zU5dwBdPqLdt`q^RGKO&d+YU?W+YX>8PKM-vtzT2z$Deh>KeR)Ff3@$l)|%QZ`Gu^X z#!_J9KWmA9zzzOF#2FHvr(Xy;ARqq;Y5eQo37h#v8~$E1f5wfUwcBrW-5>aHFZgLY zV9a37E2IH7XBPStP-l&-hWSp*bw}`5geErNT!IMMyABYxEm&vRyD?z-HOC0pbzi`G zf1Cc?GfV#7GaVeQe}8+=@VUUGzW=|!XMY_tVd@$Y3!05_mYfct3}wf6FkKY*vG4Hw z9YKGZ$fr0ioEEvqT>^RcqNkvbu$>}2z;Si5*QLppfl>kJFVyrEy)-@V|KB|iqWk~) z*Z*k$>H(tvekRa_Yk?14`b6A^4-c zCd*~C*Y9z+klpXt3>_hp!9dz75>#YT8fn0uAN4~#nb{JMN8J#xfh5e`$~aC&mjMDHvS zf3HHVO|QN{MS_|p%rrF;q-?=`;s?_{5E$QSKLbl%>}Njfd(=F9o^{M@Wotl7)zlf*j~l-B#b&Vq)9`5$0^F?)9+CvE9-&eCArr zJEF@LgcMyC65>~zi)DjA^V+}RL)tB6f3oGZ)8zu`Vq>as)4{%C@^S{Y{;#-lKgA!X z)X=ZEa{8c0@+YQDV{Pm5@pPQ@*Kt$a_Y4ah+T^Xf=>X$Ct@Hl&{p3+eo96p| zga7sYrsK}`DQr+Qu?D$y8GerYmdhrH*?dr&qWp>DY{d4j_>GFnD?R+00vw{`$Kl>e)dujb@CaYbco-I`ISSs203rUc}V1z7-yN2FIkh_R^xr3 za}mn{_WeBd*C0b&eGAlS{>h_F`gP*=@1B1)=hK*PoK$51I(F9%#H&Z!l zjw(u?LWB_SO+IOY$L#UFde{I)XH$wKU zFe$)s+V+e+X@~@_v(4ML#Pj>!`$^EDDgl7) z)NcD6@u&uINj%@l6>&lAxpB+?v7H`38^G#%q8}jsH9c?NXKuR2KV#_M*zNB*7qKSs zvrhZWr>KNQyCH@%jWPKYe;@g@+Z3CFL}rTH{ip5Zfb?XEd*u##lq?)o3X6wOIKn@_ zcQWD(R-_;JZrH<|AX`|%E-?P1c0xvycW=Vs04?ekeP#OFHktY0j>4ZPT%+(o-KK9V_y0E?%T%%e~i!HH9oSLpS8w> 
z&-VYy+x(#5|I@xEcy9Gj%Hk6A3#3+jNxD1;t??x+femlUo4GTEzvTwz?lNaKGSYez z26{twzU;^)j=zlxNV0H<1Eu>vXkW|$h*7Sf-v?p~4{A@fpCh zzD4R->i+fnfM^nHf9dNm73C>B_cIw}fhqq;1JYqpVj6q_c(U%kKC$?BC(=KSlDuOKke@AmrxAc|V3dA$RVsUm* z`BvQo4dS(KAm!F#<0flW0FYOzh<{q&OzSHJeXN-JCNdQm&wkp>2eak7-kRFT!|`hc zwokAe#r?2isbHpb*BubKqXY*}{#hNcCPZ#_1E4kKY59SG7EzygB`-cXAwQ&&<&@t3 zWV3r7V(m}Lf91Ei!o;@v{PXu{a9HxkxqyeR;_t4w;QeZi-xYx-E$*4NX6wKAH=a&%<(k4u3 zauEhi(1QIs-w97;zEPX?-=f6R4spsbnew~;5lTF5f50QqOcbh!P&d&09_5<3AtF~V zG??F!69BToz*4aK9$$_m*XsySR@g|2JpKr(_(3IZ9HL-gO=ZGXod_Ha-&=>$fw|s_K2>n{5c}qkUMN#=`pdaM|CS8tRG*H{=7rkuDUbJ+btxCFOiu}jn> zhZPYpTKXk9RJhT=&&&~hnh@O>x!{wGe=UkF|B0*SfxMe=j=)fgK=1;)2|OxqwLpN6 zQ1M5AVoJa)OE?DDb>M+zUTs&<@0wqmkGZp0jC(bGvEY)j&FsXf8 z>(8;kKdachW2HNx72}5vX4ixjw~^TcW_j*1NqbZ+!59mY;sIxzC;5cuzcRaP_geew)|DBNXKtB`{PF$dDARYmN0aA3ruYb_Uc#jGB zE&Sm4ai1T!&x0^G#0kGX9V@^D2ztmbw*K=k+FTXzJ+Aq8q zfQj%20sq0q^62BT0o1vde*jmGKqlHK|K2m8H4&LY`{pWedbk(=HD3KUR{rnbvlW4u z#kqXm=XVkkUrnb6&+`RI45BI9Ql z*N4xakhR^U^SqG(Qt zP#I`q9mqbp9DwjHk)8GA*!Q8%3Q_ik-@k`7aQ$oC_wQ@Kc2BR%^Y81*gas4~DT>NL zE-PJ^PyZFdmY-g)BB1`D7NYdr!+o2cDQUW|svrWLOJf*C(*ie+rv@p&PrE7~9JEsW zGkRQ^5RG5-`gTGne$na^PE!A(*(W697rnlp(Bt3NCxm49e?_ySy%BqzUO!Ig@$c&s zGBW?Z4qT-8MZZ7(3BRuXiC^FT#;^DPh+h}}0lz-{!mqCkQE0z{JKpzF%RU z6nj-g1bCX8VgY@6ntRcPYyR~`jbrC00);8Artxd|j9(v=^uOt6q7C+q^_k26L!>)U znG?1d>)hXcP~KoHM?1=KE6G4xhJq48-mYNoyiCVhe>j$RFoyq!$Gf<0U^u7imOg#` z=Xh{2|BLIG#=L)bo!{5_c(Rq7{_~^{0+|B({H~)P*=3(Q6Q=&(UO%2;;E5;XwEga1 z%Gl%l^8Xy~j(dfO^n~GNKVztn0JF^G9-i+Hj=O|}sk!}Nmtld8wbV3^;MHOkW{(~U*>VJnv|DQlUPpIZ;&5_`87#!pU~ClQxpFA2Z@eo z--N9n%MIMJ7u{8~;rk{?!Me8k)xV{~m`f{OfUli&&rb5dY=- zP4mPD;UD6hDLV;^r;GJs4!7e!FyL0(O3j2s|Hos6=d`x`r(=OBmfA-N{J!&jUZ%GB zZ?9+SYna#iX^i)N?)~@qOMdSA5%;Obe_HgXpJ2TRZ1#jT&v6g_K!T%=+ij-x$RF?V zfA$Px-1)DcmFf9-V<}rt>u?XXS}vZ4NJ`?*8WPX2jIwb2tW4|2zkhykEojr-XMRC{ z#WO2oeTwG=YbX=vyb+b%U)T4+Is=*i7Y5q@;I!KZ_7A_&`1)A>4gMZzZ=R1Pf9(8c z-8!way%4sQ3ykXxUIzyH-@ZPrV{jhA2IKnlGZ5fXHR0M(u!?cB3h}ICoNlm=4t_p= z!m^idto!kPGLYN{0qf!kRh~@WV@*7v%0J)3>TE)lf4=v>sPfPE!T0!d`ab*~|M@=p z9{>41{vQAN9_z9X+Wd=R|3RC7e-Z1y(B>b+I@V_&wE6eAAGG=RxL;`V&v96%ebDA# zWcv@={CgbG=7_R*(;5x|%c;G--=m>0y?(#P+6VFe@Ar6)aJ>J)qT@Ni@!#*U{=xCz z@6ms8{P%m@>nU#YMX^JLFrnjrQ0!lH{137n_iZX|_#XH8UjGk@{d@gCe<*fX&QI6> zMYg}b*Z*_e2UFhviA?W*W7$7g^9fb{L9%}^=qc_gp06K_`v-+S{>HL@j-PPlA0+$d z_~ZAP{=v9^j-P*#?0=%uKYe>bt$%Rnr%%70Q0o&8U6|GaUxfRFL)SjY^1qJ#AmV=? 
zyZViA{~SA6?E}gF$1{_Ce-QB#YW;(hCx2tvKR9&U%P+$HgG0}!HT?(6{z0dUzp?C} zV{uQv2=~vi#pj$?f3%~h{Km3R*Qih9(+A7`X_w;D?h}@MY7zYzZ_mGb9UZ>saWev$1BY7kMN2b_C?{mXB3JFx6=RO7S%cd+dL z9cn%KSJe7nuOkJOfAGU!uLHH7Ok&P|K&``s_z%?jKfDgqI&y=5{ro|#&;S1lwO+xA z89(Fw{sw8TPUnO31Jdn-kOqPo-aZq$8J`Q3G3Fp#{|7PsugG+q|6=+XsN?4kV*AQ# zxc&*%jqCdxgf`I3&Gi02p#ML_`u{_$|GWM%Jum-)B>$_we}3D)Lqulk4?vH9`p@Tm z(O%HgBN+MV7t?*2KL7JC8W$t#=l*;?_x<_#gK+;K%!BE9g?J0+{b~OX`uv|i7gOaw zz3=zFN7LuyetpM<>GW+u4j6FaNqyS^TY8|zn=QV2Yo*Mi`3_I9@BaL+1@zr zb6zMJL|y6kfA1z;3hvAQ2pav<-=_EZJpbMFJ~;j#5cojef6n`Zg2(&)8b9!P&UD|= zj=vqzNJuq5X7T^kf8S&+)_9m7a7xqvAzw>iUY-iXPZ-&eE1x*((7qv32~CRdsdsdt zZ*;{ybcS%}5}{da@$i7l4jlqghYliRs1ZW#qi3P+e^!oAzal_4lJEd>sJVRLyJL$JwDqRn5r1#VzVn!0}Dy+%w5PzkgejJQwB)svbG|xaEyxYrGHi zvBK9&tiQkCsYZ*}tY5Ds@YkGP|03V@H&jXA&%>_M>VMw%tj~VGUcnaN=XE4ke_qEb z;^%dDfBg0LhGe-vufr++uh)P5Jv=df{r$+ZKd+CLEdPETX4l`>DYyT5o%VwK=k=qq z`FVd}>VIB01hm2T^-4x&^7rf1U$1}4)p0*Hk_7Gke-CLt|DQ(Mql)PlD#DwDz|f8U zCrJD7Po#a*H|Rt^Nc)pG6=45D+L!JJX>Yw#e^6s~!(_QdZ06{Vo}p;y(F6 z+8k=(=H5Zs+^R6dV&f*om&0hPo^su6=qTx7MTWY6Q&o99(6>$jDQgAx2?dw0D)jH0 ze@6i(AO)jw)WpJycCKFU2<`ESd5+)$428NFuK;<)#`r#eV=#bN)L20E8eT~5qAd3& z@hI*`J*>p!0*c|1gXaVZTh`Dzo-ZC%BMfw}Ky<(~%i{nh`Kq zGLMW!+A|uq?XxlFU1yX~O@&G}@k;4!xd5tdsWL~lb!gXV-ZQ0LRPRGC76%rPKJ_L% z?&U#_PyD8#yaahBjh)E>?|!0gsqow+i!OXz7e={}4I$k4)gquc zV{xyd8?zFUlHgc>(>_j+9E>JhT}!Pc^D=tX_5RE~_C=?)CzzTvr{w83e@5}RZ_3+E zezOi#S$lP|Rn>;*lM_QQ7>NKC_$-QnDM%#q-Em(Iz0J_7# zk*cG|v2{!qaAUGkMOm2_#mzK#(1SN$&U$mb>DBneonL1llhYTQ8yBkfbI?Jy5044u z>*!0&&XubDpr~_(SslZhe?(R&YCSYGc814F+gWt;6dGnOa&Nghz#@=;G-siI_Jp}t z9(5m_=H=!D6i$Yd9w{kmy;*W_Q_a1i_&i^da%7tIus%6z6NdmnfU8SgC8Zdb1U~Y} zyav|s4~qx*;;6qi z{&Fq73J$5a33vO5dCc;esJ}p~(_iQ26~5o^ngtJ)yq;k_I!}zZVw~@qbMxoOtewQE z7{Fa`DR(T_sR5#`e{g1JMNsQc&nfDhj84k>^n~F+wALLM-TqQdZS#?V=X zAb$6%xA%^6VghgVd;yE5di-Uk2`DT*5mj;2Nj^UgYvTUgtH#!-5bUO}?a6l*xf{lw z1Q_q_6<(>mBIn17KA-baHY?fdWbuX$26y>9H1PGn7i^uYf7z|yw4m~eAL3Dl9%dNogL>|_f;I6;Ar3UAnzL!~f z@$}^^InIute>27TwrH30zH5^`XGS+^zFC_`sROs-%7otBSe?Ngbo|C=o~}1u-r_u5 zmU;M&j_?m9+~?cfco!67immn*G~wPX)1GyTx2e0cjT8u;#ll-2Wd;12t%+HDkjew+ zY{HfSXjG)vDa_sL!(*83_zf(R`;|98#;+||z)yzje}dw5C|58=e89EXjc=Drs|QXB z!04Jj$MJ!FJbU+8R%%eXfzK-0^7Twh_(znZo82E8lEa`!?YAc>IKsmPA>^@sop-}4 ze*3}Ww4?*qYF7_SHm>s(>v^xuTf#`eY4O6Zqn$gyoD4Xz$-?7xL#om%d_#oQ#qxSu ziZGa>e^^O;tvk?GDa4j`vdq4P<5fTQg^`uvbYgRu2iNP-dL;MfHS``vNMW&iSQg%G zw@&S4xM100esRmth%<-0_Up^?Lc(86q$7rAx1crya(!-)Md$rpX+B=!Yqv0OTS}eL ze?oj<_RM(G6c2uj?$h5Y`~zS-M4VJEwCB91Uz!~(BTOh+#fG98n2d;LbHColF%_QO z{r&QC`?i`lu_@ivGF+TrtEMlhggR4+g|GIk0W2J0h*IQ{d)#hyygH|@yHIAiv{rxe4Wo1eSKJ{>|8aj$?0;0)$DwGLC#ivrh@o9 z8p57UvTL%8kLNL(JI}+Oh|X7jKci)J20M)U%zBTGjH&8fy;oi>QRM9jZl!1Ye@us> zl`ZYj3b#BAORkdl}yf@v>?=n*@O_e+v59QW8+`NbS7WI`nJEitrab+;!ml0<+$o587ed znD0xRF%Tc^OM4L~I)7IU#APMz(ANMP%Txv(x>3 z+{?!ZuK4+rX~I^b+}Au?8>aB4qt&&#!$F<2*ByT)kE6qq>pFR!7BesuuIqMl$W#YR zu=Blq_!6arGV5_C{N*D79xT#r)zcdtlO6lKTm_(uiI4@#<`lo|IyQX(L3g(x zDJ}<-%@#d(h|ioel&vz{e|dSgS{fTR+aF+y`68G5QU_eSDBa_SnxJgCTD%s2CR*1r7SXfhkW0Vql*UM`nnXnWe*??t6Oe*%SiCnen`?S@=tNEqlTP`u~$II!!*;e3#!%op%@wz`yGWdLC z&s5$aBpns2SN%|K-E%>fPx9^1FW^-jm^R$sj?F)3*L0O{UkS@q55l2l$E8Zpo*x#o zpfdNoJ!3mLg~Q!W<_O_^lheiXIMa@Y153s;UA-G?1XRL{f16*nmUd>VTZED6n3h7z z*sdS4tW_V5jXbN)vvl{O&9o~nCH*Ep=5gy#5&^K;17FkIb4-fuBfAUrOMTJPGIqA3 z1R!WmPf5$n&!R^d=;TDzVb!T3~b9bCG&xNDTH8U`S!B{{NJ(h9G4Z?E<1J(NI z&kb!)NT(s7z4z%@2(8cI&a$C2-%=RJw)Y*LsimuLF&W$~niw0C99R3%;iX%*vA4Nb z)>Sr{Hi-A5Cxv<#5E9WI+uI2)T(hexZ+1syRrcJ8eoo# z-Jjaz(XfoP*5+6k$!=mDSDdp5WDI(Tb>UoI2XZ-X?dw3M!VKkzVFS0v<^UJ*N`Egn zViy!Sf4$|CD_y+6NMOcey0SNoS+VU*QM-A0i|qb_e>v& 
zE%H%Hn@Q)zZUsG`a#P>nYo8ZdkUFfmbM%9>f4{1Ps14>+!?GF2PUBo755k>X_-mnA z5Zsn{#h2oM{u~D}U~olkcW}_OY}BoV>WawlE2Sac>t%6)K3pB3D=KM4*X{u1v>%^NbQ7)p#XmP z3Z^c7;`k$t!jSh(Qm<$AzFd0akw#U8a?D=zvCcWoy&UQFy8zVRW#NfjHoFb6ly?cq zF={>MXo3w%0&&k}UF>7p-a4BVsHbNqe~9E+5HG#_eqv$^YYHN}E{;&GEb3uCBF}I`@M^Pb$(n0a4(|Wtl&)^N7 z!uB1lPNBANfV$KW6WG4Honzd0+3|LzLF`wwIXp%Oa^q~Ug?Zd(A_%!AGbi_?f6VR^ zGA{i`1fcJ3H=A7!&fH=T`^%|4S2NZ;8z2|$Z`AxT-#-?pf_6QbT zPHAf{oR_!jLL~_HJ~xEJ3~NitoF@z4KSm%>UF{0ye-?Q;x%+5lY73To&akV`R!z4f zvZmT+YZBP{G%`UyxxR0US3WA(XKkGwRc5oproY!!96ol})4j469yWUJe+9T-*?d+} zk=<{S9Y0oNR~q!Gc(8hOnTKs4Rbdq@yo>XWX3malMxk^C_GLad=Q-t`&++!PY&Rl& zjhiRw9*sG73pj`AT&~~PEpf^q9E`M8rE1dgnCL76pfxg#?7 zm#B+&xZHZQ?pvbEO$jvae|7cTd)rG)??Vq!nf*I|Uef_JJU?5kH1ur?wSS zKj|=g9NDn##H~!Fas`6LFo@SVXLK8S+^bUJ1Z>Y45t~}k9GIPKbwS}kBeB=#=`bSC zk1G5&6LK1kxBGlL!W>F>vhuh6^pYDDqsj6ON)^@#pk578o-R8lTY2p9bC_=3K^0*#d;x&Ff9k$(I;Y8k|urm&~!;l+hpT`(D z*Nf^>?Ks1Bo^%j*{ldR;v$anW_v7t#cvtdbx1DoG)bEanwXNryoF$i6x;D4P(%o7r z*yYc|a>E@@f6LSQqG2Q1d*mywb~Ej@(j6ZrRu8}s<;9Fu*Dl0Z!6s^6!_1Aluo*Ce zzKwV}Yb@32?Vu=)`|FH2>K?vZr}@#B#}jXKeUr+l;TTq;WC`IeUWwc_uC5h%iv=<$UOqqPNP8!ER3aofQplGg=WZYT0JkYY9VJpzn7*3rvbeORtKn77-)a z-fh#}1j%>joRoX&8GEnou}OuBDnnV^_%bs`M51|kvl6QEb5`i`eqAFAkWqKRY-31H znKam?e;j8Wsh8p@;Ui#%JJHI=Z0W7uTPEg*OE_O}dVYXUSh%E#ROSMq3qf`oQ2A8Ke_mB*fJk3NNZamd`CymIqnm5W<;{sZ zYc$qeonOM46m-jK6Q#!evOh0mE|SgH6Iv2`ZDQqq8D}9s#49*>8%&wTcNetyWi4Zk zTJ@P2I=7S_3#jj5cPN~d8py<5Bpg1d4a>VThp6w<8?$Ns9g2Fx;x ze{K}(U3b0`FT9?*7hCReoVoXcfm}48j(M}=_iXoi9APAc#Wj!YMqz+s66RPpd1*MCQ^V>)VIx8A ztGzd@?&$)=lYx+#YvwIe%0Y-@*kRo-?d9iMai|uHEl*O;1aiN|tZg`UM!_%dX-+Q1 z=6b|NDw>Uv*1hWF0?z844Vn3<)ZI*8Sn6)?y|^nRk=lI>O?&i**T&t66brO?e;4ax zptd31trw~-CSJ(@?_<}edZeFRn)4rH*X>X7ugW8U+jAHIq84Gu3w@a*!5M3)J|_6?Al*P zT|Vf{aPej@U`Agn7B~Tq2(hxhoUVaI&h0wTx-V{>e|rS9Um*AZb4QA|f8C|)Zr5xn zzLmQ)fODP}MXHD1OjWVV*tx3*cDrRVz8&Rswj2dM0EfeN?qN9Emp3u=g;9-Y2SCX} z5u4o|)-h}SROx2f)CtmLGkI2|P{hqQ&=9T&9&!0cx@|8a;k+D^-7Qusa%$^+phhi= z;Kkvw$u2Hr#SSQEd|5?Lew6!X|Tm-XrhSAa#Yx8WLc)th^u&9A-f!HY( zEt4H{myh#wMW)M$Nttx zp5@u*ee zVF{k)Zc#Yej_r3#`Bo1G(1{4TV|!hv`Z#|qsF}W|;pgR4AMIdGbyV z{c!}ShdSHs^)>73wOeqZS*#_O0E}qo&F)(zJ8ugIR$zMOJ+#8R=HLhgUD}R2^0dl{cz!yyHNZ#KbOL}uYHIZ;C z)WhT_qp~C&f4sOX3zK~;zaL^bW1hF0PK|;FhYo_sN*!ylE8@#=uAn>kj4)#$>vIn7 zA%~7~>n!hrNY5z>5lLr8wCkt3U)p8aW3ux`>D?qLA9lRQPvZky69a4RBNsWAc z((}GB71)hzso)vPtWvOc__nFTDa8t8%Z1dr+=^SFe}P?pS~J%bSJPqbBIW*^L4Q#1 zw%y1s4jbZC%b{+u%exspRXf;fqkXZj2gDU#@M`T2EX;`G$gjMCSYZV>Is7&QkUW zVMEb?=@IA;aI~H+$q@Y`h()CyGqST=u+@m;W9QMQp5QE+kwL@k_u;;{q3mUU_b(`C z1%Ypof4^_%+Y|u_7n%+0*h=A|GN!VnbEcyxXpG(RbOm~5{x||-y3lQgD|x2e*OFt4+nu%s@p9yE~6 z`59caA}Q||Y*FIWy>``8Z-P_A+c$kf6Yg7df1XwgT9oD=0-fNG}4RTPYLAqn-`CGcCcjkzdL%2x823@Zr!HI!y%5rWn zBHfg8qlzyoaFmImN_`wX6Hj?vc>;4&wmD0u_iaFVg=U)LYroN4ZzJvxr%h3Z!a%Ja ze-8;+?^Ok>4b!YB5pFP~Av+?YiNz+*iyH!%q~ATIxz-eed?5xSzY0i-9u~;h&nTwb z?#s+GIdvdPXW(`WsHWz{&BBLy3fFJ;c{|R=5cc%me782w7R^JIikG6V28w;k*f1~TXsOqNOM0s5%?cs5&XYrE?%Sx`2Ow+l$ z@7ZT9KZ>ZotHZ~I8IJFne~8vuxFL(jQdc2uIQnboe9kJc7$oc`VU4H3Wm>uKWQgL8 zITRihmc;z2lQuO?Ux85H2baeT+q?#Nl;iBYM|rlkb}jA{QG0SxWaZ9T?HI)wf4NiA zt+wmM&?Ckad0iHfvp4X%#MWdjx07#<{#$#jHPEM)zOmF7n_cBCA|q3Wm;cVe=T7Un zR0VO*a$Ry>c(-e#dMDr`l5-95f5xKdGiXtU#mj2mkY_wp^16sG;pvfUa0xNt`R(-O zvE4Pqscd-&M^y=rHJ2^+k!86^zs_pvm+r9c8Ex62(qrXhPGb967x>_(x)E;aW zMxhjVkZaC5i8d4Z7k1S?f1$QX?#)}`A`eTb@&Qjt7v?r74Eob@*EaiA!nK3%&6rQ}fAXTWjaM#)Zk{b( z!F5~I@;nyYdj4$7Ab)wmvI?HcmR+tBLZTy{GOQ?dEK6GxFMVLsV|prm=hOxjLqZ^y z7H)JY*`?_M^(r~$UfBt=urPR<+&t!fu{pu|EaAmN#`x5-1SY0s&6`wn298Y&=+}A0! 
zJ3z+JIr027NQhis9}0V0CY5YmSRwSoQ4?>Pus`Ovm1`G6JRA84JR@}oV-61fVoVD| zbcic~^0MATa2IAz>tua8vb^NXV+Wfex1Haew%zscEnUg4f3bcW1E~7NCDNCVU{lE1 zx|G6W)nEEEBFyhFD-5vOW+?mct`(x8&-t5C#%D9WH5(c}m%sAQ!zN;&UuH{SQ=d!* zAdZ-u)p(z+X^X&falhY=T2JoPp8b;p(*V&N=ZNTY_tkbqGPNmZS5Lf` zEi!W?y<8o(v)h6`QS>qzu|oIGhlrKs>YkVh$*iu@`T2aZ(RR6hVbqmNZdeu=MQq{n zIPB8;W)WZyIvjCha%3uJ_U#RV>8ahp1tU#rdae3pe>R@tG4cn+^5*SBR3q4k-of17 zskh{P<5RY{?|9@RFOiLs!5y8#bI=b3ue@)XwxIO8aj4pA(=E>$)WB*>REi{bG92Qb@p?7!RSX3|jb~=!vO)SUDF%0}yBsQ+5 z9sA8Zx||hRzFsx?eo-CKK(XDmnnBms#|!shN~=hoZ&qwFbL@*4(zGT>!R5=g(KBF|%MFHsMxe5r56Vty&feEh&sI-D zsCKg57v5H3julABj2C(FBHOX|-O;%^a1h>M;E1zo?H5Mdt9zxOB0Zg%@KZl;LNA4U zfBO=aF!2l#qR~SiW6*oKP}6JVJe8gMOvH2581O<>s=0G|SGVqfl{KULvH~^hewA?) z)LKXlx#4S`nvz-#b%ESdtf?6DasJw#-X_0^G^4YSo|FGU2!f1a4UHO8~(A^T&%o9H-F^KknMI}SU^iRbsP z*zpsptR^7XKV!#DszK5(380YkqoS4XG{qJb~weq40Glo>%bk{Rb1U&<9!Tzcm39oK=&be^$cx zZ2vF;n=suDGsLJafa_hCg8(7H2=jfGlv0Ry&t2WB;;M&rrEj#t-bKdC9WOdR_&qt6 z|T{t4rCB!Rx1`E(a(ei1&Z6-ui2I z8{uz-%40iFo;sLKds%YcU1tC4cH}2< z&78`Wi`0eNIPcv{_ThwsD?XvY@f|sF!mkhVnIXxx;%*P9LLQ5SN`vX9A1?MOdic*Y z%wwjQJVwc?dPzOb|BlIze|{VSo;lthm+i&#Ymo5u#TaYl(>VgJPT5a-My$%`P266y zF1?Q{fdlg8b|jyT$cp}ntPfFupgY_r;KhdFPExuTpbRQUl(zP?u?i;lFK48{(rir! zmW|8Wd}g`?;tnx7Sg3i{mC=?8OolrWfg3h%yt?>}sA;Dq*m|ZAM=I%Vnbz+Y=?}l^K(%J2`TLK2Gv2u=Qwx-C< z?geppJ@@v_e3d5be>R1YoPXM(VsjqcAFXs%F%@c8{~o}YotEXz`g|!CKzZl%dy&W8 zw()9=8VKHvMXN4~xubZUsvvECs8xh}?)0;33i8~v;mKhhrOu!CAy&88h#ldEONj}G z;4LWH_fh~$K(xPf&r}&`v&DOr5{uX*k7gw)g1(65$IU4B`4t`L3w1fUv^`$u?>hC?kHOW5!QFsCdrmL)ftXj1 zty?uW;aMzS{8=rLn{b}>g%;0;VDBD7(EqplbdFT_O5TO{ zRNDvnayampg?m{*tbf?*^MZbZR&Doz0Ba-21j?E9eCS93rQlyWW_WXFa)by&qj1`# ziZ?Fzu0bYeOTcB=!1tHeBJ|(ic*YC%lMfrAr*K|?WcYcxV`=go>01Q*XaIek`y?X2KQb@=> z9A2;C41qmnd>i;5*w`VbYkg~+kZ^$P9vJ_?xk2!vlYf_MpJ(s+RUF8*GyN=Wl=D0i z`wNujIu+f}in$R}vnT6T_O+;YLrAop?9nL*?RVa|pp3}7ZhUmg?deupWL0-VdTHLL zjnS`=#yqOc356k*X{_oeXwKXF4Z(w6y`Y)<$jpX1vf>Xgyw4)~=o*B1ccKZt{omB%%n}_)AC~a&6$V zyS?Fv8jYAO}1lkLx z;}_(Ec@^^N8TDd?wfD69Q9d2QuB;DN!(YB^bG*g;`*zV#k;MoZp)ZM6A#` zj(JN%GA}Jk7{Hyudtb8kyV>g|U37u6{#2WY8ak=W0>o`4;Ke zxvZDo;VIHPHr#Hm`$LXco;&VY7*z81ddxu2px-k#gZ@w$ynAe|=T|Oz$*XdkSxt4M z`gzhe1AABwp<_D>l3m{Pa-dAUELMxYUI^v^^w5r{BkJ&6?T#va%mTYr?~xT*-G8&| z3ZSb_5nLh50uhlGXaryJeRyX8VYNecJjm_Z!Z0-X3!YHOh%8@{V|clA~E7RUV`Dm=~QA+>R~Nn{95iK{c;ghsOin| z1p~509-gl#am*5Sy>Mzs+0xD9E^9U?N?6$(Dq?%Ivh_;4K2V%FBWKd}%wTF*@jZ94 z)wvm6Pv+Zk=!JA$a?&WV)CkcU96(7oCx|9gFWK`^`}fu)9eTCo%%h<(n}6Hvc1z>n zK|kq+S&^^zqoV}lh}yJiJx@xfe#@7p3d7U6ow58uy@K7X@uMMl?QU*Ru=9k^ZJfUp zt+`?`DcOU8tQk^RjI;KNt$Kz7n&dJ^*%t`-QK*t902Gi)So^iQVl0~R2CCx`hQ`(z zMJQUD7Zz$*=2j=%ma(*=mw$J#%u&~e_uD;_5Ua_p(u*NrFv#Y8)|a0gDDsoj`X1}G zITqKq*7ZOnWM9+v2B=WdaWNE&Pld} zQwKQnsGPa*^Rhan_XXGN<&(4R(rQKe&nWk{p+uX6EIG)dlq3zg#-!<b!rb*%d2-Q$-HgY1 zhkecHSI^z`eb_D^q)sB^RB2tf7+U(gKcx3r;~Epvz^zZ@i^|u9cw+a$Yxz=8czJp1 zfvrOdV%3Bl0e_sb77rlZ`>6|D?y236^ckFAQ-{f!l|DvMEuN=UzZ;?IQI*|(#f%ha zAoWlB-aDM2%-+jLkP{E`MD=l;Y2RHzP@&U;Q7Wy`$oi zO=yba#8mu2dy5~Hz?2oAe1mxFAJN_hteO86?G1&_zoNZcxZ3?jdn2~C8GfU^!_61% zy?(pT37v)_`M=QK9~Hp#`G_j~MtdJ$lrTC$GM@nFPvi`r(-t|Xi@V|%lrpRq70w761 zF)`h-P(U~Pg&6n)^R4l)yGQvvj!n|tEx*~47g9hdOCFQYaygVToJzmj66}7-*aQb< z*bEhLGU`(r@+Cp9PV?k5#fAgc1>_7CT4S(T-?vQ}N_~H&GM2?7a_+YtV`3-Lw zoDa}4B2U@BKewkgu*3&WA&7M#i(rPR3#cW#Ji3B@K#6{UNDJPnz#PC1UE^RpdebMh zipw+8^{$Xu;E}6^ZRvEKAN=EVof85mC=lHOH48WuF2zhT9S@^j)bzj(y+};L&nHWC z0e`)SJNB9;Xv`PEJ)u>AZ?%SP&My6_T{QvqCfjkcjN<2ieeMTA9)3|MK%4`E21E)_ zeQQ|e!w7*yklRI$uJ3{4F>Nwg0RaFP;XGUe(KYlUn zA2j9%`~5{zNj-vb6Q&9{moIu^!f||16F^4(Mu7tXuAgDyZ%b<{`=YF22K_c}xE6TK zSHM<;K!?D}!HZIJY=MOb_NUKljY^#lI=Gj&d*DUI{CxoO<%5irs5CS39QOpMbbpDX 
zWdGr;a(MO6LWGczRpPBNa#uSPF;$yg3w)r*@vu>4G)k=zk-SU~~D`^#a-Q3oG{* z8ht``OxOoT{6Q%BKmXD;j3R#S!GE{?CoDLfDpF6`PRV*Q@R`CGK5Vx_EUU`ZOA7PY zFbI8ykX-bCv?CgY3VGm^T-{oi?QM;XTv}3r_`!hz1%81g0~!(@a(5uXfSz3;AcML$ z_b3CaiHi<_hYNuR^Mp9hZYyDD*ux4LrDOviUSKNqZ5RL(eXJ~4J!D#8!GHT&(KHeE z#O?Aej5aV-I`CwZcuFIQUZ3#uu71>IwI#9rgzWg>A0`y|7mxBATlhh3V^ubx zwtw~S-&nQ}0_B5so{r6B*nc(`it!#&u)s0%@f-|FU|6f;zJuey9IKp*}Z&Pr$SE#fEfH#AOUfA+Q*G;(vbnGI#*Moa7o1 z6y~DvEy5F*A z{ZGB`bA&ld1*6(U%Te0J{$gD#CINag`w$xm`# zk^3kDeh(}S@2m;TT0*;hg7E)vUei?m&*$|)KArV3EHD?KO@BW)=g)kK()^k3!JZKk zC%xFHV)q=8aoz*%w^RLu7E#zIHWut3w3#?mJK*C@J_CVaI1uj$<8jp+0$dG%O>=N= zA-NB@8}Bp!bbd6>Pl3ZWC>wYtffPnk23R?LJ7N02_ZsI0!)ZvZU}A3Nf1xvha_;{? zIZu71=daL-0)I~~Uu_Al5|>7d&yB4EL1^;nE#69&1wAwQ1P4D|VLaEkm2C-k-!%Li z!ucM2dB%Q0?lTTm(JJPkvZ&Apaz(6yE5Tu0fAXfLp~_TnS>Fax2YqBsCZJvBxV{N} zGa%Dmv`@S#4BTNvit)yHmpk8;J3d!-1H}jjcZJJ`wSN>joAV^tUC2xPMk!%2_Zf%3 zxV`Bb>u>)~ec=5?o-&?WMu`@(=VMIK@`zVdY*fDAvcY!fnl2Rm5JHRU{D0}0QL z=dXZz$1XqB<$}R{GIHGw^J;HlY{U38-3L>L?R5kbJIwcj1v$=Vtcriy8(MhC%s<#^ z{Qf&GPJeiypZVZ7{tD-d=jpGx1ovV3oBb6iL|GmhTRK}aO?>VPywCb%I$B4P*$ax5xU%4;^W`_!0phUuU9r917*FVRRUi$#zM}kF?ZIO2oYD1qdE@9Y2 zo>CN|<@F_aH#i1}V_pX)=5mPJmtT|g0hMB)fPe4{2lhdlIE`o%av0%86xQR(C3x+# z&iWs|{zSn#@iVR|KiF5fum}`1|DcAxW9Zb6e=uxcG~xF>F@F8V%AzlP(2MbH3`Lkf z?GYuwtkyr`2J_3C8MU{ZSvXe>6#%*fIxE&-(x59y6@q@1pVSyF0(-|y1Xw_lhpJRZU#hHL|A3gVP);y5Mjw7!e zfIbcBNqmYk_@aePUF&}OIS@0`71Sx>kN|&Qo z>AOfVmC#EPl%J*WP)cwN);PHce3=oWOb{XgZcfrf2G#23%)!#2FTQ>D|Jr{29e=-J zrFn2cr2U(IJ*_$a!0`d?mQL&YPapjc`Z{c?{y+uiDO`lU{r1nk58GSO#(wPw0T_PP zydOMW0%qzj#4x_b&pH_A_0N5=RZzlY8$R>W)fxKVB{oZHG)DAA>A&tv!1zMc1otvA zd99Igbb;RwOX_L8{~YJwzdAGl=6}oz0zp{sCGcKBX%h_7R9AjwsAmRV8uumL%b2$% zp^&yNCH~8JCJb9eOqLKFdEQw;m zB&<>({*ZeGceGvwXg|=c6VcDts55jAz$$A3UnvUk;41GdST~(a5htWA;pA?{FO$iQ z5j&qRDtK~u{XrT3U`Ky&ynpY6#}^HaTn6@3A7ld*tQw~wrGOx{LQhc& z0Z~yve*JkSuC?}#$jEr(ExvP|M((^Kw{B6Y3Cua>822q%)t|NlXn$KWZb85MGY%g9 za|b;lbn95REus(kn`-@|PW=AtbM5dv7tr7$w@wLV2F9uw14If$R2WBTQ_WWGat131 zs*5D7g~Pu4%)_U`DV&d7-~S# zz9)L@u3anq!e9<$EPpxllNaI|$zB6`68+?_X9>|7ll}S8vf@3t);^zM#_e}){4j3E zbq&BL?l(0?C187hwqLW$$)C4>qtY+;z(nl3 zn3|vMU+qWRm}U7fh>UdJW7^v_C-?Q**U4!o&Eh*AMGf&N?0+NTQDh8avk0@zO<;T4 zs2@24%X}r`{mNO8eloH2x)?LrN=M>*n1L1Req`qIco~}1uXq%<`Qvh_5w|@wzRP=Z^zfSl-hH(u+o>32JCP)h*qi5)HZeTsI}guR5>wJK zb{O=T=(7WfeSZT*-CAG>qJb#4*h!BqVq_Tiw4gnh)|O;yokY(=d~8kS%M5ar36(P= zz-Tv!ZJ@A%oOGa(W9)Z~>KbzGV88MFW(*tCV36eCJbr1xv2JK$xi$o0Z^hNf&8=}C zgFOY{Q=m4EJE%9~<*8d~c;Bv$IJdAIpzKQ%fgPpt7=MR(K608V@T8<@OJ8kGmf{X9 z+Ai)V0_+*X_4A4Q{ib;R-1|wJ`W}PfxZwWw>zP56@t-*9Z{vCNQ&!|SK^#X^q~y0g zeVuZ3L+Z(;-k?MxI-Mt$5spS-eTK%U@wPM`swh4&+a;yoKlq|^l2S~`TkQHB3bBn*xAi*fcsdL-L1 zq0j<3JcVX}JdOYHaYOq@{E>_!hj?7N!f{m5CV$b-8KX}}{v*c>TH%$CwuXD?C!T~! 
z7-Xpst?=jiC-|^pP28F594u3(iULF{YS@@LTCbzB~>%$ zY=6@sPGo^bia7U#=$H{7jZej9Pd!l+2d#agC@Y2uorz_yGLNH9Imc&hb1Tu`=}0Wv z`(yh=7v3cHb(EieDXza`oS%Nmj{?_1>>7RUA)W?(%%_h++9-)HA%1|*6Qm6~pL^za ztW<|G7taPV4oBZ+(Ckid&dAst_flnr<$u27dS9O;^q?xBU?n^F963OXIKCJ>yQH1p>=XUaggrbG- z&*UBE^^ianyU2$RBX5hGuXDHRp+1#xuX%^d`}gq|WQ6i$eY7hypN=-SdFqk)@qZY_ z7Itu0Y!JG)qP;npCWvpCg4Ls~pD#8l5?PUttq#j*Jk`*+p&3myof+Wf#MkY01bH{F zk|_?N0?ujh2~itLp1bYm`HvYLsLzS&{A-+oIT><4_~?pp9==rlU!UO~_A{3H6=zNW zR7b4qvyA6|>C1<6OZqi9kDplomw&?Ca7oPS^Sr}(`pgGFVT=j7qnL_lV2J0uoZQL7 zC(Kkh4%Sgg5i26`=l5xoPoB(%TkfvzEs=n2MvO>YLFI;jvlnv`DxB-hfhXrpho zgOTV@{V&9A(f|56z9hEy>v-dlEv2~FQy4YvVQwT<9gGj%mRBz=SQU_gx``ma6>`i3 z%=e0T4cQQ>L|;hMWj``fR)4{9`?asZW0|$;cy$d(KLC9b3)Qw$CJtpe-;bGnjyV?& z8fNq@AgatubQu(E%@>-i)?7QS)D|FZ2`Sm)qF>w^W%k( zL%PIimGo-E8FydEY!Wpb!>%vM`OMAebz{X3p7iM3+~d*s&GtUjK!1O##<6{pt#Gv+ z3w@7n$3g(d{I3|{3z5W?<}HaUeRkq!wijQ)CVm{fM@zd zaPS?YE@XHQVjn})#eYUi)Wx6o7|Ww`s1Vr&51lX>+hdPapwy9#+;S)!C92~+(E5oq z1@Z6m{a>nM&KHmc4%P7m9ad_(!2bUm)iFYzi01ykRUP9AJpLc5j>$|nX@?)xvGO;n z;|%D9gOagX`_?z=D8H|=O4O1gL5UA*015!#tN-*pB@^D{dw(B=u{uibEB@Ur*90nT|7ar#yqEA#I@7a(4v^nmLr>OGOew)d}S%QF5g(q%FX)hf#(BA*)E#X;8v&fsdJ{ zo{+^JKntrtIe!Vw9CT-^cMIh$#?qKKv7ogRX&uWztz{`5Qf@lcf@XLHz37^#PFtw4 z0IG$~4yr8cydm;86{=3HRd!wk)h?99K&qx5XQfb|Vp*c}Lmvire$1$RYj%%_OIYN4 zL{SUP5R|z!8Y0aQ=yIKaPIJ3{H`GCM<|NV z!}noB3V)6X`bpbCVf;cr3TaUxRI@mD;^TMyEh@coqH0#?LCYy^c#bHP5g3z*K6h=6 zp)^|!3Oy8BeN>=did*@kpC;;N97pI%F$l)KBxG3`s-fezrg``q$2<`6dPP)`e2WD9 zgeYMLrKJ0%+$*FY0GJ{0p-|?H5PHA_IyD!nG=C0?)jmz#JL4(bI+QJIgn@*H*JoLoax4+Q|8-M?sP7478 z&VSSAcYa+9Xupn;+HeIA8c>u?k^RJV(+d`EShSDr5Rb}R3J3M+3d%CfW7$Vk$#@pj zv)DnbpC}pydQcE!d7^-Yf*27Si@p!-kf>w7+7I%Bf9YnG4bg~hKbl|UB`WClVp$vQ z=z?XP4N=xYgBwEi1ZbrZItI~fy7+f{Cx1S1-anto*gkyjetizj8{z~x66`jZZ6&(Q z9Ry(<+BMOolWPPzV&sBAC5*TMQ50h?bZ8H6>@U$;4XRf?*z)b5<`gImIXq2Ra!ncJyrSJf6^Xn{5-UM(ndo(`YB;Xmpg-Noe^%{6e42qqZ$s0YWMRw(WY6S&!HaUKC0n}sCGY}6IB|Bc0@%C-5CE-9mhnq zJABU9f7A`H{-7I15$Tt1nE#`0SbzG1ZW!(HqZ@wyNpW2Mt~ic}B7aaEI&X`=wK~J=^ zM9YU*K?bd8#;pf6{-8xIwd`9nDy8U44bNhHroeYE`k)TsyMto$TUjgqK|_oCHU14+ z(`S)oEUuN+BD;|Fk!R4KzT^DT%0iBYfB&U4hRT%mG01ZjT0LEaHGc(4d_e0H@38aK z;kn7@Fs6D919=!rkuflN7~f&FW(@rv9} zupL|U+sM70XsSJOzb5^Pzu0ap$A^PPS=|j9Wr6(tOVR)P_N3%q1ZCv6`kAPP(T{L~ zU)tEdCGBSU`!e;a!hgLR5w1U|TA={`s4)ix@JG|E6rXd2{s01fn+$Cgea(-O`TH|+ zY(2cUewO8MY&gePKhGu1K46%G(f;MTZTnv8ljlCR8+yy%w;O!GAKfxgY8V{j z82^=e|5qCnF|PPW`d{3){*h9cj3NH*?_s^uL0OH@_trRd6^Go4rp>Md& zpaA~kvX(q1eak;CYvbO!eZMraKg0D>kz?~;?avEsO@AR8LlmLVcn-(#bNzjvcbq2y zebDcA@=*#CjqmF9qu?d$C(qIZ{gKCSzwy7ihRJ>B-(D-j^`R(tZP0R~FGRH5oM)s& z%ROAfHuequk$<88_?OoY*%usBD7$f-7D|t67UM}9_l)7U$#Vx<&Y%4oG^W+>`dFee zU19bDE=Y@%>?@%cnUf5{oHr@#4}tQ*@Q&Q}b1?~-%v z5Ir&aGPrNrxYlu>mPtSQd;QqHEzT#j;#%vHevSC0@_!||Sm{gUJG}l<`3|qYRKCOO zkIL5`G{7H~@8|VL<@+rMu=k;g#&+E_f zpVuE%@_(0lIQgiOKk8xhgNQ2mOHuq$C4Vi4XBgT4k9xTN?EjZ~__P0Cief0;$^L(q zYd#9%-xa}wnwMyW2kmb})Vza&c+md7KiBA&ws=tg4tn07c?2m?+w3{{Q3k=HQL4W^y9u&Pl%g%o^#KW@CN`I8a!?N+P z?dStEGEYUkBl!3@vv+*=#YPw#n|~vSv)L@^Gx8JWZa2s zUVok=KRYob5}P5kBWHp zuPS0#d9#nI7|LK00sE7p7=NSP*&Dbu2hA||qaO~TD%L)#VoVJ3%5L+uEaEq{6Mb~W zNk%SOOueTU|2J*%pe05Pi;&%scs9cL&3`dSpAeM~h+D$&h=JR``@<9UC;1%<`6VPM zIfQ(_-(wt?@L1{hdn6qs@LT@8N7)I&_`lv;s4&KJ}r+ z^PYvm)kHOnP86C@YZ7JhM=d<4JcoEEQ3&UxB{7X&x zQJ`YIQ2GuW6sS%ARv`t8B{|3;Av<+1e_+am=ZRqRJ~3gs->&yUg-#{)g~L8%Jx zV?dT4lkLX#Ln%9`pTCbIrXB_rEPp((_#RRC{_oEBa2{bdg>rV-E+~SB%`46IQg~#QimgVl`_y9orXXWqz`*!jd+l90W97p7J z4@%rWFaJw9YbHdAj8J4kRKWR1=}X%GpyVACyEuQtc^nkLL%aG`MuSF--+z66DS-dJ zeUR$~QyBPsIKNQf{#kL1r$A3_a|I2!z?ETa4e$M;fDz%6G`J-a_U$+m-V?UsD7C)+A z{2e6Z?_t0GaJ&Puz1S|KTYuSK-=ondCFiC3Qg@?0U@DTRhOs`Z2ge7;aZnAD`_b2S 
z{@(tOkAVF~D6aj#Q5*kIlmsflX!-Bvat7oynFksa^O)iz^P{*PkRO3g3)&6l>oJ+Q zLf!@DyRB83h!UAEy;(#-9AGXP!v+ia7Kw;RT+0a-Y7(slB4fxZ!+(?~=2gH%@&MHn z_aAjGs)D}Nz=967Zb2V*C(XmooU568Kx`3nlQk@)ub= z-^$-y?|+oQ!ng7l%HZ$s_efv+Q3ijnALyB{^&7PJr4%O0->>Z^O5k70;4dXH`TT49 z@Hr0I|56Ri{2!}xXLbHYTz-u!EfX2@9=;> zb88%jHP;;FOt(+hEv@9Xoo9iNJeFFmjty@WPcRc?bV#I0?n-hWn~P-=9hIo8t)qRb znTC({r(ml&JJH0PW|&y{4fegdLxo;)r-l1X)=H&$B>CBwpMRny0KR}KAZYDSehle` zSZ^lM*|XOZ)nIica!*^DT^hW9>MW{V8V@IcHBoZ~?Y7ZkBj;ry8r$t9+*nb`GE#1BUG)di!PI$45jemN$PH1wmf<+$F!`O69Mx8Yg*uz zk`4qybyUMel1U1)OBQ@{ZhBaHKzn+)>3lD6MN=&UwkaXv+n4bZrAchbRbyEBrg5jH zUWVzTEPL}Y6|QCwbi(ZfcVQ@>Wax^X=5^*pdcIsyo`3gPnrUSqU~RNxVA=pVyshWV zd8Ci%*ZI9ZqH>>U@?&&*_EAj5*?VMkmUxMmKxc{@obN%98KiKAX>- zyt$dt(t5y?Gqb@i*O!qo6I6P262@GL1I#_k%C027F)=&zDvfehSD`$S^Qrs>_;^yQ zR}8yu^nb;2g9=typ>f;80i4vjTgSinU7X>IKp$yGkXZe;UIU_bQ9MeJ%% z(mPagztp3izIMhIuxItXOf%>%%4FK>H)=He_!5EK9>aGVNaPa_vbbV%z-a3P{Nqk;tRXxqrrb6=gZIJ)G&N(<%k1o(BTQ6A+ls z)A6-F$elXc#rxNBRhtX;a5%!L=BQ&O6Y0zp-HqpAWZ)+8-r=3w%a-htmjS14o!jc# zAI=9JHS@k9zH1wXSd6m92rzob<5O=K02`mwI2w;HPsW-ak2+8Ys9A$-cp(-oqlf8s zU4L(j7QzWjXNr~0Ow~-Xj)#D^uevlTy49}QCV(_o5U!ZjYrj~(pN0}|()&$)?m4!( zWPau;3q_qDbDque+-kJEzHVKFx!UV}=Y)V+Ok;Y-cjYa)92J02)kp^wXL2pmjS);# z#Y?uAeJJg?^>Nfqt8^B%N7`-1hbUj4kbkMKEVtW9D=rT(p=_^HmWB+m&)%sqQV!<} zTFVxUpX<697Yqxke#|nxe*vfW zu3nPI!)f3$Aq$}W^Hy|D`JIn{&LbK1e} z>Xd43-PD%3A$58`!$ABw#Z;tzzm?|7O7m2k!By3-z3W`0leI|aCYpzqR|V=!vA1FA zp*q{sy^x^4?ux@}DNQV68Y~xGsjO-k#Mk}} z#IF5FSg++|CPtT(X-de0PJ8B#_~QK?xjB`!o%5V}y<3XrauRY^aR(AFs(*U)TQOgR zLIqMKrR|O*i?;ISr1|BYarNEE-iw%Q>@#Nta;BuGLNud|sj<9R{UzSgIj7ycu%N@T zRoU@{;+D8Wx21hhxdnU8jM94ax8>O>EM5TErI&@1pD|$cK$weLGaD~G&bW=3-i7&; zJsc09R*qy;&K&kdEuSPt3V-@A65!;i9vPdX>Q>|nz@b%;8}lZ6uBuLZokvUhCg@G1 z+1;j?GH$trcEP7QyDALLx!=7mnr)otR&+}VK(sKtJ&W*si?nRjKhN^Q6yZLzZpW=| zM0#zrwmkuKE7ByfaZ0pADY_mW_tx96g^;extDm>8vC`J*G2^48*?*?z@D;qb&+{t| z!?uXrVv}4$rlNLHaO%VLjsi^Se(>Yc4abKaADz`pehlw*e9X7cNGiM#%!KzS-LHk? 
z!6W{^`~Gm=RmbCIyIj8Islj+`+eH=quw*&fViHu+;pnW=L%GmLR5#l~3e;+o0v`S& zlx>yw<|!r2a~!uW|9Ogz(-$PN$0k&E#_WQeTemb7o(fpcAw(2f!R+iL-j+ za*Ug<%JeSBl)n?cgquE==vRnX@67I{^-v7vZ5Wgxd7qA(|+7XA`H4kb#SG_1NPm=7&Fcb8;7SJd#{?N zU-brRN$xS>F8fL-DN8o+(0l4Wr)}GW%I1s0ox^XrYJ{?dEcQ_0+VzfHnO@Gp+X;mJ*Cv8p4qUHGtESKG;jy9aPiUfW*+J}4Rhj{ zrI-A=q7GLWLF~1_oPjYe7XlmTd1w@jK3~@N^J`TkxwF3O z0TjxXUVrRy{CVOkFhy6}6U9)klg(|d>|}+WX!pkn<+q_RUQ-Ud%#(9FSxN(RmRmk52)U(sUY@~JY0PtUe$7+n)NH4-Of_Q~{?X{Zg5Ynw5baThsgxvp zZS&?bVUI^Z5|)>Vx5f?kt~^JzpTyOPXM-f&IDh)Z36E>qx2f4l7aoCn*49~}x##jZ zYi4=VH&=$TAE({(LCcjv!2s`^;qEkpjdwfWx@FW;0z30e4-b=MWgSL_aD%USrZSy5 z&L->ULEvb8dDu@*=hfT^VnB*UD9NlEy;L;n9%HUxn~&IF8XC*_zL01A8K^w{s3m6` zA%DnmbM0IV@8BqyJRFL@&P)tOk;E*?NGdkD#pKk-2KXe264mK83cNhNq~{0MPnqg; zD`&2`cy>j*4d>&ojMwXJbEl`wU6Lr>E*$1!(&FZIdc>3M*;oi`^tHq2yg4s0o7C)j zYL2eoi#=&(45cg|-0AI@QPTOid;^6^KYzE`nBHC&Z8nnF(PozjCOpD)Sv%HqNi})7 zzAvdwQ%#QSY+RZPdfYHOwsWI;#vdDL=?N!ryhf|V);r#ubUM9bZfn2muN)Vj&g$a8 zp60-lO`lDNr#zD`OLjfKJ>7dIgEW-@14u(Lw~gc?y~&b(v#RC$>4KQ)XW`8_HGdwP z@4C@O>x^Jb=hparm-i2-J*n}vG3Vmw3V8c+;zEIVD~@aDUMYa+BUkB}jpCe}QrGc_ z@*BSF#}mN8a%AS!aW;;h1y>zQTyNK;9?$qSG637t=4u`u-P83ME3k`Lr8TL}v9(ar z1j2RnwqMR^UW0Of*wQczCP_YfL4Ridh_}%QV;$e^B)`!PMz==7dRgz^{6-O{?-$Ms zL#ZctNx2O*)n|2jd8ggFTXGj}yroC1vDsdc(&BXOcK*r@_M#uRN+b~A-NwmgWsy-1F!?-)bUkc*ooMIUXQDIDt4s;)7{nmqSlR^c)v(Iky?N;Q6Cm*HNdK#gKL|-U#3f->N z`Lft$k+~UrwiGToK|L2e+Ba#sl;fT{oTXip_(%5n+8m<> zUYE`3_4LDrg1h93_y#pQ3x9=3ADm3ns8_XSM(P;Zk<>-^f&6{f@+&qLY^Cgxua;c3XxQtXWl#eePBrScr z>!jQ?Chcr@VrN~&CjJgGh&@}p70RAzRG`sg3?V@hH35eUMyTqL-GA6H`t><*={Q@{ z&%^7=-R{e^xPzTwEPpd9F;pi{_h3rN(F!h$b@!CA z0$C6Y=i05icx}hV-a71JrFuqbY33_wT4FM;$ixy9QQ1#;c?qXQf2%>A*>2maEnG%T zU-jwG?`4Ol(#~eQqJP}^?QRV%otB>OTcbhKasUoika!hU)8qA$ElP^&H**v5kg;D( zm6<2b=w&=g^2baMHUF(e^BxtHU|etNQ}JF+iH)&bSn&*j4El6~9JvNaMB^U}Dk}6KB}dMpr5NZ3;4=Gdi{9*%7W4s=X7u7Iw!Q9nYzZ&{z>5uZvmfrT(HP_Z| z8`a|zw7)F%$~D~#xkdo5Hk(FpH+O-Gr3wFrQ1CSKUnthzVLl9!s|&NMQhBt2@OGbBws6*&eXuXQ zO~amUQ(p@B|GC;vLg4Ew7DlRN|C!ovJp@a?)qcm)t$);hr{TvCv<9Tky89T)eyjb2 zPx$q()PH_!Qjzi3YClAB|5oki5+0sBe)`=>BA~uO?I!~<{iF8Vzg2#+`&Ro!LqzyX z?T6gO_4`}x$34jJA(%}lf0r|)bJK6NpFBYSK1C;Q389MP?py6ga)j}FUmJN}P0!zI zKLL3Zzp4HF_+fvm{j6a|A3=9g$#H%!dVBw+_J3Q=hMCkqs{QUyf3@{gF?ah4mCN^T z;zNps@n|+StL13=fZ6!GtCu15ESclaw!S3F#$Ki3vM)z=mQD+}tcmbKAdW=-T zs_i@fVr$%ETHVCge$!nl{PSyuzV&F^KyN*mQCnGlz~|4LVYO5wqh-;hI_5OyB{Es;BK~nUwK(UXA?XN_Zn*b zn?RrsO8>4#>&w|;t$jAT?%eWnXkSfwl7DEcTA%yTq>oY4Wkf2~9Z^779Rai78OuU@ zxIoV5lKSdwrEc8H_qB!3gU0-F>_Y34i$T$JeI<=jjOI&@wy;TCK%-ku)1 zT|92XVzyN(jy-h8>8W?7qw)GF8r#j%GFbI|yqv=~4S#bdH9hR9*M;ftg=a*eQLIH4 zdREB2((U4?(`Uq~Cgtva^X^wXihp6nJ^4VLZ?tCr_Sf;)wtfEYyOO%u2^cuYugm4w zY|>XYNvHL}TY;_GvY0Tv(9!9&rY!wAn!ww;Ls`}&IXX*g8(f|SI{`>`XHJ;wn!2>y zqon0f5dh{j5%@WdCu#?h#N{%6As4}4QFlBsw^w^6(mRfxgt6gYl%3rASzOA1-L)!j z=c-oA>t{YatWEE{9EXz|bKj1X$g>>pvfiv4ccq&;`l#~rVZrLJ9IvPGdbL0ke7mTm z!O>|BOCNCw8P7B5|D0L5bCo)g+t(uC+?wClN`)_3#}iVsl>I(M1f!<6daN3+d_ z33ch26^j3uW`D+eEjM?iv+Jdg0G93xBV*#I-gD*l_F}OZmy&i8MO8=mmI%Vn>~j8K zEUirw*=P;3KDU-}Mo)T!ZRe!C;;Hs85qecOn%vEA`Wc2U#cBWBT%qg z%THI^Xf?7V;L`(cngBeMTPTV|MDQ8$fITt*#|RSCX@9H9@JRjiPw*VQt|&*67N!ZO zjL@2xyZehj<}IMaO1ibX``4rR(`6vfQmAJ}x}T7&#mj~T?R7N)zEDW3=jx$bSz8KQ zRTtW|m!br#c-^j%?s{MZNgLtya;fz)CU)JoiBb#E$pL{u+CRnt7gts-*;4(L62 z-UNVXt#%}aR~QG#3@{F(oF)O2)NQlQH*9;(UxxrxXk=w=mbu@gO$&+itGZ1~0IMu! 
z5$WSMO5b!OYRpjrjegKa)srASDGM{?=7gFas9|@hLXsiU0kH<3g&YoUuq!R^Am>Vp zyKD#Tdbgdmi+8PRd;Jm5-xRaJMxr)|n7i_F$2*rz70aCx;wt0>k7Bn?fI$+P!Y zfBEgWN_YxjMl|GMJjnr?k8$Mx=YOrK*-dj&BEPx}grbk8W;H~-N$hr5o%%;jjr;*2 z{ZUhcM)nt)n$w_p`O?&k_Yw_f$VXGNI_?HdjkL(DCkYl~Q1?qyGeoor2xu5Xe`#tA zoKK|9uXqB0TyY;wjqP^ggao7GA>yome`#t+1e*l3F*FS1%_5)J#f264qY)Y*8#==ORK#V?`8iF&!j|`Y*2S_s@=YT&PP^bWNaXRqs z0lpK&90x2|T{XB1Hdt=DM(CGI8aP_HXu4{^jt7p8OO`9&<4uX1oB$dCFu^y$e=i&q zHu_`PG}AFvn-s{b-ke}Ph{mSrFrsFO=?59>s{(1=1i{!37(wo{El`;d)ZGUseZlr( zKJ1hvTcNeiTT7g5+GxF@=GxX~MWJ1+H$8w9W(|C^EMOR@lDK%2k5#3Jh>t=9!0 z#0NgKm*R*MBFem2Gz z+RbW%Z9(k-i2!{xIcOO~8DRsUVNi`}a=Ui4yo)1#OUki$m}6@GkYM>?l1W?-+Jp}_ zk03(76bb|*{-qpAb?1YLges{cSby;!^$T=12;mF}T!LsOYMc)$7pSHWPVpP7n1YH1 zm6uR*GoZl-b&DXw9i1pDpf4gX`k62&bVNx-;CBtTG@n_AJ+h6-vILtKdJ6%ZVc1C| z|A?X}^6#h#_4Zwr^T}r6S%Ivr$4CVO#KnTeB%g6ASx=InouRB*;VhzMK!4rbLJA8X z+ijwS0~-$ix6gW8bKX$RY9V<(tD)@>gdH++p$PF7R3o;|g0@$!dUk9J6lK88hr1Rp z{5qcG_$xSWHUEV$-HDz4M}5vW<`f`~Z)FQX5}E+t7zA-hP?PBw7V?)uhnyFjlQZB? zEl}6X^BFl$2n4<#>2A%7>y8ihwd1)^}I_kbuB*J@fRq(BbJ92uBY6ymSAQIYU?E&@Fd zgTSD}u^8Y^fpv{C6xlregz<5f4T4L**;MbeWao2L9N!02IQk``k*p( zkej~Hu>;C`*_msrhgz+x*JG`t-~U{H1Jn# zaSOr5Yq_dQa|YU zkGACEmB6^tpZf^ePV@*Y=*!#n0*`s{pIabZaUbY#j1l~VhXp8Xf}b4gBeScZPm}e>4UStiJaQ8jM0u!ZEZY|P1sYbGP=NUK6Bn%qhymkqa) z=GZQ^A=?FQ&PpY^xB)Rdv?*5vvYH@@OUOy=sc(zMl(d9p^~AD$yW`}g%{=$$XH13l>if1d~|%>A4k zMt_BmZ>zUJZNZ>HyAhtGf?Zjj2Q23A)Xm|z565dDuwoF|lbQkVEcnwq%ip3?4t%<%2_U#54IGwgp^c6pp^Yp*4VA1BMg$ zH;dMPmX+VY9TEH_!5bC#H)^K`L?du%1lfpn>c!4ikH7+66#JzS+#fi;z)&LF5V%aB zE(dhsRw_c=xxVn07~gGH)qtM-LVrFIR3SkdV*LdDmDD;x2a@Cb!OrkNTLv8d{R@T2 zmjnKC^1*3_i97+)#|LU*z#w(NcA?<`QMwXe!*}t!7>7SMKwS>sU+2&Cg)0fm5j-Z# zSpX#YMII28qVxO>t%&UgE>R-Kk|5t!Uag40a}u254roRc>kwQHH{h9ovVR;fbNGz` zpBQ}5rZ~?Uj$>_|&;!E00PzE44bmB2UkK3c@HdWmg4e(_RRlGqSiodq{|T-PWwHa# zsr395z3OM`6-A(obD*bqphXR&X^Fu14VclNbBFc?^ak?O-Y<+%(Utk&CV|pfsRW<% zK|89X-FzeSz>-1s8^K~aEq{ z4^DGHaKUbhcIab$JD?xM0kI0fCOKy-TyJPk8Keb3f{-@!Z&8)>-~T4=vgCINLi6`y zMB1-1kwp6ottSAp_d&4f25wY>?PEJ^V4?hc!0HJEg_5XJEu&-%%YTdH`T=ST>oq@k zODvDq1aFCX1;N2n$s-t84!=p-4=AC(P=TsmbS$gX1c!)l zWDbl7Ft_7EVSvx#bo{#%IAfgm&u2$$AF!;(*XIN~h~DSe4{AsOa9`m9AoE1D>kkTa zKwX*xrV_>Nz+VCxgMa?nhTv;~ZTiAk;`s4dRS~os5V0%lKS5`Tzuw!V{oUSj`k1y=bG%lO>lRAfpGgT5P2j#;{uZk%pcY%%zz65s_4^xeXI_jtC%pzS}_B< zRRnt-f1?eM^NM!ICC?6VBTzR8O~Ar1<#Svi;UT|&{1e6zyMK)1HDD?wa(+J;OJJ2m z{)4e3SRVa@xdhsV`(P}AQxc`m=LE$Qd@z>;^Vkia1EVA=pU(+;Cm%l7-hV_{k`%Zf zlqF^=zEGC+pHP;&KcFn_Z0 zTB9mloAW4f_iJ+Q2^LBDU@i$(=!3B=@HtA!$>-$wdY{h;CNvv9 z$Lt+q62s@@_+s!*jt@bbeK49Yf?@o^**17i;&`OEp2@QbVj+TU-QigaMDGUo94yn~ zzSd}S{D=FFKHR?|a$WvSJ6m0V54;S|@_&fOB-kOW3xDY+KW(rRl29V;3MjS?NFmfi z`O;3{@a!CLsy&v)eFN9ZKU<#0eviNQdra2zy*!=`f4v_B7dZc${rr565$9JYNVG-- zCublnX2AFgcuol|ke~krh8Fh@94{;%)tfY0VEfpAj>^Qj@{9{X8g5A&!gz-u>lE=k zV3)}~$$ts%+Bd>(C!;SlJd5$!1qfE#^T>JK{sD(*{f;>7gcVTX1erJ4X`2jDSK+M< z>m$#gKYiyHvJm$DR8=lSh{(y6& z!@omK{-5CU2=!Bqc-0YsYuw(J`(iDwEyfM)*ZT%kXJd*uslmGm_S+~?0{{%qe}I;>oPSyFuW^-R4DoM&k8B^oq55*op+-bfj-G`6qR@rE;o$@v=NZ9E{&CqA zum%6JERb)jxyGVwA8G&OdKr$*f3-iPt$)2|h=39#A`o%p7=Et5^838wJn86*ezgj|!sAR>A82k(gW%Xn63q(Ac8Z~X7BVcd8A{k1Y+)03>a4aisYI|%YsBC3I*uMd1L z`lWbJ)CL5}`d8OI?sqt*!}A60m7tQ*KLxfD_d6ij@$V^+Z{PbjAo748{S9eKFn@VC z=4AZvjTs#<#s7j5Rfp@v`9=qmem3yH9Qi!B<8vVO{`T_?SvR)B#eJQ;*RlL`Kwl0h z%@8S+7`@ufS0y_W3%2W$EK?0^3k{_?Z` zU&{^1$HnjX$!Nen5@hAjcDfG|^BXA%-XgiaK1j>~D;W*w$Dift0nhle{MR-8L1GTe zp9b_J{X2$|`9WL`7|Nkt6Kv&xmOKyGN`kl~Xi0ANgSZ@)#rgXnE{A2~0k`?HEbz$$ zZ}~gU6X)@RxE#=u@qpSS*ni3aExG!^VGd|Xf^HuWm&3BjfZF_77Wc+49Okg>FrK1* zKaT0J-2_`njtfujPani3IW8!b{=#7n$0QrJ`)65PuV0AEVOfHBA8?qTWpQ2L?lX)# 
zam^oV%#|Rkn#5+vGb^;PO}vcf9>7>f_C=mfe`PJ|EOZ<&tFb1vQwh@G74YK_QZOP@pU{)UtW&+Mz8hkIi z!?;jw2NdQEL3~_uu(insQ2XCVOyDlDFIe|hdn6pi;<>F%&?m%tT0aMIA55`-`$r`h zG@|1{5Nil^erb9>?|&_f#?RfdvHaQT$*FWn7M^V)+-G z?bN|fZ-&VMSKs(XE^@@wh_yn2K?!OhaB3~ zH;&XDaGLZ7FZuWFW4K;`Z^Y-r`Nets6Fw8mONB(4|O@rLtllH)(%YySn?`LDk_ocF)SwGR992YlyW zY#-K({TOhr14a^mXXNi;zy5f<)35EqJR!0rhVS79O|Y8ye89qD`|%#-3%ITcju*!V z-v>q&7=KH0d_UU>oa?Xl2dt$@Fq(Ko|DRwpf99{uk?=D=_t$@hj3PV_2qctTa!Afn z$n`*xNvM5-J3A1|NGuAGH-Vkz3*??4bxK4Y1?Ik?KK|zzOJF9yF_!)63nNJ|li!$1 z9V3t*%q08uIr8j>=ha_i?EQD}k?j95K9VEzvVWhFc@5)l+@~?&{SWYw?r-?WJ4R;= zKwJbL$x1pu!H@G$oS^do0-c_qQryV4eJ+F#zlt3AqdIPTyqJ+s6%p@_4zG0W9yUnY zdRrs+CLU@roN(iVZ%ju!>bkomFXXP<;(Xyhs<#yyA=L`S+@!DW*x2r*EO&V1x9a0i zH-9p#9RnDzmmRN%qad0n+i*6UU!&QAYS%J9J~0ioyg5|F?*Ic@s7^e~=D2?I$xRqz zOd8xY^Sbkw*6Wb^&+Jn5?&S4)T?PMqoz09%aw-!Vm72{#^EdmEKRa;eS=7+swnM5+ zhWf}CHQBspAyn|FGMml8oeQ;dXd?C10)O2?6Z>gYY|Xe2CugAv$149cq60#6pvJ8q z25M7}w^iS5ZC$>Y;~=}6#Uh=W*X4W~A6~I}=FAyxf!frD><;-pi$xz`+HfsaiJDyY z8n7rg-k(W;BzdjmZZ;8bI`ety2#kFy8}0c%=N`CziW0D0wVX8fnmTh`f(%$)TYsad zEWEK&y*{-^v$7|V3*i=Rk3_1TGZgknzoMY5(ljR?>!#ECuF>`LYkWIGQQ_4+s_55m zmSyuWMSYO4J-w0H?mMUBwKXQEM0^GN1A7}ov#ZY*Z)T%D-#c%L zqw$MH>F#+hY|oxP8X02OFHHJ&;W!@E_1;-SLdNLTx+jDs)JHhZQS-6Mr1Hh=<5yLxPM;aelUBSeXM-H#=vchcl|PD*jB^6de_!TSPI!k{+kx7;QA6%C4Bo?ThypQsJ& zwuy|wS?hWw83ikT-E`o!`hRkTiBqwa)?>3|cURvtxNV>9{m!MW`TSwew-0;D(6+fq zG}nsT`Aw?bSvwwUNs!zYi2KOT{&IbU-i?3K&%3)zm?`ul8IzAgW^zoEs@{(I4JLNA z=iZn1_tRqW3W9uY&mZTFVct*!Q!=O21(Z zaajv~?`DmS3&l9@ZhvLT-6o~FOYdf&TWT{or--B6wC8P-^s5S46I=Rzo0rp#8L*%i zO<^qAl5HuPm6iKr~P_QQ{MQnEqy^2tDQC)(Xn zZYSm;%@3-#L!p<|b(^Vrw4{LBOrp-AXC0mmnbu40n{m%T&wqIV$%LI0X4xOInKrh? zxBqY)xhXCA%J8RKr#kBl2&Hk+-F0hLhV|7}{94Bvo*Q>#A5v)&Oq5xzZ&j^)julB} z+~5H&%z9(Gj7dPY{>1=^(*R@E5cR0D5R~w`uAA;eT#I-F%-$t{aJQ_Tsr$mYFAAoA zlyC2aHBe^%muHa80*RWaSpEXsV7Bs#S$q zJtwy!1-Vrj3bZ@qEje^?-(`p5b+~D$7<)LG^puG}yAGw}HM%YD(Xl_h&(0N7xRQNO zZ^~8DnCJ3%X|{09Th|i}N&JBVbV2X7z4>?xetVkto_`{oHXHB!>I<{WP3j$QcB^Aq zIo;c8s!_NW=SiF%3WxHYGD>GtP8~V$<@t7|sC{L@3G`hEM1$mVl*&9bFu+W7k ztH3{}j^6WLKsyJaV5+9l)1BOme9~cRAev4D@9DJ{dbjbPZ~k(XcDtbOjP2oROlR+# z#%bw(I)4jo;DuwfgV{uJ*TJZk-t*4y0ad%M!@Z~8P7_0)9<>=~ThonjLz2YiZ~($& z6@%&0+%87DD>HGM(Ua@@9!aS`#&edI_vzY0C-mTIx<}utu@dHzsJd&HOW3w#^=lw_ zOY0`O`vEI_M0pZ{tiJ|t% z%cl*^M~DJz{z3L)6Rw(eldJouIBVjH%i%Dqd&WGxj_So<+VMRa2i$p86`Mz>*3D82 z;H*R8_WAOr(RjSJIaje-EDOo&d-XYu+s)xVp99*vOS{SY_JE+UKb;=OjfWa8V>_Y~ zZ+|MTOV!@dkCS}JH1^d)JIO3JH>sSUA3Fh&#;QW+rkXboNQr6Z%_n;^mZ@whH#TNk ztx>-e?`5$?b@iDEy%(K1kNz+dtS6Hl=p(ndc++y#EUEDwL=M+uNkN@-Fs+(gU8Mo*p!)a1-Clz8((IK(!<0Tp zs*thvdQZ03R=0Ccb4f7FC?7AKK$)$LoFY*fc3SD^Bj?)o(t=vIvx{dZ9Qh2-@qfzI z#^}eW66&^#dlu#!8`x0VtzN$CDD9z7UkjssSq(2um^U&Ok{OobCAbeZwgA+HbHGD z>NlxgkLxA&H_MI!XK$`^hl?=W(SH}{{inLhYwPwruFf>FRhzWik$hlAQGqrpX~_rl zz#2WO2h;KKu5$bJVrjkE1FruS?-lCWt6WiITyk(BYJlw?dm`76S z`P3{iJsP3)9Kv;XH1vhLHE!=6%9aw-cQ4OXEbPj7@@8L-Yk^soZcVRb806TU`@~sH zQRllZkf^KsyQo!MF3k7b&3|F={JP+5ju*Nqh@-{$b-Em!ni$}TN-#PDfWS-;M zq2ZjxzOJfm{hS<~C}(rjpKbY(bhR8nse?&g9P2F-U>~e9MmE+H^^gmVW!Bvx5srp& zLyom?GwSoML|1*YKR)KxrH=*GDo0V+w%6Nq?9LC3UKm~+98qd|Y=5;JRPOrFoYb~9 z+7Ze|*}CnQPNcAPK9Ss&TF(UkR`!Xy-ClDw-bahljPx7k@owCG3DV}(vi2xBjrr5NI(P<#7n8vWE=J@!R^IIQy%lki-+u4f`}bDr|vTqH_< z@yNn}EuHJLm9%$P8}wMFaG0&9HC=_m|_E5BY^rxuTD|+YG&c8NbMC8PM~pUVluiJt|@M!t{i@#M^P3 zIlE+8OYPl0@QP*L&D%~oxekYl`O&tc8_`7xT$7<#+r@BjZhPt`ER^ez_Po?1OHmmNa-+XZ7FL+Gs z_*3LO8ex5&mVZ?#tyT2m3YtjpSl^>6o~HE-|HsdA>MAGxqcocA zK8|<5o>``!NDuAR-zyH~d&xZjkofN45wl>!ejy_}jAH98it|k&ZKfDzy(DUR-zlf- zE1;-MWeA5j8^I`rJZN7rE@r(<#@lu)HkCg*9%ZX_*(9*}kGP6ynVByolI)M76OkUYkXOQ2|BCJY!Ds8J^;>c+;=r 
zvdijQBbf&jPXTI(Vc;30XwH6RN0eXb%{C{l8^qyr`RK0i{qBqw(OFlUt^Mx_l&YQy3xZ9gL&vxbdfowOnIuy<5{tgev z^Aq)~E|T)sO&SaQmFmac@)?|Il%#;;ZXMj6*$0h6Z*C#O3HoBhZRO^U!Q?PZu^0_l)A%%w)1rz`!`oz5KNT(kLKa<7wNYm}a^j+sE+zs*u~& z^M4|togQ7FZ04E zw5bmc@$7}VyVJ%!J#t4B;%6Su66w;K{${JQuk!UqN3QX^_%x@~qr$on##vd zXoBZ{-=6tHiP^0)(`}rDS%}kUl7`PK@*)q~GI;k^M(=GkoweSAH%HFiGA8fPl9u~T zwlwe5#VugwJ9um*=sOK{NoM#CRr;n=W=!$g;kxNZ)}1dK1(0uR)}sn-FE198IBC|i z6#T1X%oOcmcSNmN)0fmWWj(56QGe-Zry0gQhSF^=qxf#UqkyVoLrqIjvH%&H)>rxd znlmGFS6cH$>#dieG(9#i9;8`%T<(O}T*wwb^?)u4PT{udF6xz$>=1*yXM0Xl#4h0ACi zUExfq!D5%o)v~vBvqp!Mdw(L_WMmPp?ku}Rcx-O&awJbjx6OKTWzF-PSu{OIy9`H_1vsH+<`iw| zzAhe|f6V9n42N-aIkDyT;x*G7O!rTK3Ky9^!L<@R7WL%lgRDQ14u8xk97o6l&qgj) zyEm_KjrC*XDaV$-)LDR1f9 z>8i9#X;+?UUVrfJ;d@ivg_p2Xycu^`m*5&G%KF}&fs9`59eMp`1fWHFIL!e7~x@^4K zO2c=4ST87l!z4hJ3VeOuj`P6gU45zX=H}3eMXs#@?01b!{&fmyN zdA(|v^?w@E2cGJ3J1t;m;?;G`VGig3@_$Fn8k1?0_5%H!ahG}uy5glS1OtXz zr7i$is>TU@4}*i*?-)9Z!#vwg^DLgip(J^ki~kD0YOZ(c{z&edRSCnBd|^BM-44l6}Aus)8XUYB5^)X_2pY;&w%kti#4v+{;nSbMJ z6-A~RxY=~S<;=^apj};_v4Ys^-~VGn_cfg_==xpO4Z1R;#e>A!8)m*775;`3OVFSc zzDx2Vx$(RY&oSq`j;I->Vs3sJhg2W&5eK%{TAa;``Ba>}+@P#)MdYy4K)qRysi&l( zy^X5Uo1F^nrSF(zyN_x{Vjh*e?|nAB9km>{L1zzpg=Yq6~Y1pcx&3uul?10 zjn2L~X-Abb&zoieEVK5y>-KK4^Xy!Ug1cP3~Z33sHa;4zV?*RA=nt(k# z@9)!6v_#YfAyD^*;OWrOrd<1M%ELarpiaGeTe8cDir32Vxz4ov=>4whJb2$L@wUmc zjCqDnl6#NxBjs=lpkO0XMt=q|+*#@I@)ntxG1#2MJ**J4jyQBTotoF#70qrgIp9ab~eNpAE>X!jwVK=YVB>woEXNyA39OP8lv z@tmScXKAc2=)&9&;r+E@ZFyGwIX4 zhnmh!}Jnfdu>VG1IHQaiw!xkZ`FAmTH zPSVy@g9qlh%IHq3yXK(ZOJL{hK&W4I>`+Y$!`H=mb}u0dF2!k!fk=t@E$`f6(ooG| zOJadcq~Md%6IpXSrmlm=)~9;XqIW~>)Bdtt3n2H~JyIbgJ`zTqG#Z;6Qj^c;!f_WI z-xQnIHn4WuQ-7l)$2*AA>0=B91h+zTTM`=8@lQhkKHe%aefAEE6c5Sie$>;eInC<4 z7i9?@B{{?aM?f7n%{~ekcmHf2f&{nSWgc?(SxM*iUWo${F%owD*uCgi=5yAOy2i`| z0GwB2c3Uy0xcEJv_N*~FP}Fr39Y-eEv~xs#J8FAfzJDEXt>>Y;eM}=nFO0ppMf~TH z-b zt{8&0GW*x|#xF+e*=6q^=yZR>fKO@n3-@-OD66gq!e(h+quuO$GtyZO9T$vei5%$5 z4O}`8j(=)jxCF(-MEWXU3S$(Ng6_Ov_F$GP%W=Ogj}^CQwl-w!#-JG=9slO9|%Fp|-dT05RnZzpr$Aq;`#5E=&dTF{i&{OX^~qL?{H!Gda$ z66T}1C7Vx2$sS1b_224fy7H7j)QjnJt7gx~WPhyOOM6!wr8&l9bBUF-cD7LE>mlek z<#?P?A`!i=D*cF^6d640t}o+!Vg>1}aR?jNesIoH(5&y1rJ+&NfaX_u2XTy0|N=9<9@$1^VPM?dQCVfhXiOP!6T!TcC+^~=lp zXPdO77R?Sh_Wksf*St+;%TmNO_N(~$P= zef&(KclmsY{Mc7lhH62~Ah|d^?|;#&d7>Z!;g^{;Ex;7Xue%$p4!fkhtYV~X>1g-f za-=VH2H`8_c&o!Ly3)KD#`Wpmj9CoSni(BYvcql1s^C=eV}F+!W&(YrJ6^ig&Asgv zr^#*ZHd|xqNDL^x}4lpBsx9m&G-R7-mKxf$>?69 zeZ|Q*)5kl!WM_)WmCprd@dqw&Flx5!ZeS6-rXnnanPlnAD zBgUMfd`6V_K`;vSgOi8tytr-_LX6U5Xylei)~6=+Js!0S`8c|anSUn$(R|&jOm2(M z?XkK~*m1V4Dj{BZvD#|sb)HW%J&n_PXEN8gwo}2nOx9(3)pS;~wW^aB8mH^~{YV?l zrqlfu`x@;$RznDUw!6q3Rm}Ov((!=wv_tk33a?G{R%;tkjClRfCRZ;2p~zC^@`(we z>!YYPmo)DqW2`}_DSz~nG5|0)d7SU{L+i`?eR7!jiDv>m%Fz#+r^@+FJkIRd&DCyO zZOShT;x2M;?~LWoVPDVM7MB`2d-+LvGky~E+gs%;;1Eu|$BfM?vsUk~u*8&z57DB) zmM%Mw*_|XRtX)J8){?HXvsla#=eXpGJ=*%&;zAX-U1vT9tSNZ z(7oeHrG1T+GU4qvV+>*;st#h~foh)#^3)tvoQn4|O)2*TT@o$0UTx;8PA)AQHX@hJ zF2)8a=k)ThJ>N6x0f<-}B4+XiV)H2@m9eyvWm?!g@)XWmQ^ZN^#OES!A7hJ$eYEw4bN4`ObN*j@*$WE6n*Z zKB?yg2fLU@H&L$DPm$#&g(EVtc>qb-lX#))eWtcxoqxVO(3Xtln4`saS!A2(`oN5~ zw^f59lq;Yy9-aCmM98~(ogE4y7i&4zf1&Qt$K>S@*-jWV8u>bc@gEpP=Yt_)(8+xCZI0|8s1Jb!Vio4~|Z`k{+C&sYgPWp|BPwTs(R z<>+FJdxhODRsHxzUyobX$hY(!w}S`MDYbN#8*u|}6#C1%Q-=tZf!c#0`#|^($WP_WB?obBUGx#odmhRVKP0p+Y@tLnW{Wfy1-j^5LK>5} z8EAdY;yK|l?byh0N|WzWm$Zb`O$MA!=-QUTIR8t9Afvb!w;L&u9bm5x=}zQwDNKR0 zeeV=LS1WyFy*t71^k>^@WdL(rsh0jtb$_PCDY_rWCkh+Y1rcNfE!6aqcd(?r?P&KP z(JHo(Bf&q8+16aOECJ)N6qJ|+5f`8X%}^t1ex=rX>VQe;_ zZm!{(OAwli*B#>@#3O2$TxS!y-L4P!sXz6+Tg8X(Z9Puehs#*a`Y9a2ux*sb!GD4& 
zcg|=vwTkV+n(bpG(Y~zw+Dyx_*Cr@~--6R*-NE-d&cT4^klJ&x=k6((G?T|=vDB}t zIe+PdW;D$nHr$Z*aaldj4tMbL3P79#ruPw-cdKogRH?~dl0GjsyL?(ou|?h9jj!w; zK%u9CLFfKbdU^Hofll_)Wd$KmgMZnfL^pk`UyB7sMZ1J#(FK#Pe9ACTR+>-4d3$k% zg9=;$CBTiE^z>=NU-ZfNf@#QDD4*ep>&yedz{j@Uc>BI_y9%oJ6dWmo;$wlqgYjwh zB{(#b=`-Kmjb<%x5oJIj;I(vZ-^gc(Zcn>n^zNqgJs*kOv*Uyek(D#D-hTsQ@v5Z9 zcqWE$jU=eeW{=#fI$b&sJ;;!4CR60lbhGFDI8$PFwvpDCk;Vc+LR`6-AkC&BQg^%#H?+x~09VSxO zW!fc8)9iiQ>(Z+c=}izrNPlU&F9EsxnC54~WqrGf&TQdvRP706Hdh!mEUD*B8`JOx zSjXTzPn52DYKc1!(>C&S!M~(hom>(m;;;8DOmJ;Ap(pv;$88qrZ^+%zOiuQ-D>up6 z?P3QEW3HZ)6fOqn?iTq@y+mRgIb&s^RZn+wcuShThx}G}7a9_?B!3H|W-hj;p>)a3 z2>CEbOsX95?7^Xnp~QT0<=VX}K32;7HfBKII;6r@pUu~&yK$o?2XA9*ja}wxI)F{`+Apn^B2}#lgSln;BBa^M`md=1jgQR6M71 zwz5vTW=D*NxQG10K!265KpD?>%$#3x(%E2V9H!*Vox0)AW!=|fU5S0wu*B@(uSf1v zJ0FoC2hZVJgK*Z-q+kY$an2m7dmca?IEpVfB;#Q^{>E0RC>|>-ek`XnN)D^6i^daA z)=I0d?yhshTsy-Hb^`JvG z8JTOHzS_vDA03fB8mkXGn(a*;N@99(kG17`yolykTi+s2V7|+i)gHtBqsD-6zY9J2 zTyNN9dnlHUY2~M0KqT~HKQ}@>Jx$rWe7vpa=EherYkzJfe4xFLapBuYWAF``KwOtJ-Q_T_SZ!_C+Spp4^AK+i<2)1q-h=tFrrG-1kmR5d^8?wUR+t$L=H`Dp=~ zIFh~d(KE>dbcje8}?RC-`;C}acXp;^?PW~&D_|j+Rh2@=R9GT0I2R} z$SFU?8sjXFVm3Q0CvOJB^yLg;$J1dPD<&gst$){Sd=|&*LEw=ao(iLJQ{vu(R9H?4 ztBE#03p4Hf+P?OCZC0?sc?@LRBTe>GAK80yw)TyuiMhmCq3#c5B82ZNA1si|AWn;3 zjM$Aq>s$0(Q#kzJn=8=l^<8#)Z;$Zy#xYNnwk=CRl2Uo9(s%RTZMwihY~hCMJ9&;T zQ-5bf@8;HXa`xfCn3OE%+nzpo#a`SB4s157{b}($?`H;OMc$#f@XH$ooUN%K^I22Z zC*8nMP(SZR3t#BnQ8PNv9Hl<-iBiQ|6e}c7*{_mEYm86wJeK$NliAZf%EAj5H5~Kf zoOPG&x&S#2P7}QYO|%tLFHa;{N(31;8Gnxz!Yo0)EgkUHJxv9a&**pku*<3@gOF|) zK)50H5ROb=h0ZfhX`hZ^<);oWrR8;pnX?`5-wGsGB_=!_{D~{yE$&}0^s?1M(2UDt z=m*bxD$eaqxV)ps1XaE@M`~|$RVl%;9|fYTGW3sE9ZEL)&Q7=0qZfMzlJL4cWPc&Z z?ngk(&XuH07ncWH!y?dYVp>RU>2TOf z`D{5q&9 zRBcjw25o_`>Lcd>3lqx0gf|`dT{T~2Ao#6q5l-;O#!hIIPekh#kdzOebIx~t=E%t& zRs=y|pMuRONWoP^Wn*7KFV;M;sq7|R?h zprZW?o968DifqW8P96N99XD^)k51gCDVkKIb;AL*&?BS+Kp)1y3#Yy$ivfrY8%>!N5$?LOlH%gH0_pl7l*UcrT6^;{QNrFOu4=4`LF5e`i_ITpRLPg zeZ+LsZIqwZ*m}9SF-t^Gr|RFbe($g;FmReaKpJ?Ab=Uegs>!gpb>1$r^CL8vOoBu1+ zME8a)sYZ+t6T6Gjcz;d|r=llF)Z-Tv#Gc|9OuWabPM*8#=6{(^=F3-$N7qHE?d`R{ zz47|0AwJ2>pWS@1y{L6}ZkR_uUl{odHIW!>Ihni?1mpA+VHahZBz_6a_7YQjY0-Dc zp?*xE+j2YV(C+lP*)2;Z$XLvW90MvF2S`2YT2pG2Sz4iKr>&Ae@E6v#?rle>QF)zQ z0V-f&4c*KaULYJ2=Kw6EYzFV3vHC@g_6eKQiARRe!>1^u(qs`M|gSs1n3ACed+R z_#-(g+#T}j-&6_dpnCY9sS=)}Zow~;BdWQFO(Uv=0q>3Kd27%l5Jf>sF!eeQW`qmu zG?ou65vl@}ni{L49zq0aixwJ;EcMYuW=C+hqs&*>BuVu$su2E=|U zwuu4Q-*t5~N3Jtq;DNJOJn@j8U{yQaITWH;K(_yXX?wG6Rkdzg^MSY!Q{Yl0lROaw z?+^hIkW>(#J}Qnm=URVycAVI;N||fufT7UoNq_4tsozAQpfWoy?#CC_ab^GZ?Y$kL zRvSeUd5)imR@n37oe=~bq7tsS{HiL?lHicohD5Yr1wS&|Ut|LQA(r__E4bQtme_3| zrw?s>P$00bi&F}u;#|jX0a`dh*L)Tm1R~(DM$!T#S?6|#-wa&^Nh5nmV%zS6KWX0H zB7a#Ofe;*5d-6%H$ZrYr2JiXzd_Jn{=l1y}DGdaI^}RN<4P@W}Jf7iKlK+8{)OQ3~c}R<=^S?+$ANxTf{E}eDJp#}1ONt(n-ShVyZJD-_4g?u~ zkQ_kvA$cGZa1Ue6p5NLLCaP^zcCag+7>_O*U-xz zvEzk|sa)}B9}qEsi{MK6h;~EU9WmzgV*p&qOb^+Q!3D)is<5YlAkai@x?LfP3AM1u zlRqwaS=v+ZBH$-@2_*}_n1l;{A1MNGv>ECKICJ{}(?^lK*i$WlTde>B2#}q(j^KZD zMK@>I^^50G6YqBGArt112GC}Xc*%J`P?$WYJR+gEuf^=x3h%T%B2^?F^I4aIJDiO~ zg(w)@k_$SOS$4JIP7p%;t^uj_(=MMN*WLjS%T8kN=s(ku>&7eXrKqF_Eo4u*(58Y! 
zFi;u5Q9#F6+Yj#%UBrZEc>}2e5@>%1%*XR;QChQD36e3sviz`HF%Ey86}UsuN`@;; z`$Y5D^nRWV=1$CA|A0(LT?4S%FV;l=&aqLxhxmb*-mx3oUXJ8F&_7_3oY28XASu!# z{sjka+`5b5@H#yLQq4W}?C8fc7j54rg$~~xG3GH2fU^Lu0*d>{Q%OP1q+fp=8%e?n z&lKS|w7mwb07!b+<)qc|Fkw^t?~g;)1SQ}|0ksgI+mrNH_k91wEF5v&xL0I@vuxy} zAA0jWE->#cP$i3sIgCXE2pEflY$>h>tP~(X_9`9W*>dYqilG^+b1B+-FKwQqoWoMU zmUKT`4y^Y0%Jct)V8~oz|IvTeUnGU~?eqN4Jp8+FYuW59=29eW&-38J{q}v?#5(t` zj?OtH{b#l8Go6zkI8C#!t%y%;&-Zm)ELQuF2QmdJG)PW_u~D=o)!<`dQua|T*f%VSi``2yRco{dESI?;SIkq58DK(CBcKJt9icR9}6GOj?vM7 z>@$AGeSFtJfsKD0*kWI7)VI(7;H|LM1|Vk3JaWYO{T-#@3g)ppe}d$Sw5aVElN%^K zGiHR>j^@c(ln*8wECYBXU~p1-e}!kubXkJ~o??F9>;mmAOLKceT?WtFLJs774H2*( zz!S}D_s{%>Il+2juKLb%5(?ru%&+VybUutL+~)yOfb)O+Y9rKc12V?3QUsyqR9+y{ zT}u)`+wq;SQ=p}kBCN*VprXF}Kmc)m?$LAcML3jr?r0j;IRe=Mn=pgT0qlu>axK^v zbNfs)vPQ6Z7xMfpfA4f|PEDG}A^zF>&f~DwL4F3`qiL~1vMR7$kaRmPnsC+`+)Klf z1~LK~;a`7L*KZ`uckcN?bp7>_u<+K?uy9er_UgF8*$2`4#>%~?zq{e#l7flgZWE>jsw;Z zZB~QDntYTkdb{9^7NExg8)uC;?42TNn@juAlw0oj?q^t&pstX92qJ)@L8@`+LpUGIU)MB}@q^63-+gVU50~Y_n;XJA za2IK>^cL)mxiUg&>BN#?5LD2WoRLU>&|<%FVgNs4{rGbq`1c*x7M?SHm;S?8kxX|d zK^Pv|81s1F794PGXy0FJD8zq+Y@e-A8Z!v2H=|bxo=Fus6wI=djqsTywm(5k0cLk0q24b0Z=LW z7Bc%DbUE7)*h_h+&C=f}OaU&d+CsvJM)I%fo*#&h{U^pNp11 z_FpvbC(h{i*gNbc{;uJ<|H2<`J}YKCLII4&9&O{gkhyZfq1xL>QrM3NW&!)K-^|%B zb0{2$GvvrDMpW{V&xKRTKwP4FO8p|aE(-ueYS?8^=l+9$JOz7wzJFAR{WGpAyYI2O z3W8fm#A|s(N(}_(hh%@n4AFrzJ79V7U8AYc1H`Kn?dQ7|LX}Qi#O@Ui-ZaSKZGz(I*~C z+7Z@7;OIt$z{6*~kv4&VltkF;C)ma88>x6vXUy>71@TxYBlLgoKxvvQR@f<;_l3XP z9QXS(&SqQY2cP!!Nj&%z(i2Cd;#!)6J^Q>)9AE*;w`;Y9PY3&u#k}`15Evq0Q;6UN zUBvG^6kwgxkMNzLPt2as>-_!U{Qa}tI=8d-bMF`nk@p<~f7{QX50d)60vYTjU<-;3 zI`+iCTuPt22S0!R4g!L#$!?;3ewXjo=7If*XdHTwu(8kB=a@&O#TUs19YIjYW)~+1 zKE(x_j~>|cRa;^^F0_}rnjUaI6Gg5Fe6h%!>NCN*YEqy1&f)lWkwXveo1zf z3b0$hdjVg-n(%|r``#P9|G3}%&aY>Hc`x{YgeM+&s1<*5Y=UR`yRDU%jK1a+zdbHnJLWayUc0JqL1%0^hF}~gwq%*YT zo^(0SGQ@v9YKPVkB4)XY4@aA3k2zmjPQ|J&{COva_0UE3=NB6vMHHR_t4)ia^h&XB z9&`LHgiHylc#{W9%=5-C^xEDO;Vb2Pl)qhzFGCkyY{&7s89y01Ifv+%2*XuQV<<2^gS$Mwatn`fQx zn^fOo=K4494-K3T_?4_h?ZyArqKsQT94VY`&f404d%P6itZQCxE@~yWp^f&;59>9*2W*JyOwgXCN-> zaY%y$6^{8VmJ0&99(Xe_-NH>6Z6Nh^lH7j|h)}I1H2}7vlluQDD(-LpL911rkpY4i z*j^6o4ETTht?dAR;4bT+NKs@8S?^;JKs#i0$7X#kU+qI0N1kDIv~kD7|Ylm$*`z(4(9*^2V4=4a3^rbGakoU+8cjh z2{fP!;35QW1SkVn%CMg}9%hshJh_Dg6xxiG>(Y&f+K>q6V;_vqKs*EhXk5c@3|ct} zaQFQ}FwHm!4tNL8S^{qXlwHSOIBUjkea{7sN(34W2)7yaaBiVY^~Id&Kn?+WHh<^j zUEHD%=D!s;D27!5N}W+|{EWdl02hDv#ebm+IZyu};!yF4cAW8Tzp!cgjQau}Y(~KS z{2d^&!EtWqA9RBPR9HIy{ufS6`6A@F%NGHs10e_G!v%8XUz7-kxI0h~4zPZEJUjgD zjJJDpB8dpm4?-?>5I;gB0_V+$xl{mT! 
zKhfqQi`Pg-&SqT8ojV?Z?81ztP^jMdB2g~(8QyGhB{*jp95!G*aG(DT zBlH`!c7WFX0qZfI?Lo;z#0F>V*tdTHN%5~^0ad%dkaS;+%^rQ6m*K&%1K`t~8B@4V zf1VjoAMiq=eN*JL?(r=CGw$sV=I-BrW(Vb^xj%eA=T949=AYb0;M#u>oc>~(jPLlL zF-GvAAp$GR*C9?C@LOLrl{urtkn=ZQM1jY9s3XqbKO=OQA4Hr0MBJVRA`YtzuPfhv zh|lElnREXEZZ2}e4vWw{&Nf%vH5>Ezxp{Y%$NdJ<~%Qh_XYFH9@b&Ix0U6Lv&6m6xHyZRQEzY9 zT|meo;{5Fo8nTdP?7$ZV$K$w?Wxyi=fdW6r9qq~j(emg0+<)h1`h%hS!FbKTNB;*x zFP`sTnvrt<+IQyf`fr#iEGO{J&(9Xmac{P>ZHV@n`!DNu`}u!(?g2xvhvMQfg@lGN zk11|`t|EH?%4dyf6ithbB1VmsE8lh%zIZB>i2oA`O_-4pKPWhVMmqeU-e$bT@dp(* zBO-oKaN&$X`#C-%E!-be9NHW4p7}VW<#_4m_>8bfe~trRari;ewf}^J6aS8b^Z(%B zfK~iE4vzf?9Nd5VHx5pn5kr4N(8+&C(D^gM^oux4{|Q_6A5nXA+w>@s_~Q2X8MSwK zgZJKcM+iLrAkC0RIs@2e)Ef{Zh|bUVcbxGYKPWh0Q+V#@IFKTi^@EC=QF}iqI3QDa z@#py5zR?dV4(*G;&3qiF6kwXa$2UNkeUXfKO=F!rjN*SB;hEQ_^AGwE$e|f?`T9Yh z?SW+ln&Uvv^Vj=3e|^zb|D~TD^t*w!pV#vLA!2UEGvd3Bf9r$T)8{kJh(C864=jg^ z>8#0({quE;N8H)4#~BFFx*j9f-u?US@mo-=omJ*MYupndkNy zBPh)C^=w!CyX&6qy)TXn=+zl>2Rje&!aoSrf4H7`E;46)>Mu0k>;wjm4rs97STBv9 z`;CA3#Zdh^2R7Y2cmESg3w>(FQT;-+wLcg=+5LG|xJJyC8}wD!nlm=?Z^yovI@q&+ zog1KS+`^g9`8(Qq`qj7o6M_-r{4eC-yzb+=|Arm>K?eT8;mxQ`R0E-n{~a3jPagz# zeV*h0&v-WWU(fru$hUb9@gF~LUMIdtK^K45OzMGq?=9GisVU@t!;%5@$yGC=?H|u& z``Nbqr*nZ#;<`2i4$%A=FLT@cAMa=GYgpHSN?nciuV?=={(@i6{f6f>!WRA2Phc-1 zb~j_zQappdA&U-U3c*)KK`SjR6m(F3mgYeQlT^C*7{ z!uuHyK4T64-MIhuO&}G~PUx=~vpnomjF%bViEB0v(2e+We_zZL5PpAQshls~t81}; z_=D=3QJ?>c;{$38G{ryo&DqWdVw1;lJ*#jWSgQYee70k79jt+QeSQy#Z7!ejaVS{8 zyqUWg>zJo2*wLq7-#=sGfbX2Kj~#yxMA#Mb>R)tPFu#UPJfqXTuk9an+V{2dgHHRt z2D@NBKbl{=Kj(j6dq3xYU!Q-@|GtJ@_N{08LBst*t^FY1exugD$T!$$U)0*qdB0F= zKj-~Mt$ojfo%Tho{UGFiq1JxR18NQa6nnPeMA#H)ubb8_>GHFmYeZ+Kf*7BI3SSw&(}R`U@-f zMYDbWVB)^#&-k=2BJO+s?Pq^X|H8_B&tLr@;{J|u`}XY_{r1JP#ovBCqu*vc8#~(q zKS;S5&nA5lYJZ*kMb`Z}m;Qs4`<{#Yo)K|>8#BolSvRBKzKFZUA57dA&xU9DgOvN? z+0xmj|6=04D7V8OOx*X}8H4;q%6-qhf3KPTTRR?vKbW}r9*i(RI{qnpb&HGN+2KU#5JO&`-VAIcgJg+!l>>n>P zURJ;c<1gE>2Ob|Rp*wsY=rh3U^#JRJ6&os)llQF1T%zcL~Fb*_EEzVP{jldFz%3I)O$#{m16KUg^+%D+z5n@7r-p_Tue?PA8%K(4*B7fc+Kx}a<_*TA% zqdmL*g;D}4ZALBq_&1TF4ZkQ_C!G;uaLrmkoc%#`%@67S4j~2v*8hv|<#F+U|K2Yu z3M=O?v=`1pm>J)LxZRAr0*3~OvHya0vp~U|&x;UFLDCS;1LEl~q!-%xH#+KzsKWOG zx%NfmK~(O)BEEmV*ZFS{aQ_8i2ZR{;@2=7~gtN(mI_cvk={Sf_PelGL(e}AHJF{6H;&-Z%~ZO8jB z%I%A6JI#ON6~RAT_qYAO2*`gy-~G$y{yg{piah)o|Nnr%``7E6QF7?lU$owgVw-;+ z=bzvE`iJ($dEe_ongbGje*SL1NBpgt+v|%~L?+g5j_)8|gXf~n?SsGn-Y>p?^XocM ziTzjq8|M24icb6ALC^gXm7FVPHrm`@|MPzsr$2x3*DnGTLk{kX6yt$InhbOiFta0W z2DwQcH5+qohk|^uIj;iwHdu&|=TWinz#$)sWRVMD&RkFAc{s>}Kya4#_IAJuBQDC$ zSuy3WdwWFg5g+g8(CePR!!wEaBObov{zA^NXoN31ZUkCxI{0>d740HeL2>;TQVzfW zjg)`;Xk7jma?bjF9BEI#jvMYDWZl*pe;+USzmEUKga(d^GLu9ejj)L`uyL>JdG5%zmEU;{Dc1M`u_>3_dRX{ z+xOFMg@6qD_xHH#zV%-yKN|&X+V}c!{N#U5e;wEU9G{Cuz9GtFf%q`}e}U&a;heF} z`ndgHX7q7$mgJ9?D~xlzcR=9#A29m7@d#zDA4Xp#K5iztL5A`{-WvyQ|3U^4?3g{O zwf8Y6?RyLW%@ zvZ5JW^ANRnw(Qv~&AK=#g09>r{r2%x&X-mAKq|*h@Jdl&g=>7Xw;w?Y-xOdT)CKZ} z#9e*S%w02(60x#Zk1l6E_wdv%*0Bek+Wq!)< zoe|zGl9r0raW#sN{-1BRb9(y*H3|SvzXR8oI$WLro-Y^C-v$4G>cPt zuoeLeMNhLP5Oi<{qIyvb$aaX}LtKuAPn&~{Kl)c-o zmBab9N+c@vuH32XIp*1yXAplDuJwIV*GCDP#uAF>;&Nj_&Edql0^#~KdwWvJxxBV0 z=c6x&dat|;&cJp`u~+xv+0>WOVD{&uFWQ?+=;m>E9F*7Tg<2Ek(>bwL-EMb*kvanU zgp^Ndw8cCjnO&1zPIK9ZkPy_Vt*M;u=Qhq-${D3J9Iz_`PkNUjvG;#zO5Csxjt`7K zY}w*OeXNP)iB8g8deL9nT8+Xm6ysj^6^dJ-I%A)lJ996ukptvPwHKhsLc4#u@U%0s zb5Y#W7gW{VHEdAeC~3CXAZr84w(14a2O?@#pUNttF7MluBW}ghl~E4yTtEtKcUnD} z6Kz7ge!)s>ZKbx&mEM2K$mXZdLjtZ0iLy%lQ3eC|eoX6iVtAkDq7Ceo0x62iVignT zeW%Kz8Z%S7LkxZILIb}8R1(Du*bo|=!$3n7p`hkwKw0UPl%c)4t$pT7@JT7YkC7$$ z)!`;7{Dq59&b8aEm-}5hzBxj1qrqdw1Ahyq`@NtPkA?tl-q_^YILwlX~ 
z#mwN?P|nMc$o3r)d*)c#4Kof<1$=VmzJFqDNKeSj7muSJnc?!mS5-p9dS_g2v17#p z{Qbg(5?_w#>Iny`i(DK(g_PTQca|%FLMfzk>#r?h{MG4E>{zr?D{>+5AQk%wK(ai_ zT{33D)mLs7?Js}Xrz;+|Pui|?^DbM<`UKEg7CNVk;+g-dchY+U`%`x@_)! zu}=E8-MZ&}&E^Vi>p2C_Gor?ZNTW0=xP3y7kbwIex;0F~&1!t8+x0_ZTMcqL(hciu zcO6i!Hq@%VvX55@B9aP(MhtEi3Ky;ql1M?9kavr6b+YOF!_!Y|w7mNyuO*P_oD6cr z6ki;#@g9Hjz*p-LbJ}WL8JbW+JuTE;%F`CPDd^cgolfbp@`g9m$-8cua$aAb$GBFq z@d7C*oomxG_ZIVh7*W!`UZJO{pesJ=1D_opzz^-L2W$4-`lyT!_w1CSl;Y!Y@^87G z-KQO$a@p~{KoY?x<0@xgt5EK`b8bN1VY6zse?j?>+qvs3c zfU4Sv1N? zN(+CKEHGTYl3o5D*RpNbGD|tmJ%esfs?$-GGkZ*5)bU#K_5QTnTwk_pO^9F-nK*H2}vO#Qm2Rf>;y@BkaD56{)Faa>*DopmX=zi*F@H{L5n+EA=2 zBP1CHF9$%mTqLS-`ZV@E^pE*NUHfOU0-Sh147Q(cUxc&9mHh|FD z&Sx+>wJ)5lgbsaNNS=dHPhV8oQ~W2>$ImblK}11xl0U1jJ3~UHQ+-OGb_w8WW4||v zqT~uB`DpDZ0`%HW2Z4jkr)Lo%uPFyYE3CqK`=TZQj6HG>g)v*V*ls^5y>V@T@}7U4 zJt*{NibSu^4a8NrWOXPHcecC06gpn{^Znz#r#y?BC<{9)-{?hn;8MRCj_CsfQ}tp; zBLP*bPDd>lcJS}5R_%C5A$Ie5*-RZlx1}o{pJlMrgtwst;}z8q)fuc$4HS{rAsZxq zYvldL--Wt0vCN5iO%9QTsEX7fz_@=s?Nsel(AWHd+&x6;oB}z_)6>hh5_!4@l615y zunr?zFQd=7Ef2@xPzeaj(vWl6U!pvs>#N#Q-^2 zAL2oz#P(CJqTxmk4yn+-wBLVK`(43^_m&N+?hf!!eh4_lSl&)%7`>x1yevoefcr~L8O)yhDsSiOHz@@_l{n#EjD zK&ULTq^6tcxOz}_q>rCxy}v%DbSGf(W1lq@srligBQJPKZpVj=uwRaA>8S*h;kpXA! zfL?S{a=FHC+11E>cp#qo{RCY;z5`t|NjDvrb;oEn>=oy!><_0)>GvNht8=8E8IXlx zjT3`_&|^zQ%*QB<~!UGkWC%Gn4tJ}6WFjM)(|#!c?bh3 z%d;6?`I3IKRiC~6Jgyw~w6kDKxEFDdFHU&OpE=TQ&raD>iBDj9wrT^VEt(Sq$I=N2 z%WW(%AMd#RP$++3t`8=pqHf8(ky+(jH>8qXE)(l!27ov(R$!Endce{y!ZW28-L&D) z8}WVTpT}_Lzh#^Amw`IbHd1Hg=52)BldF<9C!oy5+O?zx-Ipy9(=sEItDaZxWp}uS z25E7q^-j{0Q*Lya8Bg6NNb9aRx}lY#@*1Id-GB{&q$7V#S!}%xOpk{~^%tH^aL3M> zTXO-qpU8^JAqjeedYTHf>-E)kY*uT20EuFA**xCobi?gpcaO?jigXTx%^V3A@lcsQ z#+$x3Z>4e?2c$KD2Y1NnDX=cZns19&3CKE>(9b9b7FpoaUdhuv_50gGE7*?*)jP!* zgOxjN$8dj}k~bIhX{!~;(|$KdMdJv!kK1psfds4XplnF{5DoG2zBl-Fcbbq<*`O6y zq~kuWZ#IFA#4+ZpeHBHQK~kId`Y1Sri*-n?*4w^)yoY*PmqM3phPUnH#=fJ+g&=1# z@EE*)<+*o9p6^Q(4ZPO{y?oo}GrthJMq1yO(DZ*&$g(!w74yN4bZF``P!w*K#U;0r zcz>%lj#pyqBks?)I2qqSZRE;STN~Op;?qbtAH=Lg($P&Fg6yDL=m*p^qFkgsO|pm* zew`4)!68nV^*+PSbGuNga>)MP%}4#Zj33A!PY?_a$mDiTQS2P#&Gp((?5l%X&YqZD zu-<8pA+}=A>JCLW~MO;HlmlW}!+S0}e6baUS z7Fo0#%2fG*y^6FfdpK>UCiS0nc&U4xeO9~!$Zq5W&y^oss?DSHjO-O(H6}SNv#Al| z`qRs0b0XIS^qxQT5)%Hr^iUD~kZ7OI%lm&xd&TL68i{qTK*lRtHIxRM{jzZtyUMO- ziMMH2k>MMAHEh7BpxNEFOJxT|(3i;erK!EIjl*#eqbsjhkkNWCRdh+*sPf+2X=?Yp zKcd|&QoWZ^p_=nn6$P(kT2CpL^+AIG>wBeI`?tb8R6h-o#rSE8zbm`{1&m{h#)tDWf|fR>N7+r|km*CJIhDTcaj8aiFP6@V+43yl6$zi;S#wb&xq zXuQ^^E=(jOzwkTk>T;-wjF0L>-xhy~3z<#hb%Ssf&5-i4-W^3{b6HXB#J5dDsoNs+ zpQBL9E4qGlYi$&uPW1`z*Y3>C!V?`I2WB=N%%G!&K^xfq3=@#?p)(%e5szOSAKn`h zugX&5+DTwaR1HU8q$BbIX785Lji&bsBpw{r(1Kk6nMd?uPmV zWi98galkIWr1np4{U+f*%-fax?tF+^UJfV)c*H@MHzjGa;qFGAmBB?!FTbLqG#$3k z-Ci6YZgmL{ElY9ZrYO`m#r8pnz3Fc`a8+zMLIvM}6HZ!Q|byK2wF zfjHehwxhBM0m#wXbhiq?hk31az$)sx&lXn>sTz&POT87)OK?6QlRSUyZlB$bV)H7h zX(NyyHd)?C2n2ud_YbIVqTJMlie~viDt_+w65Kn|c~rVMj0B^0jl<*OZ&#tMynQEI zh2EXBph~j6Z!71utyru(UMhhJyEZEr{)hxZtpN4Cg#ZwfMRk-=e@gY z-tG8&Z~B!SsNv2AJMBdpjc+FqVa((-uUGf};}pAkuVW((?a}*Yy`@LUK-JnyD+x%= zt)S6PH*tSA_D^^9Ov3Ayc6XF_9nVO#2C{pmkg6|*7`7xNdTNR9M&a1Qru3j6HHGwC%e0(?x%IxIp(fdO=yxMQK5k)1M|&d4C6} z(s(lKAl=&Mn#C^#HDc{aTpS={eml3LS#9%p;yvFxXvSb!$TnpxXsIWstt=F~KDsr1 zBY{DoWzQBu*Vm8+qQ7|*ox1w8vXhgpu)RI}g16bK^&i_lBQF#vd}St+EV7bbmn09* z;p>0Uy96b^UH1L@?B0bkg_r5L<(1RrFzqH1{)r|+DjGY%r0c0+Jj0p8)UATr`)HXH zTY)}ixAj53iCWpa2T84ov$6%@T;P#edAlt)x9!QH!41ucK04fNQ$MCf0$=T&=azNE zgU2r@XCMN0-iZ7)CRU2Tw4b}x?ZxU_$HU%STvgmrc&)Ec%;aDU#iKG3JbN=q&> zZ!L_q=MFq7*Qht?mz~o=ax-;z^>f@EKcYjy@PS%DBIeEc=J!0zzq2N$iV=W_+ndr9 zRduCE{xWa&CkVi9`v;?l0ibH&$O|7fTujmK>|9sr31v#H#HloM25eO?qN8@Yya9h@ 
z%tu@9pJf8wH8_qJe#sN(l%|eZwa8Jr93B#*SuEYn*!#P8lp9v1^*)dNnwgI(vs%JC z0W(qC2q!U1qL18_amM&aQ$K!^6Co)te@$N-l*rdRc;t_V0lqxpF7)~B_>R|~;I)>{ zBrBTPa^RPhR)i#1D!Ps5Lu;WZKyH7DVqeYY7B8-ktq%DmbT4}#Cn576*Bdn4dx&tl zfoDc9!CdkM>r#wRhbf6-o{DnC5TzcaY;LKiwR%C!Jj0^a5iPu1vXH^c->)ZiudbZo zZtRfdpM@(rpdq#5BX_T|w=3xU_XFZ9!jda@#;biSPn-{-{mTWQEigc9A^Ly3tr0Wv zOLm@N6UAeHgX&D%#7D{LO!eGyvXnk0>neNa!*G6T%YaPV@CE{%_u^#kc|TDr4g?-h z-@83U=PFUJSB1Xlnp=Uvx1?WJ`5NlPaMpB`n~Sld-xVTI0>X*O;dR!f1IaUV z5ihLCr(Vc{zjAsfZ{pA90$^|oRI{X-jd3UQq1{~4?9mS!`C@xUdUYEHo}Yw!IN906B118ffh`3WU1$Odo-E7J*c1|%^NU{8?YxR|YV5YA0CS>+ZPS}5F_<2PZLQ7Qd)3(|oo_aE-NG{J+-8u@FjU}^!1>TUahYXVb+i6^_pb80} zEnVf8^^6ZnO&oX4;E@t6LEVQm0&U)`^y)n{h%h?~OpB}5T^@p^hgA9|4@II-t?OYG z?swx*te#WzUh^)+QwV?dIN^Gy`D-lM;Zr+9sy%p$p=qxwJ+`PVt1cT#etOTV989$s z2(QP;X%kDnl5OY@+govYUF*TFCsygNM%j8QwcObP&CI!IybtC2_4V1CP)5=Z(`HlL z%50&Cz6i{bU6YhoB0ZjpT7DTR&Y^g)TF9R={;KT;2vMmf0D*tdMVCQ{WAC@nt~PAs z4SOB(M!Q#keyH}9KHglVyf($kWS6~5p@moboDpx2=UqoMX6%VFd;qI7bs)XpLAXW@ z=Q`ZSUi7}!bljY7*Uh@oxpQem`u!D&s7R`Ttld1T<>fY_KM}QU?U#$j9D}wgO@#fA z^5zXoUD{`0V{CtPm#r3@w(f=WOq%nORng*(!p=o%orx^HPp-h*n>8(>v|H`2#+i58 z5+1}P`z~%f`)OpZe%(Yokmh9o-p05Um!cU8d>y-55E-`uLIIN^%b;cm+BPw4`5{)y z$MkmHp|x$ehxpeO!52(^*~;1Z?6VM7pE&fTs~87{kFbA(6K@D<8S2NisO;8$pjv){ zkD#?wKKS8VTt1w2e5w}gxm`a2Ct5)^>TnZfV^;&@8zU-nB0)|LnyGY9X!; zq(5~!lZf1qsL#*lR*;!}H0>n<2rKi01dPv$c<9C_tR}BcMkgVkdorHdWlrU9tY<5u z+hHHv7R!I>^AXQNtJxTQO@t)$%!oVZIWgRlncLF=z~% zTRrvIr-E2=0g@m`VUk5jz}|Ha5`lB;$tZV^QlavwD^ohNNmj&X+`Jm&?Oc*9mVnS% z-v-SAM0T4X2R|S~f-mn2o~U04S2Bqx=0GM6jlCyEu4dYdP!^pzT0vSCwIftcQ0;% zd{D#cp=<4ZO7>iH#6CTIyRK`kX*j@+>@KrkUag)M&kF}UdiK=bsV#N!DKC3H-pc8c z+%|uHZBEyZ>edN}nH*3k)p`ltko(P*7d=90B>t=!gR1pz@G2U|wqn|KHQtlbg*73n zYSkLskS3N8**?pKcWGZx>8#rPQuxi|%!O;K`D7oY{fp--W5i<_E;=7fyXid?;<($| z@xZBfO*^Scu}pz32jap$tUfuF?0w=Qn+t!YVUMo`c*GpZ9ZTEif@jhq4WPbtg`eg= zB(EHMS4E)5|&2sha5r!6m~4VG11TGsQK^v@5qGz5Pw zUF>hBR`Ut0t{Rwpa;rNspkrJ2vY>T-_qcj#m8@>Da!IZAa}^>Z!K(>h*}~?PHmhf- zRV@(Y^1CqK@daM$2YsU!DZJQ=(^T#Vx_J=hCsKQHox(R~6;C<&6#f&kB;)knVYPeY zx7tuss?e|Gf=Zsd4R%Vm&H7Tafo^}5k?cXyklHGW)4Gdmvwti;J-K>~A7%7I1iXkP z$(!LYy@?WdexhBU3H@D`?D<(dgGz5NVo%%N`*yu2;L%fFs~;3dmLV9jNBD1Z@5QA= zAM7VHIIHL|WYY3EstX7<9O0-YG7%dF9c5%&%NF<9<9fMeU|ag{*M(X;eUpDwYUK)# zI!liPRG#ly)ugrL;-Pzh0v)!iJDeIKUGKGP1LBvX=1~afCz}yZmxUKh@|IZ zCk~$SR&ZjMsKh%4A7TAmvY~&AHQ-{KI}iT!9*>3MYpQX*XN0v<+^^QuQz6_p)13A9 z=8i0*p~)yIJl&yMdjqjxDU7RICyOiYx}+KNWL58Qb=hwegeyTsN{q3ssc^6zcOfI+ z#u*}iw-)c%0v=Kn;WK{GWEY0&yo`bhnWP+yzOohVL=}C3W815HNEre@pa`I6N@B?)+@lne;jxx60zE>C2OM z@{uvHlRDHwLnx>^*Gp@SqKETz(HO&zZ=DsEYyj3 ztSxg`5RBCyydo>_!2o{&+OxZFiGp8njSZ#^0^=V&W_+uF)=>yJms{wX7g+hwsI0tP zd&UB2nzIiu{@x5FNQ8&ai`U)k>+4MpJ0O)^go&u`aliKKLr4K*@nk8#tR7FxN$&Y2 zx1|O~`93KJ>(Yze%UC{Z`gz`fnoiU84l!;|{k)iqDVmjJKSF=N8|V+eMxl5xwx4~G z*U^yV!K-`Om)Bv_M4P-nQDBtsBKJtYj*I##19Au2;$e4i{B~FmW&H63{w1uVuDsa} zBQ+ln^$?HaX+s1zJ|lAu;+hIVWNi|jgSQFRet*abh1*56$w_2iAP%EA4~T}dFmx>H z5nuQ_oU48~tn%dW zl#e$rJjlo31g^MR%RdA5e5Y zyj5z=l?{J<;1fluSEfAqW+TOs=QJX=CFT_=f;}aQpz%v^e(wCOg!<>_d6Au-QsPod z$WW^87~5eLthQPriDTFyGv-9c-aGPD`3AFqGC?Eo!V1Pl`SLfH#~Cz#*YBWE z)D@;)B%ZnuRQ7czm&>ksm*M@*YV{rJGTmE#+A23Om#y`Pq;-dfSSGu}@~&Og&xk}4 z^$N^n4dFkf5$@O6D|L8>SClR=TqiI?-0ukR&h}%!jNkRKi7ts>ZXaIocCF}`E?mBp zsRn;?kKvBIuR;1Ge38!D>q6VV1+Dd{!>1!y!Z^NefPA68YJi~uE9A`8q!MJXRC~iJ z&aHK&Ff3^*U+j5t__Q{ms^P~Vxeqm*28!BmwyXHEl-6u?t<7P%0u6dOT#%3V%wvLn zPDF3)3v#Vx#$C;u)RpbCN85;>b=d*YG&Z_`Tn(R9wV4v>GRlA5 z#)%35EBQk&y~YXd!VgsKyZ0Ei7rl;~LTq-?^^RzA+;amzWeLi`GZSv?W4EqW#MNCc zOoclqYuzZC5xZqU75Wd9-eAnC#a&Pd$K86iHwxN_5Ye!{oRgt$x*d_MDWQ99&(>h9 zpRW0|VWg%#%g{A4yQuB0Ynz=Zb^L#{QZxN?YLC&{&Uankkh=xhIO(D{RA#tFRiB%r 
z&pkIaGDfcVJXfq|C@EIGxhuD&=ZSaGD~H>SyN?S?0IhgzdOF2E^Rp=VB`I}FXSIEu z)XxWwo~j~}#fN5UT{zQ+S`Dz#js4<8oSt_t^H2u7+uXNVbE70LjRkppS1Q4-zJY(49tik^efYdB4S)vLb=?Td>pl9=&kOUihenUrZ+CrjeSy49 z1ENq^S~BH%scO^cJ7}T|&#Oj_!L-ik9b%yS14Yt_u+&K`=4+uRW!Q0_58$y%?8yUd zFV7EXBr>T`e!y@o`w40r+b%1H&*n}a(=BgA6}WgC(#w_SsD zNMxAuCh|U({o&YdXhiU5YWyakQJUZlpS?c+oFy9gl zD;Sf3&&X;yoC3r~?H2BYbk*&)47mvG0U}8e-wZ~x&+>k{oEMb&1Zti2kW)b^dIg{q z6U+C*x+6TD^p=0ub12cDkM~}0U7Jmi^SVnA2!PsO`+|0(_%@)U!}ju7(hdQH;@p5N z8>#S*_ig+*qVBWl4I=ZlT*1{&D-eV!NekV!3mxnOeRzU5re2LLl<9|cC!6)~8si%S zo*{V&4}FLJ&%2y?PLFGVc%~RT6ly8?&>S$)W0BKu#6W)`l>ke|#&xn#1$5f1hee~o zYFUSOxbq8W082o$zj-;$^AyWGMQ`YhnaZEwp)dNkBwQw#Y#X}RGH;YlY9A*$!-=+f zI+vU?#6z>CT6J@6W9#!wP5$82?_O@Mfmq0wt}Wl}_9H8Yu+OXoQXaJ$o?Ua_yt+@= zQbKsE;d?c)HE zP=pu7vn4p_ew%bzGc0fs?8h&6z45Ngoq5TE?b(|Kq=@iqcBpn{8 zwr`NuzuiP*cNeJB>7-{{>K6KDq86ybUwgH-xknDMWrlTQy&^<3RL~wDrnKT$2cs3v zEG4cP!+~CZA@4D)_*m?M1-E|SnNI5Q)V3Rfkji4|P8BFR5PQhpyTk*mGn5aAw#QYw za-A_d)y7y7!RHB~qmRWHQ~zcbAGSGNo3}z+wqfB9(%r*w`6#4;>+MVSGhia8-sb=4$Sir$I%W26$EuO9)Z;I zyPLeziOb23jjeZCoEN@fTpz<}2_c~2hKyG){OAS8kt=<#>or|5%*o%ft#m*(6}&1%!;deT#j4?>13= z`WQQZq|+gNK8PwmA~Hv@gXM+k*Vjz|A^gQN7GD!553gEdwh@PrmNW6iguw=uJ$bm*VGumkL_FVLwSoE0NbhiK z4n0TdF*5&kZL#0v3rY6usCi%am-@=|@6emn>gCwy<0^JS{t8OHrD6s-KLtv7)vHu) zBe7S{imIPpYZ&|q7p<2fcg+3ntyZ-is)-pK=qXg0c;$nKz!pF=yx+(0;e}#|fe}6yU0|37SoDt`bmawhv zwN1pVHybAMm;JS=26g$$=!`5OUhOK+=K?tkSXf;w|L3deSk!BJ#Cl5YUTfFyUa=R) zi}n2kPIJ41aU9l)(EH-yU39L0Z_8CoBmGV39_OKbgO%jGpg-r4D;aWkw2urhQ&KY_2_& zQ^=Yg@uELEmVg6SGb@9vI(m&2zc1~soR-T6m#@yC6w2M!DK_2~>DoYloZZgXZTc>1 z#J@CCU_Nzo&96eQhOHyCK^k8y@?tZXt@SROZ1uGeHGtj#xNcY@r% zj+ew;I*Jbh1Vt+hDh5q|xLV`WazN}kuow1LzqVH{p#5Hd((;n<#%*;AiYQFP8{fd8 z_0b1T`UEoeI{N3#aoDr&<%PSn?L=C?%6>z`UALWHnCY?)ToK3Bd9`1NlH8R09Nzx( z?Zs@J=Npm|aZe7kpxX*$Rh;;_Y&-ZOhYPtcX3;Cx*_ zn}J^06uNShp3BviJwW+&Fmb_mK=`benRKf%xJuJDN{iakcdlez)=1PpPNAM#(?fe! 
zrWi*Vb${)m_u?U!Q-yR5YrSF~Z|v|l4^2?IwqEQY@qh5vz?IP3!Fn)TYq2@`%}o>C z0U5np&3B*=tA~YunpxY@W@5uu?NHYd7dw0R+;k33H@vo1uOL_5J3YyQxf%;zZV$V+ zH(vJlYvxwX@`FtqDWs*dOF*%%HnH2=&_`cVf>&4L;TUe8(!-k=GJ^2+IovaVm0y|h zIU0zdxa#((-^YtA7k5j8PjrHH^f*3(w#}KOI4=fh*vVmkaehGJl980_`T}`v|L#AW zY9%OR7ZdTFoFb}qH^HlQ2{6=J_HkdY4=+rMjpL^e;kDc9ivdn*a3DVhD*kSm4pRFt>O8rbE9b*eq|fp#)jeyJxo$s zOq=C)LXp*f1n#uDOhA6}&kK+%Eh1?ACcYa3^D#w<_;f~eAyP`uJ^q~5@lGkU*fL+k zXNS$l(dNn{z@BfSRT;Ah5grO;f8~?Lwq})pf8E!aP7lc)GDL`Qze-PC`a2;s zdTb~|=;FnfhIlC1s_l?~ws7Lo;Y;{9ZSiErigxaQj_W1MPgd51doAwnmj^??Zxw3V zV)lAdJDD7J+Pk97X8nAn-9{&p^F#a~p|`41!Uz$#8y8hdWZinDc3Y#9aMpV4#rX zDoEmgdi~mQGGUS=GsshB?*#r_lDq2 zWNSo$1Ef$3_g58-#G6e%^vCBUsYQcm^) zdFT80q9M3zp_iYecUL#{EH`oNC`_XINh+^@+asm-ULX5s|{O zO2wonb@u}}i5lKylC`{C^0#}2y}yeVQ+!QgQ=*?VeY!Cz6!%{gfMDfx!IF>g$HKgx zmdr-KrdT?AI8nN$BC2Z@FA@bp^V!Kq64wLbPzG=H>SIFc1N41l23}D%2K#GA3=@VO436UbyU?2id?`j8}9Ua zCsM~U)KRcWx5YbqY{>w5`GRE}1?5vw`e{KR*U2J$;!~LEhw)aC`=(m>|NO|Vg}p6d8QGW`nIq7v6(%DuDv@*2`V1~N1_@6suOFwG(W9MnZc&vuq( zmUBXc1o`#6J}s4#Le&zGM>}gj$9Q7!JP!A5Z%bY#L+U`4`Q%O$G5Q>_-j#lTH3auS zVG(J#Xepr|>xYw0rp%k9@ia|3ao*L^Gn|inb@|!=A8~39L34+L(0a;Jp#@br zJ&Ug^ma#zaJHB7O2c5ke&)u0}2WovXhn}?5Qe(u^0?j0$%y5ctIuhvKQtz$%>@Rs*h^(Um# zY8ev#tx|R#)pU?Bej`u8>|-a+U2Jm7_lJ7Qt9_L~@%e;6pqSYZWiu{+>l2DOH-bmS z`O`4oA>!>z4^!kypl^vI!RLa?&(dDT9gr3!y}I?5aWK+kIo?nh5^n-mXSPs~RwWd7 z9GprF&l)RLhdA*RKH9&^Yja5v4CGQf!0l$!Jkr3QL3tU7;Sv>% z+-mQvB%-StwS73K|8PjFS)StheQC9YqIUi|(^c#>M&<4&lRGspLS6Xj`v|5V0$kbA z3tR3G-dRhoh}Q~bO1(;o3%VVSr-eXhD4vAJ;nEzVOK=z0#jUe{z~zKlvE&a8)~Fk@ z&{Y)ZI*65kC+;^pPdR7zlq7NGd^#dcpq3VHjROOgAaw%om#)|4&8X{ZCttLP!?(6X^ab;9& zcc?_%Gq*#ASdiO>ru=YW!h=EgOt6*l$}GXla}+RGs-*H5KHw6@f-`$vCl4{!z!Xx6ur6tk4iw1ZCgWR zeq7~$74oNUC`$%3e_tB0y|*jFZ&n%RK}?ZZ{D4kJ_eW=%&|EV4CfB#=B4XF1i|rI=bd;uz5F|wSon!()7)< zfov&V=eLS~Ebmt6kD^rdkG)ssKz8}w%{#4s8tHT{Q%uNNxi8!af*e>0ZJKOU?Z$Wj zunp{IDV^TJatE#v|8BKSm8Av!Ciw*qpAwEVsj#lXa*2djBuHOXIp^y>yf{R`o&w@b zo(gIeD2wGjK9z-8y&L+JSO9^_Vnf#hR2rv!W3&0A4_}+CK%EX82a~S80)pu#U7T)z zwg+AE4uv;2x3u2e^Ps!TWiy3OIt@Pg;Xvd|`<73XsV>P$ehUO>ub=u=<<0FfVFw~? zCWAMpoqklrOL2^2IliVS0N9Y1ZGV(d-5wQAZEQ*;d7|R_c~tY@Ah}Ht-vZhl;;!=u z^Z4K}K=b!fTwSSGA4dDA;y%O)IVVnkB|NxCbBaC@D;BE1sa~>o#As?flu=afaSmCG z{8Hc-gm;Tt78Tc<6L%_y?Rq7zD%W51kH@&!bP3dIK(_<)=Popt zSDkEUq!$QW2WQt&d8aoPh$TPk_abRKx49{K!mO7UaynnRV%qx1Dj`nU_9Yd6E`1nW zB)0AyPbnNYteq@J%0r`{Td%prse92&)oX;_@=6Y%T}m+4d6z5_fDj?{ct?FY^%hr2 z9gRw}o8F%^yk1oaj^KVn5)!#O(9lgdBRsL{Ugx4*de8O|ZOIHCt&X~WpxRJ~$XONJ zWyxqyTj)aLdD+NNdq`R`yjb=M6gMlCgVjE}A=Ce*S z;M(0IlHY_omiQr;7N6#|ZpV5flYsVQQn6YvR^j#ACb@pf6Hr#4NMl*LXgjlnO=pr-Ob>B7D@YBD4%+2+5mzZj* zNtMhevqDsHk{F~3R_GDXFON5~($YWi)K~ukRG)wQcU>F%kFL!QVo^W3HX$s-|4!my zO?RSaqB!#WXOYXV(r0SBFMT|J3GDbhXhzLY-1+~=Ew`~t57rly??6rJt5rBFRY5j5 zm2+XeEhF(Zi(QlE{7GMbQp!dPG73qFzliMI?o00dk(2sRzH!tyT@Z;{<)i6MUBTg+$`Zh}lu6t0hsR*UlQeHy(;pry>95nzd zO=~7jsrX~ow8iS#fx6q;(eaPeBIt!s3SW9(EhE#aw+`xky^x8HBh8mvPlR_3KKHsg z(sJiqIyiUaL;)C61|PNv_wYzk>xEk=PfYq@QY}%H6{Syqay+wU%-`XKN-|>O_@Wq? 
zC@cfjR`wg+3qH4k+i~5y_mtYJYoA$=s5$CRo4lH+&2yTR`;}Uh5;9>*-%g+0E3Q>p z%jv`{`#7SB$kePgDq2aOy6@vl$1FC==IRn3)LVzjO4+Q>yJcA|-$mSdSH=hAF2Tke z32pn9fn7#_2+x}inH=mr0*)8fhI?36FLnyEHT-LKi~Rw}%7L7&-^qHLZURg2y9X1kJ1az|3MapB~D!;R^fUbpV4eNvqK!Lpt27@f1I z&nxMDG&kFgx1iWTeB9x+PBkKB#kC2FMi^5AK zk2}xly(V9-5UfETI&z$HY|VfITZ~T1Eknx$9OQ+XtE{wfmyIsgQPX4oV=o`2#?D*$ zKtPp$x!oa>(L&Ipo{$upzPv3}+f>;q=_~A>+R5lj(me=+0?(X)JhG`(k};OUOS!U8 z$8bUZ;llMd5!d(+f0B?aLeiBLP;fL+N@I#OYGZvRn4Encf0Cpe7k$OA@8p1NBRtZ!Ld*)?#76shl!*={<_oY!P%2{&cDl@ z6jnVk-k5<#z38dDKcq)byl(Jkyf#$a)~I;#*H_542`hQuAN_#1J_yiggK#08KDT%4 zIc>ooPLTul>()q}u-xy&``VbE=}SD3aKMV?d!T|DUU>9$CS%|ILeA=;O?PsX1%c>) z)D}ckOjGBfjcVN;kWO{ZKvgD>#K;@m%eqn=9Srz_T9Ge)a+xPn!jIFl?w|Z=B`=eI523Et(MTPLj{!~^6;MpH%e_n>7TGb*Lv7z83(QlI zsUn^3yY{14%PFkV$uDYPK_0^HT?(bJ*j{e0ggGNmP$TT3lpDRN{l(#Audm6AMB$FE zzXFuCz2)6GAeoOW9v+{1!Rc|So9qI4+@DB(nonb#im)m|Bx1a8s{ z`orE}ZMxJ!btjtJ6pp)lR23*EH5UQ5^-^Lj?v`RslQIHt-4d)FitdcndA&g~PG{{* zCltc44QUVb|9vPzFrzAeZ=K?NG^KljI+2~X(sG4;7s9P)$)h~yGd}#DR;SDE@`)Ow z3i1qQ#-5f4%Is36hM`trse)ge~#}!3-Owp*`lfKn5VaN270I?})QH`F~s5$B<510|u>crc+cNZq?5qexZ6 zaod6zAFL0r#HC_WKuo7wW{t94K5pAPPyiKD9UxNrcqF1aOt&OuZpS3mw$R2SH8kkwOTP1zP>$bnZ z>$0-;=QUidG)?R`a0VJ5bN8;)4+_^K2GszId}mV6(jVQ=!(>dF+juZ8@{hb-b0Vb^ z+C^587Yg0CSLKvQEur+6=g_6<%gSYNvd;TlfMdUS*Hf&B;+D{TUG7o`HOBYe8^?Ed zS0*kEENkq4sN%Xw$(%X5cN7sXR&M?-O@MO`$Ub2g`DwYxq%ek-0ewM%yMbJC{yg-b zQH7G*I_)8(b3&rbvpyZEW;8v7X^Bx@PU2vg{^y-PUJv)^9AAl$S)}C1JEK5R<7&7p zx%0iA^V>SgH+1IaVMkwjAvxW~@UU%j71z37p+N0_CT@-7Rtyg!QRz?i>|V!Q%hxf| zCzZt~c9=9Ouimwt-VMjr#FLP)Ri-qO@S{bVGSO`|BD*o9;nK5P)c;qzH%U&7?Z$9? z*o&HGs-V2N&^lSf#PLK47fVk!$LXdU@8j8*bavARQFO(-krCpx9Y}qX;p_U>eEO%hJY(_FPSAgZSkxIBZPoO`H{2}m z7M3g8sqHJII1a!@MOxP;)^2zssb(~x{lFaD6}Q`=O7p2fjPb=dADz<%NCavu108^- zKksV!ZKK?jLc+uGArg#ch8IF0uCH~mI;)9q?;bA{^`S&NgVN@3+#bUSX{dsD%=epr zlM!>Tc4M`^>jTpsI=5I;koA%6hvWO@_p4pOyXsdi!WT;8v z5<)YhE@j>xb0Uv@Nu6v@%j*qdW|udAcILb@WiQWcubmPK0|NdRs#bQ7leC+jQAIH( zG$l?yT`g$>V_r@3>Cp#(Y*eB8#hq5GpXG^Ck zbwokgaW`xb1*TmiE>tlOk42s9%%{u7=ar2=F5C1$DL)rmly+RAw|fcfrFr5TwvBoh z`L}IuX4_#p@3=h%XBJ;1mC&h@o%Ex7djOBv6ZL5dhl~6w#V8}r&en@RNGCrf&_GZ! 
z7y6CZgkAzTZ-R##Xk^-%KU zUS;yS@iO(<4dd1_=+KZN2uzf9u4S)geIJwmPU38T@CNB|ZDRN&zvwyu-5RKc7=|$j+xS z;1*Cc3ZF<>Vx4|CY_-dO?z*wz$R{Iuwc?bu_ugY$haETZ!rG{wh+13KC^}j+9}vUi zAVl`+fCuPb%}J$R&mO?le%zT+1s54XnU|^eUU3^nBR z5jU`}s`vCx1eL5&1Z6A~z9BAgTW!}29K^ME-!BdlZI}m2O|{)}25`eaP?&Pul&}sS zN>OYq2jaIL4c(dLQCbwEk|w6Z@vr)jE}*H08sIzO_GEsAu23`P(aG&2(va?IL+iEu zz(%(*#0@s^;^}dJ*wzp$IPIYs`f}d=aJ-)S?Z+yg<@F+=T=+vg9DzJT3$F!H{3tW< zB%Q>^bUynI6)eCvU)I<&ti5}KQW^P%;?d)46W+$HOI@jKtz*F+0xjP6B}yAimUNAC zS|BKPzVY4CyjI3<2_o3ICM0P{AyJ+j2_QGQ8+O&rJ|!G!=Bw zLAL5i+thh=FHXt5XNu@?c`WqRTRZ4)14`SZ$W9OD&Przeo=lr!cy`lpk$v-<5y{#^ z4Asr*b-?s*e3S#uwD`{j)_mc6~&#_m?gDHcVv z3p#t8UmkC9#C;H%%UQgISFd$8TDq;4x7)<5>2R$hyDzLWgv`8#$<_>0i){CX2G!65 zKdE45XR@u6)3;6US(tu zvbI}qLUO+gY_i|l$Cl~u=R6=UWFBr^)}M?BU_i-}4M1xq;EmIY#ia-M?XoCOVpg>=P7NjQ1=luIpK5jJ&RomMrb zUh_JoRe!=nO1dK+cDZ$smdsWU%t(u5?nmzDlF6U~9k zt95j6U#+m#(YrTf7nmq&K`{@9s%<{FE))8HFb0_$e?uC?$gBS1o#gL{Wv2e%; zn`O^ulEZt0oP(2K`eb$ z0w?b6m#;=HsLy7bxu-EY*}^UxN$d1Eo;UXg`F!7e3wdEUuImrdsGNBX@As2ssI^u<7QlZVMc@9R2MQ*~bLMybABn#c{)i$Ukm4KA5~h1Jlu zmoBI3xpnO|SVwYmyiYmE#o9;HLy2zhm-|)N<;(JQTC9BI;NP~y*^j_9?>wQ?lJg5A08TPyd7cd11XK% z0Oktoi}%oHln)^Z%b2N>;opI*5!vB%TcpO}0y^6V@-X=4a= zxN^_S4|?9ew&{C)c-if2A9!1Fsl*3oH(KpwXBFM)r#(NTL{S&Pm~Uo})p$3Klq?-> zsKfKUJ_%`Zc`u$dQd{SLD(qHDQD+?DU!0pBMO`3t-(MBhlf**2`{vb_ANy=kZmZ3l zahi9qKFk(eXPk*!q@Qw7-*Ap|%nnBou({uBKe(kie=d(`_~-2$ZX(m*1_A3da<%OG z4lh!r#daQ5&RlbE;+ne(*J?Cs){$w1Z$}KC+dkaspdH`9lNh6aNip9Ti}Usc{m|1T znsUZQej81JF`oLiNJkJblAi#|sNJ(ez=leh^~}XiE~^BpP&d!uN5+ynP3S}U=!>(z z+D?;$vSsa6qSH*82s4``dbs2NOoP7+UxjKXl=1<4pm>z;)y-G zQt5GQuo}Mx$E~@4vzD zZN86fHD5xp$T-nZ46@SJotzzve|RaBjKvU8k6*}JgU>N6<0ZzC;l8tzOx7RR+@dJ6KYf8a5(-ZU?p?Hi>B+8{-;mgW9_W z6L$GVhnIfTR=`gw7$|I4>M4@Z9pFnM#2uazdkL6U*`4;#Jve4>3RxtjZ7EZFpPEsn;t3Ka-lKYpRquW_5WU>$Np=q6ZXQz00hRWM_<2 ze=+8~`NejB@+XG9_Pu(ZL2^w#1jrvt-?%WKLmp?CP(@;)W!Y}I5aq?_LJRg>kTAbK zseIXCuX6Lc+%P>$a}bEXZQo&tTc$GUm(wpr)!LRbW_!Pu8=7`PH|zSf=pR?chMPu@ zN4#iE1JWe%`ZY%{m0qu*RKDw=IFv0WoBLz2i0ONOHkuyX;8v~g6?L?fDe zwo1;*vUu1$k8YPS7M22a0?B{7&ZgsT_Ix&%VueA*2t1p~ra=q!V!n#A34P7UyGP;` zw)x3_j;J`ryj*vdSDR|HGt!p|Rb;IY?)h|eE4RuCLY-%@z{R$H$8&38?B!FFB+Sd= zahJ|T(Qx|p^eSji?#4d5{dTDu+tg1nw@q~?rI+TWn`~ZXvp-UFu3Z%8e3u+gMpH3- z`8h8`X)1B8cW~eAR7~|$#C@WvF`w1k*NT^g!jq{cB8E&W2kt|sYqI;(=VJZfHG!1=z*v=u7^S)!IfM&V%LP?k5_PIaGsI;#lw7_fPa>0ov+qmuv$kk*vo6~d>T#R`MMRZ^oC8mIqqmz zif->wc+>UJ5E-kFqR_Kms%90Z?Zz>gOjm=|V%o-(0!Zk%JuIg$$eusLX0vKn%%s%E z5JoK}6px*TdBqLJ?Me}U*RxZxVr^Wj#1Bu!*oJ7(NFOC8!crJG#qKnF1=?bptG+V2 zXW?V7p#a(SXIsB3NjJZ-Ye2$`XH%nA5c))N&MyQ!8x+8fooVx=H}4X(*tt}?hPo?; z-qxcOcCV`!{$e~sVb=m)s@mG@!O=W)`}JhzH3uKkLbY1xQzI#V;f>^KNvstXn4V zTs~Q*QYJr-`qW%sVZ(oY9P>q7ZqJ9iY*l{nI-+wtnnl#=nJ_AQc8>2TLXv&lk7irF zXZ0<>89XN#W`cgPeVQO+HUj9kP&V=v1*zU;6Jh*grGv9oXaKjvd9%fa+$6GiX{hvm zv{a*}!*jll?|fN*KxljHbbWHaQj@@(Te=%4vpWXvg77ezrr{$@GW+V9N}IRFp;`5_ zy(VrX$ar3zyqzU``Zk@V4cjXy;5XqEWB$`=XK3ZyP}YwXeRGa3Ma!;FdHr!Ktm5qv z4nNSsT+VZ`FsiUq7EHRpC)kS|8QB?zotz_}$bu6~X3icu6YMea}zK#Mcdf8a;09{ABgiZqppHSsC=>Q#E~Uj%DNf+zm7%RlVo> z#7eO43MSs;vk4slgw*397Tc^37Rq7k-5zGWYR@IyX-3@`DujJFJ?Q5%b7Gcuu?*Od zD`jQ%fPsmB+H673bPvL6p;&M(Y?FA?IM863nRzQ3>n_hn)jr>3ZyTyUy|%Or*?MYr z;J9y9c!8oV$4dzV*1kP1S9CCeP|8Nxt#c)UNrj%#XniTi^Tw(+ajP8DN&4&(2D&3t zn)WO(uI)hBZ}{a_JlC%`Y)K9Vn0UHfON-@n!9dP`=WxpjHNuDVTl+jH>nvPh;m>pN zwqLf22u0!7LmH1@F`sMw_f=4=8>Zz zL7Y6gWSUaYzEj|Dk+8Rw5ATBwyL|C`c{M7OU<+-&ayP~(TQdh;{|XgJ)4!r;Gb$r( zjuzUkVt_#?0Y+PQFlY}))3pwhC(Z1VQLy)a>HUm9VuWY5+{PMT9;$R@E`p-ooA*t$ zxToQjiaK)!FbSE4JMpR+BOlM{iBa2D8hNktHQDUv5|SgormL$UzU`?mN~v=%S5v1r zj6FC8UFk-Dhq86m>Tzu_OXr+L+e>mSxf3&0);(=6CJi^6foUGyr&K9>#z_@ToW~G< 
zUEI`9FjVQ*F7<+|4+GCH=8V}}TTCX~=O9xf;e9B$5ScfwE|tf(n)br*97^V#Nk)1Tkg-A0|x>**zk zX&?f;4y0J?c&~IfQY1tZV0gM7`mk?#gWgBkMFDSdw;{ZMKe*L^&7EgFd&2q>o?fLnmeTdcOIpZ*cV7x9m6z$)7_pn$!uayo50^ZI|VBpTo3^wvMW6Lp}*XbON)m~YDL{nus zp?13VJcX;ha!>cd?J|3eH#-GlCVOO##%t@p@8{)mjs$j33HnjLO+kDrZgV&=+#hMV zOP`L7q4FFQ2>8w_(%<{H6yc}o+22VkML}3-_`@V=a z=TSn$f9ca?cMji}eCN&-#%Ac?v7ViuP?VW=?izDg&<~BQ4sxEwlZq0A*YdqT-|U{g zXwW}@tNt`M57kE4JI;LsTUN`t1nTGUoQx zlWMLybDOO08(YUChbd0?zL`~&l#8k7N|pHtrpz*4YwOe^4z1~bsCyW|=Ak-6O-O77f5O2K4DV^%af!=V&IL@p#S?$-bZ#Kn17uZ*9d&t^1fkv&XZJj~Voj z(&#;NOBq&Y;(mOoui|!VgnNTlfPF3YyGQKimjHdb^=YAB4$FzUz1hjD%Alk_qwj^9 zK^A}A6&T!`Dg8Q2?%s*kG00JFrPH>~-@MPSYu;q5x3AiN4`sK5gWmN%vX^S~+6XUy z$I3Rao#5(?oaUl--Y&5O7w21Qvhu zX9wE2RXg;5$(YFJrJu%B9iye-7<2w`&;y%KoT7>VMV$P{yO+09#xlauk=m-sEv}`$ zXws~P$+GpBZ$xEnd)A@SO z=g<3P?n&8m3y68LW{(PJ-3;*c(w^n8u1k`RiEK-_@`BX9`eLIk9IcflkggIZRs^}_P#9>6jeG#FD-EXg*GlQPh|7mZ7%T; zJ64Lk7fx*s>CJsSgO{XrKgh7cHzt?fe;T)c_XB#O#hyx5OLpZo!t&9LVr74+>qF2~ z(@Ecev+st5vfsbN!}mj}45iuX_O=8*)ZbiL(Bpa0q zqSSb{+nye^m?kSDcD&i%C0pKqPuEuXn!yG0jA8Bl$%7=y?g;sePNQuAaLv;MaIqh) z=_57XZKy5!KB6Pg0e)4HT;k<2)?j$C~l>0=3vkbiP`(&-V%Lm($0@9Z5}m0p>k_ojt12m|eGM zyV(00V}46t-bNtO+nemRX()%ejd44klqiT=aY17N|vQ^O{gjJ#& zEw_-G-5;O?`;Lx+Uv0V0l&`aBKY2s(A<5?&v;ijX2UG*g$pVwY@fL_w3Fb?=T)t(e z6{@A8nJwtY<-uHf{)JF~zaxDD-=1KO-bUF8gm?j# zSX&Vt(j`J|d;y?P(dI!zz9wd^&MaSRzi8!F2fqWZjN4l%qz`*;Y?in8c(E2RBy5rB z1P7V*dtVr?GJ4DoiRrH4wReB@;{}_Y(?e>>_8C<7Xogu`Kzf3IChuo;5oAW}fe6;z z?kQ_uIrQ}fX^N>oM^$6^2O7?GUpJUQU-l~&S9^J<``J3+VsCTY@@WBwE0q@m)uP&B zy*}-vc9xy!b7`K%+lBAjIDVLVe&P%GIt2kHMRM5}i{RePJ-esq==l^H;XW(!t0xAb zznvQ)f7PeqQ`TdDyO?c)e$leDjD`wxnl4LocfaaoO3VDn_AXMyKc(aVtAi6SuS@F; zmOG5ESmFAZ70cL3r^;MjrOrg&LaoxBD2I!}^>E1Mm#6Rvl~+{mM@T~-th3@Dxp~mH z{bA1+;>~&>N6yo@rxw@sRS_=st$TLezDaBmZXDTkW*^Ugv$Uu{hFD1Z81^qm@5NcC zQF=yd#s%Ws`#km{kA5WXD!Xg>M~nhx4W^>ER-lhBSTvIj5K>< zib?}H7IM|_BN$(IJS)R9HC>1F$~b&vhr?b?Uy77nD%W7d z5=;dcp_;3Is(quv^N690rm_a8^DoYhpdE6CgLSc5Bm380V*s)H>j5&GZ*ECXw-CR+ zZ1jmZRbrS0tu1YRNOmW5;IJy8MxSBgf)L|=R_aHlN??Kl>qmky0ZwVS!5D9>Z(c-1j%vNv4Qwa7=rVS<` z;R=0Ct7a@bjtjX_FSKU&fvjGZ_5^k~1-N5>Dc|`srwyBx4PJz0+}fIT>fGEu_*&6%qi5WI zvm+R8Gxpps75DCRW5d_q-AQQ9+s2(}7Q4_N^whqBBet=W#Ylko$|~7DsNSFK^jGXa zj=ECVd;MY;kJ4D@eqWZ<>G<5;&gXtqfzZ9YrmyCuXm{;pbcwKoMes_t;GqrP&dg)X zW`|6f60f6ObJgIZJ8E&GADJqcWfL@is1Dh*^#tEwF9&-!38vh%INwrhx=@uRtaWbL z`669&P0^mvE4HUqGtA>Y z@lRc2-0hlCbef~kO{gdNWdCj%^BO_Bx)AnL!>VM;j(ItbX4^wN&X@4(5;VAf(|OZ9 z$g;i@73;m*ijT;xR$XV0E~-6SgL$x-@$uG|{kc|}uW@8Lx=jZFBVHX=-_EaUzC)@SU`Uy-3;qrU=bTHyX_$NRc5~L3Iwf$dtdYZ?&bZ z)b|r2fZRcw@H$R)LjnEl~095-N zV&E8&u*Q8}*%Qyq>N;LS$<)-<*#SZKc33S%`UyL_*m^OIyB$NRVYE|!)n;5+7f3-) zmN5IW)(cUj=lNoVd5sk6N>D|f(caM;rR6w*@!Sq$0O$}Z)?;#Gq?_WH@DZGF^O2EX z?yw-)Br8W;N$qA-?F5(@+b3oR4~7Y&=}$eX{pyi{v

){UaF907F2$zb@V}dobK% zVfC+-62F1zkl@beOs%;BMqXB5;cHuR;NJaMD*nD+=C zsI)3rF($q?1qWh+IX&~vTw#skB;Bz;!=~XpGuG1ma-7Ib2T^GLG=JjSoYmc{>HFl` z1S)tnMmuiPqFC=B2>(i&;wZ5hjIU@5OIrf4E0Yp+I$>2~RIL#DcwFJmbgy&|@9}JkM8neoi++dWbi> zq=k5Lqdhz{R=u)`o_kfiO=#pL!mG5D{81bnerB}wvn!!klBzpdv^UaY+U6re((Y6$ zrBMCH$nf~!N9-9xC$7q>yrsJ6l;-m`LwU@(JNV=!AnsceP=vG=eQrw&X-XSkvsV{o}adRDJZ-1j1kVe z&EX-r*OS6R^8`a^Hyq(**ufzG>PZItPj4e?Iy#T-;^Ahtn8C1QhXqtHeTR}tyi-** zRUm6+pP(t)&zV`_f0m;6OguNr5cYQOg@&0yPywGy2O>_7Hl%$kofW&gZ3b&CnFu4t z(t1YQ-UQ6g!?aUYrUz{r<^^D_66Bkuvf7NVpx4`D45c>v`{*n<_Ut%%9hsFOtkw+O z@C(7ZEd}0ka$%m?Py~WddaJ-UX(J~4Hjq!NaI$-u(|dPTe-uXvMNYe56cjBy2VTzY za_;GJr*Oi!ymw-Hcbe%EsluockM25~7!d^MR(U<+>Fo$KEF04F=wv8xw2WMgu0gY5 zm4OMChnJN%D(y8?vL@Hr-u49wp7|$8gLYnN=g@0J**Fosy~974`&^4*sI!Z-E5xGM zdRqsxnbk&1f8TA&DfD7gj)%|5Q8SL(;Zz)4xcgxq7ocG~8CxiAm#1QV+(`KgiprIz zu9+Q2y_~TgoX07oNL6nEEhyo2ZMIwnF^1iCW{ffNHi5j%Ch5<(*{>ChlUr9hSLdSY^TY;Ys|KQUUW6nDYz@If*CnJznx-_hKEX5~H0WL0dr!kn)%%T} z9=0JLO|s)$Ueaiy9qTE?bFC>~aR=ojHtP<4MVVx;Q2!{J^WIU95)+}<8X|O%D)(G? zkf%~)f87rHif*)mDvz;wJVFnPzHgWD;+<^iEhE5YehPy678cl1-%*g^32dAzRlVHR z{n*LF`#gYi@3C&B5b~c#JU!+n-nM#+>+j-T1zWLo(4rQ?PTwb-Sadr{jk4~5sgR9O z)baip>vyh&+Pv^xobcucsO*f4k)|!Q<@wM|eOF6-)|WI0sTXoiro= z83Ep>w|7Uma0+zt*U=TfVu?Nh)Mq%>$>4_gQqCTQCdYEV-Qi%2Fpk9Alw$z-jr@{f#Db<_bGvPLx2KrrQX6fx# ze?ULvFnvximk(Fz(`DjBsol%Auq=CezaB#_Tg`U!$@UHD;a!M&RfD!cSXIjpXc`eRxFYkWJXs?uEg1DV=AM{+?ntqop0xrKJKDueH>k16m*J%jAiK7 z;q2btUKsKD)@HUlUNWmwbB1S;+qiaif3UB{(hawF`(1CDIi%X$(hNG8-q2`7gC5M3 zM+v^unVSUjS2WMHTeCG<_~~xkE6%{~?LGDo%PsE*%!a^AVrq@2iN}g;p{u*I&kMYi zvtF31di&y8b)xkB{y@J&VbieiK@>M{=tqoKV)}%)TGbopHoLjn^<6SY*hdK^f0^lr z&HxKJ6^IOQ-+Br}T7b(C@&}y&8=?i@|ASSo%0jEfXGN3ETCryL74Nc`OoUUY(^@@8 zL@!zI#6kQ0#A4*nX(YQv*_>7K1|vsYZz^efEP1g~leT#B7OmlP6dz|A(_T;fqcXJi zapPC5>1OYz;L{c`xn3wtJ8_qHf4F-(bvs^)a_z1>PI=gFdHwH_lC+zH*amT#DC&Y||XFcUfNp0Geq zG0A^WitupBml!M&j|XpjQaL&c-W)IjHUq>2sVFf=7J5*VI$!XrrIun5e`6Lzy`dL? 
zD^i`C<;3@NO{ai8KuO}*oTK;tq0Ph#^UhAXq}GRbF&b8PSbsbZ?KWRI+7S(!3Cl9V zAVTr4{|RFZ`(Yv!;{Sux1peRfClQP&cS_QLSVZI~gbY`}vck~u<2LI`uTpk)eOvmI zEjKG4vVW|3ezKj7=kw4Ve?h^}7$o*X$F^lzWwd2xraC!(2S6@I6 zJgUVe-jN=G1N&@PI7Y21vF9vVAX367qgEr`Kz83qWC=R($t<8gc%uU==)2LY_Weuq1E zyA{ZK#Z#`J&+DW$1s(r|-QHKSSk5_0?VU4=mXTMutZE5t|Khyz0Cc%yR~nS`vIIU{ zAtth-@u@*YB|YLBd!%ZeYNYNGIHDuS)c28{<1HJl6f$`Lu%f^)b`S(PGJix#T8oW-%FiO1eBg z#H<^f&BVW?EL=>?)JgGTQ{{pxrdU01I$C8-`2?rYG#lx8f1))9W3KcIyp)#Gcy`nz z&*M@!%q9kuOwY&LNq;1CW?XFZaSbjKJS!HJbHv5jrj!ywx&$65F3};u`?*Vdq-YOQ z$fvhwbciH<$DWh&ZpDyDT2TfrhUx6MS-y4X8XnTo>V0=wh1!PpX*QZ>Mx|-X(e@FU zs`+jOsMo**f1fqiqh%Yi``JRJ4%BRI-B&ky>;zhCw>9G~$ItjiCF2pb9`B4zm90-( zomIp2c~0fLL@fk(e5yfP)8<_BCs%u$ZzyMxpfHtgOTY?lpK8+4+n)?)$6y{W9I@?O zUEAj?rsE%hHa{9vAk`3I?$%bb1^E0_cKj(79;cdWf2YOTFr2nsSTTHJ{I>2PD>Dg} zmv(+%ZxGU_wE!g6;3pCTtAL%lI@O00QhjKzfxB?t2h1F{+17Kvm2NP&@5)o!yDq2A zD@eF*?c1}35b3dL4yg{t5W7!Y7o7050{5z^a5Qe*Zgt4zIObGI+oM&vzbP>j$xOUH z7^`Qze}9>W`(X+e$H*=^k4ABemLR5^GBr*)gidzvYH(T-IM63VCMW*#?p0}6rH z_`afUPnm@eToo z^2r&kd1s}5ze-MLv4E>|-Zvr4^&tYFZr3xoe@IU#ZHF?d_MtVhszKRtml`N3B8!aY zp*QTkKpr7(*hWaRYDS%FC0eIwb(GF!FWcN1Uen&9q7QA@&UckI%JPFDn{a18t>-0v zj2ld7TCG=meaBmdxzSLx!+%+XN;pJ-Zh7F&!7Ztd3EJZ4M~uqnAhxIH_KmstQfN1l ze{|1S`g$7o?B36Wr${en+uE8hr#lL_4y2C@)m(3rSW`kZVJq6*yL0_O!Hqk(ouVP41G`yQ!!kK)qU&-;x!(Gf6Enf=wl#i=!g9yK7o5L>f)>Lu`3r|0nhLq zYG}7e9*aXccYMxWDc86ut7>oOM*#vFPx;JoCwgB$%?X>IJMnbd(EBm`as;}-C*@`e z5gjN>aZIuA`lG(~UuuJLvuZ66K0!)G?iYgN8u{MXfaBtI3u>_Dn$6yimSY*pe-3~H zElZ%`4zuP~p#~s89YL9Ep|x$0Q`<2n6Fx;$dwrz3b(QtEe$R(Mh(}g)>B;%;T0l(?qj!m&adF;wt>04 zjlr(LaG_Jq*Q_|DuNt~+SBJrsf9JQ;fC?b79aLLGshGsM&b6Z2N6hpfNcJWuHTr(s z($|AFUyo034R(ezr+{+nBYHQ{5_+!XXgVppgu#3PbB%F394X^n9dj4ZtkYiC_XMPp zc3ED5_xZ-mK0p**Tj!u|hG8)>b~v8=d2eRW$ircGm$%eT>^iE;oC_lie>6fdaTQ!I zP#|d;LsHk&vCAl};Q~y4J}>Ya_AYU9S{!!_8(O35t%Ls+V}xwbCsX-$5FYKut>_!t z1-m#%1Wfw&ljR0*IMk3=z8j~HA`H_9`0SJ>X^T03Od-yv0$O+ca0Yu`-#oW|kZadd z&O-JSnhS3r?3`|_;%u~?f6r=Cy=0D&L_}y zUVx+Lz7`ye_1<>|^MDn6jM83|SNEfNNqA+B1u38Y}qH`SmESl8Q-3hGngu5Q)REV_tVOd%S;_pyVp^rk(bBrk zIBVQO3*$)flkqXWe~bigg5PSIc|-q^7!< zWn}36#u+h)Q4MP(vwhUe{C;Z$ONjf7bA)yF=h~a;+}jRM$Z5 z-=?t^<-xPuE5Vg}&iu0z++l=1ySnPC zJ8o270QGe@i4S)447;wQq|*B7xILZU&#V6GVZfYXis4R~+x&J<&Jj*7Wbl*z^&a1+`Kp8`G~)8L zV5}vVut2waH-W;4Oa;fa3@5wYwxYo0mpY+zB#pJ>f1ABFH;v)tqvIKz5%_ossMDc{ z$tzs#-&QWO>%45f9H}dxX|Cg^bX>TeF$puam_F2J0MPJ;WMZ31G0PQ4$pNK@dEMdK zHlfQ7&J;(Fzp;X~uwIi)?|>g;#&SJfR^DlB!X?U}HmQ8+vuEl6)h^8F>afL})fHZ! 
zlYH4>e+A#oD*n99wvu*2&Z-~AW4OjxBCCrKUKzy@v;w+lmJS5_kLnc_FGiz<=g1k~ zUoB|4Ef+kp8MKMW&(ok&#Z|hjW!p1uFBf%LD|Wd#Q)?EguW4=yN^9nCu2H!Qo3M($ z#cWF~`D}BlwihlFYPp?iigdVUh79m=s?es!e|I)gf6cADg3S(P2HQDrW%cPSr5p!d z`it2LP7mDl4oWB0^Jy-gC-t%h^Z*n~qNPLF;?Juvkx!ObH9ND1K=|VtMYnl}d5pA$ z{nRazd-(=w6eWjI`zWNguZLcYeVdj8%L*TBT$7>Rzd?bbrPsUGrniPGGb)1e=Azs< ze=ntrPPX^BEUL*N+U{3|Io+q-bo1Vg!yG8q5y1O>GD(thnh5?0lmuzciF4#9lY0gU z_y_2EQUS^Rw|u-59_5V_j+;L8yBl|_XXQB97h+lN_v5v7gdyFmnKx;7MtwOq-aALn zFhfrKv?a#qOJ?)>R z(d@kH0h&4maW)`B^@xE{dTne}Y~% z(2U6ztoZgcnq%PkwDT>E-SP*%xWdw8-DaBapRO32ogdr?E>K|<1<^#nfC|QrEex$r zyUA=ebB?S_GiYvlLErbYwtBu^py;kgBWA@&3I^GxUTR@h0H32qV+gw|oAH=IeVgXg zJbeQDpX}gY1VgvHrebRiX$kQZe_Wd>{(gj13k1q;llZaTr+G6`JxiZLdu>_mjf-RH zUa{*0k6u1(dXmg_)9w6RvLT>SM)$Br18~|EQxoMwapyPla|qd|V(Eg#&i346E4OyN zr5^R(Es>$kbty=0^w$!#+?*y59f5l{A-6vzC zKeOiByjDU{nz@{}kLgD38R}|;6(o8)vy37CIK~||;18J22JY|=oz8#BbUq$GDxDt` ziM0o5$(GB43FKa|DkKMICCumGe`o+eb(0@dHu1EhHTj^`mFdNW_AgArQAlnb0)cQl z9Q_L{Od*_smnFqSh0+$je^oXIoP@hyhQH>J!Z@ARjeN0&S3tgAVs>(rZ)Fh*F;Kyj z>;YG?KB}67iYC541hfKl%d7VrkCr5wh1s3B@_Rwh8G25 zI8Ym+Qi}hl;=4_j!&@*ru^0UW|C#U8*?pUtisno6X7^anOu%U&XGD7vHB>oc)raRg ze5Ued(fF1S_fj5*Tnk{LP|w;s3GnYoV&f9Jxf4s++o}h1%Qlp`Tzrb;$>iBl= zL|zfpjP)2Oe6uzWe=0jBGVW|nqvgaNWv?L%1hlZ`0$29opF+^U6U!D8B@dxvfZ6rV zq^we1OP;H6Sqvc;@Y>%{ka@>5yf!#Kgm`VSjQ{X@lCy^Y4mo{O@OptFsuBkk2G*3h zrFhVD20sJ;=G=hC8|SiwcRCl6k|74<(J;W*bKPklyL6~oMCypE zl-rxbzscwD-o8JUC!hEEHY2>(ND|<|mnB$h*!1FKLZ*J+e*uR);Bjgf+AIJg^^ND! zyAbJ9FIo{&f8=Ywzn{Doja3!4Z9%*jG5FkWr8a&Utt;XgYX*!Zh290y>gsG(M{u(M z4%sAJp6&2+G0fcn{7rbQo5*Hj%oWTN|JK>|^R=R2Qh)zDlW{9US=r^p0H5;|U}iOb zKZdN}0Sq7+!`tzWYt^F`#*)~XrZA->HfK&=76xM5e>!_N#N$EEz|ue8!?CKwLbMxh zCtG0CJR3^bZ)PBHkD7em4P%-7EP0CGlN~KKGlc?garl0m1N$|4vZ)GmlQUTkKS1K4$q?_dmVef{eBKu<37YX<7-Ot%!be7>yinc zH$MJ(f9B-bf&h%?M9vR+uHo<}SGwh}kFjs@vwI23FBc>JeRm z$Z8XgLK8snQrn2kDSj3wWML||vCp~~9Q2xC!(AUY)&<=T#uo4!?a&Sv)?5aC4H97B ztw(VSSDzBpACZb;#h8s;7E!wM{0ffWg_uYgEpdT*7kd}$r%uf-MzarR{Ikz5TA7^H zfA`P&;S7Zoi~W!DhV^6dZ}U4QAodM0;`6cLIsU{CoE7qOaGr7u$l&vMPB^bF>@m$f z90^Yf>LJ7+IQzTUPpU1ns4Q~U#vCHYdNBsGU5EUA{H)>WP+u@0iXRr%Kt2_LYeV!5 zZI;X|;`^|7aJ(qqvH#85?n^cqk3(*eA)e#DSZ8n!V{fx< zgeFso-_?KC7W)nB`m?v!(kC8~bs_76Jsi(IUxRp5m?hRy;cy=CF%sK`egAnTe+gzi zhPwdIX4vm=$xF_NPZ#YCYa@IOV%lc-cxK{Up$us8d%0vyJ{Ak@3`(%63VRF{K4@g3 z97^Fj)(-I|vuy$AI#*@ct6ej16rOhrEKDV_aNxqe+_{ut_U*-ifxYBbAUTcc6@GTe+sMEUexH&pm|$?n5msu`7SjL+2k-hGf*f679yTK zz7#gdCPW%_uXQmM6b6uUwAdELbZnz~RI7Drsj!_A3bn=hlqMH6#gnNyXVKb$xaGL` zia9E5nz|%;%^k6kjnJTKSzP(MUSL)3`<@-*|Ic2JKYO~wy7?;pZCJQde@(>Z4036> zpX(Hb&5dnEh0PN}9fh_?Gt!{}qqr5Ppf4$SnAU3%L-BjBB>wF~(%r$GyX!?(B)?aZ zaLfM{w}yRy^D^AGWN&<)70wU$@MDK#fwE69%2u*_9*CrtsiGJ+8#bpPIl)X6#0ZJZ zFMyM1LI|64)f8PkoyRzgf5e>vAAT-8x%FN61ubZ9n~`{c_;5Epjupv$kGL8_TAu8^ zuk|2%Z-~t#CVbAk_TD}Ox4Hu-3)v>C)QXrAN1o0&pa zDa_zls;+r>P1JAp4j&7WVd2+86f;xMgUi@`#>!0(jdBu$NZ!%8e=u*r{S4R)_vw3h z9=|b0#+f*{ZHN6Ni(KY#`2X0S5JC_g)LsZbZKt*-;tk?V z%eWF!8uE8T5OLqff3A!UV2q3q3bhXFf++&~13?f=>?3@g%JZCQD;|IsoGVy*`&2B3`qu!k(ZMliw;l`#16m>`T5G&q6MCG0McDc)UZS!o|LPi=Sup z{p`QG*>WpbJ(4?=<^&(Pg-e4%%v|jEgk9&VAh(UjqXgu; zOj1bPcG)?ee+M^VCG5$B{9YOP7S4l;d_*MgFSk94i#hr*zOkGvxEO#{i8wz8_egv{ zED6+><;l9Wi1S!C#NbbU`PrMmTjRaT*O<>?PhL*f8gg*ti$j+f`4L2^NPfh^;9%FI z4Nv03=9A+mQ+CK>zw>Zb0mko2LmpNuaLmEIsB&ZCe;Ym_c`8ZBAoGfIARyo7juTtx z$nQ~E=$%a2gUat+HYCrR#|)a}wj9ZyMeu`^J-PPm{B5PmX z-ql0S;<+IKN6O+GS0>MmxC4*ggosG3alwgmdeg>oc?v5U#$-`|=I!E441bTenFfNB zyA@B!e`DZT9Zr$Ll6}CDc#V=kd$IiRJa)qHJcj&xxF4|>KJh3gIZ6UobrQ3%H!!~Y zmG_ggju^Vr#0*BWxodL*edaOa0hWf&3-N@@)%EycZo~}hoQVPl+Ru%xEv%i3yu%5) z5ceSVst0YT7D{(qZSVo^B?cA@nVAmHpHz#ke-g82nv|?E2C|ePetq&H@)~hoNG$uh 
zKEpJN0KhN=pPK5?X#)_$5crs%h8u8d76FK12!02ZG>ZW2Fa+UINhb_I4ny!6R5F$n z030601};Uk`@)_*zhSc5q8^>9;tLH5|{tNAvVXVBMr5Zum7(wfNg^6aMn^YU?j1x z4H_OnPF9A(2Uzh`G8&5{_jUi#EP}kP423TfmYZVf$VqfhGc-Jc{H_dz53}NNOfiBz zhRP+#1ItkOhi$RwNhFfmcfrx{2=c}<6dS+=zytH)R4m9Z%TR1USARSjN$x!aH9UeG zv<$@tctumOljy$S<(XWNqn1bV2{%WRbPt>NV@ve(^n|t9)V{-q=x$fd_r96}l&2@y zd}Lo=zuj(|@BKCf!J^3&p9T9i$!-VE_a2-eSTvrDU@+lw2?Ce*zMMz#STvc4CnD*6 z&3?C2=XK z+zgK(aCz_bsm5L@(ST5y1Yyg2?@vLnBwg5vgq(&k${=jn;sq)gb`pu=020nD15sv+ zH|TmT9tdn1nN({q;g6z{=O3mpCjTx)?86kseBY&rf0)9U;(xmoiLEI{v!QFAXc_3> zgD@4_WRVDBu`0!QHXN!%5{I`n%))r|<4LjE2s+$m6|eAoXksQmj&NAg963a$4G$;J zJ1eVq2&QzjOca9y}uF zKTTNGD%?Z`c7L=MAA~HARFoLeU0xKJ(|vADTkhn8hFZmr`C@|%m)3OJS`h06li+X~ zF9Eb#GKZag&_D1Gl`#3&7lJG*Pl)vAHhMQ;>@#EIkrYU+T%X4qVC6`@@ru@$mHOeNYpnY2Fcoc zXODVbppO?hO1UIT&t$Z`*L?NuDi$rWrh6;xCZn#~>2Ij_`-uM!BG3FWM245%i(oyb zUO#Sk*?)hbyV|2*wK)N~NGPj4d>6;LSH}I;@R7Ef%vI2mi5L604jT=Sj2y+b@lyKy zaeR#QdIEpJVh0e=Wb>!ydj;jC*$ z0qcf?E=wnGG%#e;D2a!ywkU)^L2%*?<6}_K`GbNS{H1jHPrbd&mVSDfhc@o5nJ6WE z$MJpEA9U-4j4P%ay@gg2`2sjFyWecFO@A-#H?!l8pT)aWTXucf&dB^x8_aAl{irnXAN z7+W)qgQYJ2>sTSiw|^WQ{Oh>iZ9S+fNh}v!KG?qw?1z7Uk!as9-~M&Xy(ZQ)Bo2}q P{Nw)sCjRD4^I1ax;-6bc diff --git a/tools/server/webui/src/index.scss b/tools/server/webui/src/index.scss index 64460b74092e1..362db6e17df5e 100644 --- a/tools/server/webui/src/index.scss +++ b/tools/server/webui/src/index.scss @@ -41,6 +41,10 @@ html { max-width: 900px; } +.chat-bubble { + @apply break-words; +} + .chat-bubble-base-300 { --tw-bg-opacity: 1; --tw-text-opacity: 1; From 11d112f5ff62bce277c8c98a8b9b3cda00e10c93 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Wed, 11 Jun 2025 09:48:52 -0500 Subject: [PATCH 030/192] vulkan: Better thread-safety for command pools/buffers (#14116) This change moves the command pool/buffer tracking into a vk_command_pool structure. There are two instances per context (for compute+transfer) and two instances per device for operations that don't go through a context. This should prevent separate contexts from stomping on each other. --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 213 +++++++++++++++++---------- 1 file changed, 135 insertions(+), 78 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index e5200b96d0d8d..32d6407441535 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -102,18 +102,6 @@ static bool is_pow2(uint32_t x) { return x > 1 && (x & (x-1)) == 0; } struct ggml_backend_vk_context; -struct vk_queue { - uint32_t queue_family_index; - vk::Queue queue; - vk::CommandPool pool; - uint32_t cmd_buffer_idx; - std::vector cmd_buffers; - - vk::PipelineStageFlags stage_flags; - - bool transfer_only; -}; - #define MAX_PARAMETER_COUNT 8 struct vk_pipeline_struct { @@ -165,6 +153,40 @@ struct ggml_backend_vk_buffer_type_context { vk_device device; }; +struct vk_queue; + +// Stores command pool/buffers. There's an instance of this +// for each (context,queue) pair and for each (device,queue) pair. 
+struct vk_command_pool { + void init(vk_device& device, vk_queue *q_); + void destroy(vk::Device& device); + + vk::CommandPool pool; + uint32_t cmd_buffer_idx; + std::vector cmd_buffers; + + vk_queue *q; +}; + +struct vk_queue { + uint32_t queue_family_index; + vk::Queue queue; + + vk_command_pool cmd_pool; + + vk::PipelineStageFlags stage_flags; + + bool transfer_only; + + // copy everything except the cmd_pool + void copyFrom(vk_queue &other) { + queue_family_index = other.queue_family_index; + queue = other.queue; + stage_flags = other.stage_flags; + transfer_only = other.transfer_only; + } +}; + static const char * ggml_backend_vk_buffer_type_name(ggml_backend_buffer_type_t buft); static ggml_backend_buffer_t ggml_backend_vk_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size); static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type_t buft); @@ -482,10 +504,8 @@ struct vk_device_struct { ggml_vk_destroy_buffer(sync_staging); - device.destroyCommandPool(compute_queue.pool); - if (!single_queue) { - device.destroyCommandPool(transfer_queue.pool); - } + compute_queue.cmd_pool.destroy(device); + transfer_queue.cmd_pool.destroy(device); for (auto& pipeline : pipelines) { if (pipeline.second.expired()) { @@ -503,6 +523,20 @@ struct vk_device_struct { } }; +void vk_command_pool::init(vk_device& device, vk_queue *q_) { + cmd_buffer_idx = 0; + q = q_; + + vk::CommandPoolCreateInfo command_pool_create_info(vk::CommandPoolCreateFlags(VK_COMMAND_POOL_CREATE_TRANSIENT_BIT), q->queue_family_index); + pool = device->device.createCommandPool(command_pool_create_info); +} + +void vk_command_pool::destroy(vk::Device& device) { + device.destroyCommandPool(pool); + pool = nullptr; + cmd_buffers.clear(); +} + struct vk_buffer_struct { vk::Buffer buffer = VK_NULL_HANDLE; vk::DeviceMemory device_memory = VK_NULL_HANDLE; @@ -820,7 +854,7 @@ struct vk_context_struct { std::vector in_memcpys; std::vector out_memcpys; - vk_queue * q; + vk_command_pool * p {}; }; typedef std::shared_ptr vk_context; typedef std::weak_ptr vk_context_ref; @@ -936,6 +970,9 @@ struct ggml_backend_vk_context { std::vector descriptor_sets; uint32_t descriptor_set_idx {}; uint32_t pipeline_descriptor_set_requirements {}; + + vk_command_pool compute_cmd_pool; + vk_command_pool transfer_cmd_pool; }; static void * const vk_ptr_base = (void *)(uintptr_t) 0x1000; // NOLINT @@ -1205,41 +1242,31 @@ static void ggml_pipeline_allocate_descriptor_sets(ggml_backend_vk_context * ctx } } -static vk::CommandBuffer ggml_vk_create_cmd_buffer(vk_device& device, vk_queue& q) { +static vk::CommandBuffer ggml_vk_create_cmd_buffer(vk_device& device, vk_command_pool& p) { VK_LOG_DEBUG("ggml_vk_create_cmd_buffer()"); - std::lock_guard guard(device->mutex); - if (q.cmd_buffers.size() > q.cmd_buffer_idx) { + if (p.cmd_buffers.size() > p.cmd_buffer_idx) { // Reuse command buffer - return q.cmd_buffers[q.cmd_buffer_idx++]; + return p.cmd_buffers[p.cmd_buffer_idx++]; } vk::CommandBufferAllocateInfo command_buffer_alloc_info( - q.pool, + p.pool, vk::CommandBufferLevel::ePrimary, 1); const std::vector cmd_buffers = device->device.allocateCommandBuffers(command_buffer_alloc_info); auto buf = cmd_buffers.front(); - q.cmd_buffers.push_back(buf); - q.cmd_buffer_idx++; + p.cmd_buffers.push_back(buf); + p.cmd_buffer_idx++; return buf; } -static vk_submission ggml_vk_create_submission(vk_device& device, vk_queue& q, std::vector wait_semaphores, std::vector signal_semaphores) { - VK_LOG_DEBUG("ggml_vk_create_submission()"); - 
vk_submission s; - s.buffer = ggml_vk_create_cmd_buffer(device, q); - s.wait_semaphores = std::move(wait_semaphores); - s.signal_semaphores = std::move(signal_semaphores); - return s; -} - static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) { if (ctx->seqs.empty()) { if (fence) { - ctx->q->queue.submit({}, fence); + ctx->p->q->queue.submit({}, fence); } return; } @@ -1278,7 +1305,7 @@ static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) { tl_signal_vals.push_back({}); tl_signal_semaphores.push_back({}); for (size_t i = 0; i < submission.wait_semaphores.size(); i++) { - stage_flags[idx].push_back(ctx->q->stage_flags); + stage_flags[idx].push_back(ctx->p->q->stage_flags); tl_wait_vals[idx].push_back(submission.wait_semaphores[i].value); tl_wait_semaphores[idx].push_back(submission.wait_semaphores[i].s); } @@ -1308,7 +1335,7 @@ static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) { } } - ctx->q->queue.submit(submit_infos, fence); + ctx->p->q->queue.submit(submit_infos, fence); ctx->seqs.clear(); } @@ -1366,28 +1393,25 @@ static void ggml_vk_create_queue(vk_device& device, vk_queue& q, uint32_t queue_ q.queue_family_index = queue_family_index; q.transfer_only = transfer_only; - vk::CommandPoolCreateInfo command_pool_create_info_compute(vk::CommandPoolCreateFlags(VK_COMMAND_POOL_CREATE_TRANSIENT_BIT), queue_family_index); - q.pool = device->device.createCommandPool(command_pool_create_info_compute); - - q.cmd_buffer_idx = 0; + q.cmd_pool.init(device, &q); q.queue = device->device.getQueue(queue_family_index, queue_index); q.stage_flags = stage_flags; } -static vk_context ggml_vk_create_context(ggml_backend_vk_context * ctx, vk_queue& q) { +static vk_context ggml_vk_create_context(ggml_backend_vk_context * ctx, vk_command_pool& p) { vk_context result = std::make_shared(); VK_LOG_DEBUG("ggml_vk_create_context(" << result << ")"); ctx->gc.contexts.emplace_back(result); - result->q = &q; + result->p = &p; return result; } -static vk_context ggml_vk_create_temporary_context(vk_queue& q) { +static vk_context ggml_vk_create_temporary_context(vk_command_pool& p) { vk_context result = std::make_shared(); VK_LOG_DEBUG("ggml_vk_create_temporary_context(" << result << ")"); - result->q = &q; + result->p = &p; return result; } @@ -1420,15 +1444,29 @@ static vk::Event ggml_vk_create_event(ggml_backend_vk_context * ctx) { return ctx->gc.events[ctx->event_idx++]; } -static void ggml_vk_queue_cleanup(vk_device& device, vk_queue& q) { - VK_LOG_DEBUG("ggml_vk_queue_cleanup()"); - std::lock_guard guard(device->mutex); +static void ggml_vk_command_pool_cleanup(vk_device& device, vk_command_pool& p) { + VK_LOG_DEBUG("ggml_vk_command_pool_cleanup()"); // Requires command buffers to be done - device->device.resetCommandPool(q.pool); - q.cmd_buffer_idx = 0; + device->device.resetCommandPool(p.pool); + p.cmd_buffer_idx = 0; +} + +static void ggml_vk_queue_command_pools_cleanup(vk_device& device) { + VK_LOG_DEBUG("ggml_vk_queue_command_pools_cleanup()"); + + // Arbitrary frequency to cleanup/reuse command buffers + static constexpr uint32_t cleanup_frequency = 10; + + if (device->compute_queue.cmd_pool.cmd_buffer_idx >= cleanup_frequency) { + ggml_vk_command_pool_cleanup(device, device->compute_queue.cmd_pool); + } + if (device->transfer_queue.cmd_pool.cmd_buffer_idx >= cleanup_frequency) { + ggml_vk_command_pool_cleanup(device, device->transfer_queue.cmd_pool); + } } + static uint32_t find_properties(const vk::PhysicalDeviceMemoryProperties* mem_props, vk::MemoryRequirements* mem_req, 
vk::MemoryPropertyFlags flags) { for (uint32_t i = 0; i < mem_props->memoryTypeCount; ++i) { vk::MemoryType memory_type = mem_props->memoryTypes[i]; @@ -1447,8 +1485,6 @@ static vk_buffer ggml_vk_create_buffer(vk_device& device, size_t size, vk::Memor throw vk::OutOfDeviceMemoryError("Requested buffer size exceeds device memory allocation limit"); } - std::lock_guard guard(device->mutex); - vk_buffer buf = std::make_shared(); if (size == 0) { @@ -1577,11 +1613,11 @@ static vk_subbuffer ggml_vk_subbuffer(vk_buffer& buf) { static void ggml_vk_sync_buffers(vk_context& ctx) { VK_LOG_DEBUG("ggml_vk_sync_buffers()"); - const bool transfer_queue = ctx->q->transfer_only; + const bool transfer_queue = ctx->p->q->transfer_only; ctx->s->buffer.pipelineBarrier( - ctx->q->stage_flags, - ctx->q->stage_flags, + ctx->p->q->stage_flags, + ctx->p->q->stage_flags, {}, { { { !transfer_queue ? (vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) : (vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eTransferWrite) }, @@ -1600,8 +1636,8 @@ static void ggml_vk_wait_events(vk_context& ctx, std::vector&& events ctx->s->buffer.waitEvents( events, - ctx->q->stage_flags, - ctx->q->stage_flags, + ctx->p->q->stage_flags, + ctx->p->q->stage_flags, {}, {}, {} @@ -3358,7 +3394,8 @@ static vk_device ggml_vk_get_device(size_t idx) { ggml_vk_create_queue(device, device->transfer_queue, transfer_queue_family_index, transfer_queue_index, { vk::PipelineStageFlagBits::eTransfer }, true); } else { // TODO: Use pointer or reference to avoid copy - device->transfer_queue = device->compute_queue; + device->transfer_queue.copyFrom(device->compute_queue); + device->transfer_queue.cmd_pool.init(device, &device->transfer_queue); } device->buffer_type = { @@ -3724,6 +3761,9 @@ static void ggml_vk_init(ggml_backend_vk_context * ctx, size_t idx) { ctx->fence = ctx->device->device.createFence({}); ctx->almost_ready_fence = ctx->device->device.createFence({}); + ctx->compute_cmd_pool.init(ctx->device, &ctx->device->compute_queue); + ctx->transfer_cmd_pool.init(ctx->device, &ctx->device->transfer_queue); + #ifdef GGML_VULKAN_CHECK_RESULTS const char* skip_checks = getenv("GGML_VULKAN_SKIP_CHECKS"); vk_skip_checks = (skip_checks == NULL ? 
0 : atoi(skip_checks)); @@ -4089,9 +4129,9 @@ static void ggml_vk_host_get(vk_device& device, const void * ptr, vk_buffer& buf } } -static vk_submission ggml_vk_begin_submission(vk_device& device, vk_queue& q, bool one_time = true) { +static vk_submission ggml_vk_begin_submission(vk_device& device, vk_command_pool& p, bool one_time = true) { vk_submission s; - s.buffer = ggml_vk_create_cmd_buffer(device, q); + s.buffer = ggml_vk_create_cmd_buffer(device, p); if (one_time) { s.buffer.begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit }); } else { @@ -4176,7 +4216,7 @@ static void ggml_vk_ctx_begin(vk_device& device, vk_context& subctx) { ggml_vk_ctx_end(subctx); } - subctx->seqs.push_back({ ggml_vk_begin_submission(device, *subctx->q) }); + subctx->seqs.push_back({ ggml_vk_begin_submission(device, *subctx->p) }); subctx->s = subctx->seqs[subctx->seqs.size() - 1].data(); } @@ -4377,7 +4417,9 @@ static void ggml_vk_buffer_write_2d(vk_buffer& dst, size_t offset, const void * memcpy((uint8_t *)dst->ptr + offset + i * width, (const uint8_t *) src + i * spitch, width); } } else { - vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue); + std::lock_guard guard(dst->device->mutex); + + vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue.cmd_pool); ggml_vk_ctx_begin(dst->device, subctx); ggml_vk_buffer_write_2d_async(subctx, dst, offset, src, spitch, width, height, true); ggml_vk_ctx_end(subctx); @@ -4389,6 +4431,7 @@ static void ggml_vk_buffer_write_2d(vk_buffer& dst, size_t offset, const void * ggml_vk_submit(subctx, dst->device->fence); VK_CHECK(dst->device->device.waitForFences({ dst->device->fence }, true, UINT64_MAX), "vk_buffer_write_2d waitForFences"); dst->device->device.resetFences({ dst->device->fence }); + ggml_vk_queue_command_pools_cleanup(dst->device); } } @@ -4465,7 +4508,9 @@ static void ggml_vk_buffer_read(vk_buffer& src, size_t offset, void * dst, size_ memcpy(dst, (uint8_t *) src->ptr + offset, size); } else { - vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue); + std::lock_guard guard(src->device->mutex); + + vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue.cmd_pool); ggml_vk_ctx_begin(src->device, subctx); ggml_vk_buffer_read_async(subctx, src, offset, dst, size, true); ggml_vk_ctx_end(subctx); @@ -4473,6 +4518,7 @@ static void ggml_vk_buffer_read(vk_buffer& src, size_t offset, void * dst, size_ ggml_vk_submit(subctx, src->device->fence); VK_CHECK(src->device->device.waitForFences({ src->device->fence }, true, UINT64_MAX), "vk_buffer_read waitForFences"); src->device->device.resetFences({ src->device->fence }); + ggml_vk_queue_command_pools_cleanup(src->device); for (auto& cpy : subctx->out_memcpys) { memcpy(cpy.dst, cpy.src, cpy.n); @@ -4492,15 +4538,17 @@ static void ggml_vk_buffer_copy_async(vk_context& ctx, vk_buffer& dst, size_t ds static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) { if (src->device == dst->device) { + std::lock_guard guard(src->device->mutex); VK_LOG_DEBUG("ggml_vk_buffer_copy(SINGLE_DEVICE, " << size << ")"); // Copy within the device - vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue); + vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue.cmd_pool); ggml_vk_ctx_begin(src->device, subctx); ggml_vk_buffer_copy_async(subctx, dst, dst_offset, src, src_offset, size); ggml_vk_ctx_end(subctx); 
ggml_vk_submit(subctx, src->device->fence); VK_CHECK(src->device->device.waitForFences({ src->device->fence }, true, UINT64_MAX), "vk_buffer_copy waitForFences"); src->device->device.resetFences({ src->device->fence }); + ggml_vk_queue_command_pools_cleanup(src->device); } else { VK_LOG_DEBUG("ggml_vk_buffer_copy(MULTI_DEVICE, " << size << ")"); // Copy device to device @@ -4525,7 +4573,8 @@ static void ggml_vk_buffer_memset_async(vk_context& ctx, vk_buffer& dst, size_t static void ggml_vk_buffer_memset(vk_buffer& dst, size_t offset, uint32_t c, size_t size) { VK_LOG_DEBUG("ggml_vk_buffer_memset(" << offset << ", " << c << ", " << size << ")"); - vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue); + std::lock_guard guard(dst->device->mutex); + vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue.cmd_pool); ggml_vk_ctx_begin(dst->device, subctx); subctx->s->buffer.fillBuffer(dst->buffer, offset, size, c); ggml_vk_ctx_end(subctx); @@ -4533,6 +4582,7 @@ static void ggml_vk_buffer_memset(vk_buffer& dst, size_t offset, uint32_t c, siz ggml_vk_submit(subctx, dst->device->fence); VK_CHECK(dst->device->device.waitForFences({ dst->device->fence }, true, UINT64_MAX), "vk_memset waitForFences"); dst->device->device.resetFences({ dst->device->fence }); + ggml_vk_queue_command_pools_cleanup(dst->device); } static uint32_t ggml_vk_guess_split_k(ggml_backend_vk_context * ctx, int m, int n, int k, const vk_pipeline& pipeline) { @@ -7894,7 +7944,7 @@ static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t ggml_vk_buffer_write(d_X, 0, x, sizeof(X_TYPE) * k * m * batch); ggml_vk_buffer_write(d_Y, 0, y, sizeof(Y_TYPE) * k * n * batch); - vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue); + vk_context subctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool); ggml_vk_ctx_begin(ctx->device, subctx); for (size_t i = 0; i < num_it; i++) { ggml_vk_matmul( @@ -7910,6 +7960,7 @@ static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t ggml_vk_submit(subctx, ctx->fence); VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_matmul waitForFences"); ctx->device->device.resetFences({ ctx->fence }); + ggml_vk_queue_command_pools_cleanup(ctx->device); auto end = std::chrono::high_resolution_clock::now(); double time = std::chrono::duration_cast(end-begin).count() / 1000.0; @@ -8011,8 +8062,8 @@ static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t free(d_chk); - ggml_vk_queue_cleanup(ctx->device, ctx->device->transfer_queue); - ggml_vk_queue_cleanup(ctx->device, ctx->device->compute_queue); + ggml_vk_command_pool_cleanup(ctx->device, ctx->compute_cmd_pool); + ggml_vk_command_pool_cleanup(ctx->device, ctx->transfer_cmd_pool); ggml_vk_destroy_buffer(d_X); ggml_vk_destroy_buffer(d_Y); @@ -8105,7 +8156,7 @@ static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, size_t ne, ggml_ ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz); - vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue); + vk_context subctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool); ggml_vk_ctx_begin(ctx->device, subctx); const std::vector pc = { 1, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne, (uint32_t)ne }; ggml_vk_dispatch_pipeline(ctx, subctx, p, { vk_subbuffer{ qx_buf, 0, qx_sz }, vk_subbuffer{ x_buf, 0, x_sz_f16 } }, pc, { (uint32_t)ne, 1, 1}); @@ -8116,6 +8167,7 @@ static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, 
size_t ne, ggml_ ggml_vk_submit(subctx, ctx->fence); VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant waitForFences"); ctx->device->device.resetFences({ ctx->fence }); + ggml_vk_queue_command_pools_cleanup(ctx->device); auto end = std::chrono::high_resolution_clock::now(); @@ -8205,7 +8257,7 @@ static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, size_t ne, ggml_ // // ggml_vk_buffer_write(x_buf, 0, x, x_sz); // -// vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue); +// vk_context subctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool); // ggml_vk_ctx_begin(ctx->device, subctx); // ggml_vk_quantize_q8_1(ctx, subctx, ggml_vk_subbuffer(x_buf), ggml_vk_subbuffer(qx_buf), ne); // ggml_vk_ctx_end(subctx); @@ -8215,6 +8267,7 @@ static void ggml_vk_test_dequant(ggml_backend_vk_context * ctx, size_t ne, ggml_ // ggml_vk_submit(subctx, ctx->fence); // VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_quantize waitForFences"); // ctx->device->device.resetFences({ ctx->fence }); +// ggml_vk_queue_command_pools_cleanup(ctx->device); // // auto end = std::chrono::high_resolution_clock::now(); // @@ -8379,7 +8432,7 @@ static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, ggml_vk_buffer_write(qx_buf, 0, qx, qx_sz); ggml_vk_buffer_write(y_buf, 0, y, y_sz); - vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue); + vk_context subctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool); ggml_vk_ctx_begin(ctx->device, subctx); if (mmq) { for (size_t i = 0; i < num_it; i++) { @@ -8408,6 +8461,7 @@ static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, ggml_vk_submit(subctx, ctx->fence); VK_CHECK(ctx->device->device.waitForFences({ ctx->fence }, true, UINT64_MAX), "ggml_vk_test_dequant waitForFences"); ctx->device->device.resetFences({ ctx->fence }); + ggml_vk_queue_command_pools_cleanup(ctx->device); auto end = std::chrono::high_resolution_clock::now(); @@ -8722,7 +8776,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod if (!dryrun) { if (ctx->compute_ctx.expired()) { - compute_ctx = ggml_vk_create_context(ctx, ctx->device->compute_queue); + compute_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool); ctx->compute_ctx = compute_ctx; ggml_vk_ctx_begin(ctx->device, compute_ctx); } else { @@ -9168,8 +9222,8 @@ static void ggml_vk_graph_cleanup(ggml_backend_vk_context * ctx) { } ctx->gc.temp_buffers.clear(); - ggml_vk_queue_cleanup(ctx->device, ctx->device->compute_queue); - ggml_vk_queue_cleanup(ctx->device, ctx->device->transfer_queue); + ggml_vk_command_pool_cleanup(ctx->device, ctx->compute_cmd_pool); + ggml_vk_command_pool_cleanup(ctx->device, ctx->transfer_cmd_pool); for (size_t i = 0; i < ctx->gc.semaphores.size(); i++) { ctx->device->device.destroySemaphore({ ctx->gc.semaphores[i].s }); @@ -9224,6 +9278,9 @@ static void ggml_vk_cleanup(ggml_backend_vk_context * ctx) { } ctx->descriptor_pools.clear(); ctx->descriptor_sets.clear(); + + ctx->compute_cmd_pool.destroy(ctx->device->device); + ctx->transfer_cmd_pool.destroy(ctx->device->device); } static int ggml_vk_get_device_count() { @@ -9490,7 +9547,7 @@ static void ggml_backend_vk_set_tensor_async(ggml_backend_t backend, ggml_tensor if (ctx->transfer_ctx.expired()) { // Initialize new transfer context - transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue); + transfer_ctx = 
ggml_vk_create_context(ctx, ctx->transfer_cmd_pool); ctx->transfer_ctx = transfer_ctx; ggml_vk_ctx_begin(ctx->device, transfer_ctx); } else { @@ -9513,7 +9570,7 @@ static void ggml_backend_vk_get_tensor_async(ggml_backend_t backend, const ggml_ if (ctx->transfer_ctx.expired()) { // Initialize new transfer context - transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue); + transfer_ctx = ggml_vk_create_context(ctx, ctx->transfer_cmd_pool); ctx->transfer_ctx = transfer_ctx; ggml_vk_ctx_begin(ctx->device, transfer_ctx); } else { @@ -9536,7 +9593,7 @@ static bool ggml_backend_vk_cpy_tensor_async(ggml_backend_t backend, const ggml_ if (ctx->transfer_ctx.expired()) { // Initialize new transfer context - transfer_ctx = ggml_vk_create_context(ctx, ctx->device->transfer_queue); + transfer_ctx = ggml_vk_create_context(ctx, ctx->transfer_cmd_pool); ctx->transfer_ctx = transfer_ctx; ggml_vk_ctx_begin(ctx->device, transfer_ctx); } else { @@ -9629,7 +9686,7 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg ctx->device->device.resetQueryPool(ctx->device->query_pool, 0, cgraph->n_nodes+1); GGML_ASSERT(ctx->compute_ctx.expired()); - compute_ctx = ggml_vk_create_context(ctx, ctx->device->compute_queue); + compute_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool); ctx->compute_ctx = compute_ctx; ggml_vk_ctx_begin(ctx->device, compute_ctx); compute_ctx->s->buffer.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->device->query_pool, 0); @@ -9664,7 +9721,7 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg if (vk_perf_logger_enabled) { if (ctx->compute_ctx.expired()) { - compute_ctx = ggml_vk_create_context(ctx, ctx->device->compute_queue); + compute_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool); ctx->compute_ctx = compute_ctx; ggml_vk_ctx_begin(ctx->device, compute_ctx); } else { From b61ab1abbf516811be7a80e141872c6c792b55be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Wed, 11 Jun 2025 17:16:32 +0200 Subject: [PATCH 031/192] tests : add test-tokenizers-repo (#14017) --- tests/CMakeLists.txt | 38 +++++++++++++++++++++++++++++++++-- tests/test-tokenizers-repo.sh | 36 +++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 2 deletions(-) create mode 100755 tests/test-tokenizers-repo.sh diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 2f7bad2cf7ec9..85299837c2f8a 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -42,6 +42,34 @@ function(llama_test target) set_property(TEST ${TEST_NAME} PROPERTY LABELS ${LLAMA_TEST_LABEL}) endfunction() +function(llama_test_cmd target) + include(CMakeParseArguments) + set(options) + set(oneValueArgs NAME LABEL WORKING_DIRECTORY) + set(multiValueArgs ARGS) + cmake_parse_arguments(LLAMA_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + if (NOT DEFINED LLAMA_TEST_LABEL) + set(LLAMA_TEST_LABEL "main") + endif() + if (NOT DEFINED LLAMA_TEST_WORKING_DIRECTORY) + set(LLAMA_TEST_WORKING_DIRECTORY .) + endif() + if (DEFINED LLAMA_TEST_NAME) + set(TEST_NAME ${LLAMA_TEST_NAME}) + else() + set(TEST_NAME ${target}) + endif() + + add_test( + NAME ${TEST_NAME} + WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY} + COMMAND ${target} + ${LLAMA_TEST_ARGS}) + + set_property(TEST ${TEST_NAME} PROPERTY LABELS ${LLAMA_TEST_LABEL}) +endfunction() + # Builds and runs a test source file. 
# Optional args: # - NAME: name of the executable & test target (defaults to the source file name without extension) @@ -97,8 +125,14 @@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${CMAKE llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf) -# TODO: missing HF tokenizer for this model in convert_hf_to_gguf_update.py, see https://github.com/ggml-org/llama.cpp/pull/13847 -# llama_test(test-tokenizer-0 NAME test-tokenizer-0-nomic-bert-moe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-nomic-bert-moe.gguf) +if (NOT WIN32) + llama_test_cmd( + ${CMAKE_CURRENT_SOURCE_DIR}/test-tokenizers-repo.sh + NAME test-tokenizers-ggml-vocabs + WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + ARGS https://huggingface.co/ggml-org/vocabs ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocabs + ) +endif() if (LLAMA_LLGUIDANCE) llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf) diff --git a/tests/test-tokenizers-repo.sh b/tests/test-tokenizers-repo.sh new file mode 100755 index 0000000000000..86e839133ce62 --- /dev/null +++ b/tests/test-tokenizers-repo.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +if [ $# -lt 2 ]; then + printf "Usage: $0 []\n" + exit 1 +fi + +if [ $# -eq 3 ]; then + toktest=$3 +else + toktest="./test-tokenizer-0" +fi + +if [ ! -x $toktest ]; then + printf "Test executable \"$toktest\" not found!\n" + exit 1 +fi + +repo=$1 +folder=$2 + +if [ -d $folder ] && [ -d $folder/.git ]; then + (cd $folder; git pull) +else + git clone $repo $folder +fi + +shopt -s globstar +for gguf in $folder/**/*.gguf; do + if [ -f $gguf.inp ] && [ -f $gguf.out ]; then + $toktest $gguf + else + printf "Found \"$gguf\" without matching inp/out files, ignoring...\n" + fi +done + From 7d9211bdab12d8593ad05a5d957a1863a798e846 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Wed, 11 Jun 2025 19:04:23 +0200 Subject: [PATCH 032/192] chore : clean up relative source dir paths (#14128) --- common/CMakeLists.txt | 8 +++---- tests/CMakeLists.txt | 54 +++++++++++++++++++++---------------------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 564af1448f95a..8ba02c1462c91 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -7,8 +7,8 @@ llama_add_compile_flags() # Build info header # -if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/../.git") - set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../.git") +if(EXISTS "${PROJECT_SOURCE_DIR}/.git") + set(GIT_DIR "${PROJECT_SOURCE_DIR}/.git") # Is git submodule if(NOT IS_DIRECTORY "${GIT_DIR}") @@ -18,7 +18,7 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/../.git") if (SLASH_POS EQUAL 0) set(GIT_DIR "${REAL_GIT_DIR}") else() - set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../${REAL_GIT_DIR}") + set(GIT_DIR "${PROJECT_SOURCE_DIR}/${REAL_GIT_DIR}") endif() endif() @@ -42,7 +42,7 @@ add_custom_command( -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_SYSTEM_NAME=${CMAKE_SYSTEM_NAME} -DCMAKE_SYSTEM_PROCESSOR=${CMAKE_SYSTEM_PROCESSOR} -P "${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info-gen-cpp.cmake" - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/.." 
+ WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}" DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/build-info.cpp.in" ${GIT_INDEX} VERBATIM ) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 85299837c2f8a..db4b2cf65cc43 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -111,31 +111,31 @@ endfunction() # build test-tokenizer-0 target once and add many tests llama_build(test-tokenizer-0.cpp) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bert-bge.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-command-r.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-coder.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-llm.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-phi-3.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-qwen2.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf) +llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-bert-bge.gguf) +llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-command-r.gguf) +llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-coder.gguf) +llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-llm.gguf) +llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon.gguf) +llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2.gguf) +llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf) +llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm.gguf) +llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt.gguf) +llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-phi-3.gguf) +llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-qwen2.gguf) +llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact.gguf) +llama_test(test-tokenizer-0 NAME 
test-tokenizer-0-starcoder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder.gguf) if (NOT WIN32) llama_test_cmd( ${CMAKE_CURRENT_SOURCE_DIR}/test-tokenizers-repo.sh NAME test-tokenizers-ggml-vocabs WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} - ARGS https://huggingface.co/ggml-org/vocabs ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocabs + ARGS https://huggingface.co/ggml-org/vocabs ${PROJECT_SOURCE_DIR}/models/ggml-vocabs ) endif() if (LLAMA_LLGUIDANCE) - llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf) + llama_build_and_test(test-grammar-llguidance.cpp ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf) endif () if (NOT WIN32 OR NOT BUILD_SHARED_LIBS) @@ -147,8 +147,8 @@ if (NOT WIN32 OR NOT BUILD_SHARED_LIBS) llama_build_and_test(test-chat.cpp) # TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8 if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") - llama_build_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..) - target_include_directories(test-json-schema-to-grammar PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../tools/server) + llama_build_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) + target_include_directories(test-json-schema-to-grammar PRIVATE ${PROJECT_SOURCE_DIR}/tools/server) endif() if (NOT GGML_BACKEND_DL) @@ -161,20 +161,20 @@ if (NOT WIN32 OR NOT BUILD_SHARED_LIBS) llama_build(test-tokenizer-1-bpe.cpp) # TODO: disabled due to slowness - #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf) - #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf) - #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf) - #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf) - #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf --ignore-merges) - #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf) - #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf) - #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-aquila.gguf) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon.gguf) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2.gguf) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-neox.gguf) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf --ignore-merges) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt.gguf) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact.gguf) + #llama_test(test-tokenizer-1-bpe NAME 
test-tokenizer-1-starcoder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder.gguf) # build test-tokenizer-1-spm target once and add many tests llama_build(test-tokenizer-1-spm.cpp) - llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf) - #llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf) + llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm.gguf) + #llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-baichuan.gguf) # llama_build_and_test(test-double-float.cpp) # SLOW endif() From caa52bcb35a71c9053e95eba128426af9254772e Mon Sep 17 00:00:00 2001 From: Christian Kastner Date: Wed, 11 Jun 2025 19:07:44 +0000 Subject: [PATCH 033/192] Implement GGML_CPU_ALL_VARIANTS for ARM (#14080) * ggml-cpu: Factor out feature detection build from x86 * ggml-cpu: Add ARM feature detection and scoring This is analogous to cpu-feats-x86.cpp. However, to detect compile-time activation of features, we rely on GGML_USE_ which need to be set in cmake, instead of GGML_ that users would set for x86. This is because on ARM, users specify features with GGML_CPU_ARM_ARCH, rather than with individual flags. * ggml-cpu: Implement GGML_CPU_ALL_VARIANTS for ARM Like x86, however to pass around arch flags within cmake, we use GGML_INTERNAL_ as we don't have GGML_. Some features are optional, so we may need to build multiple backends per arch version (armv8.2_1, armv8.2_2, ...), and let the scoring function sort out which one can be used. * ggml-cpu: Limit ARM GGML_CPU_ALL_VARIANTS to Linux for now The other platforms will need their own specific variants. This also fixes the bug that the the variant-building branch was always being executed as the else-branch of GGML_NATIVE=OFF. The branch is moved to an elseif-branch which restores the previous behavior. 
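For illustration only (not part of this patch, and not the actual ggml loader API): the per-variant selection reduces to a small score function in which every compile-time feature requirement gates on runtime detection, and the registry keeps the variant with the highest non-zero score. A minimal C++ sketch with hypothetical names:

    // host_features would be filled at runtime, e.g. from getauxval() on Linux
    // or sysctlbyname() on Apple platforms.
    struct host_features { bool dotprod; bool sve; bool i8mm; };

    // A variant built for a feature the host lacks is unusable (score 0);
    // each extra feature adds a distinct bit so more capable variants win.
    static int score_variant(const host_features & hf,
                             bool need_dotprod, bool need_sve, bool need_i8mm) {
        int score = 1;
        if (need_dotprod) { if (!hf.dotprod) return 0; score += 1 << 1; }
        if (need_sve)     { if (!hf.sve)     return 0; score += 1 << 2; }
        if (need_i8mm)    { if (!hf.i8mm)    return 0; score += 1 << 3; }
        return score;
    }
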
--- ggml/src/CMakeLists.txt | 44 ++++++++--- ggml/src/ggml-cpu/CMakeLists.txt | 70 +++++++++++++++--- ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp | 94 ++++++++++++++++++++++++ 3 files changed, 184 insertions(+), 24 deletions(-) create mode 100644 ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index d91dbc46fe9e1..726da5e048b18 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -270,17 +270,23 @@ endfunction() function(ggml_add_cpu_backend_variant tag_name) set(GGML_CPU_TAG_NAME ${tag_name}) # other: OPENMP LLAMAFILE CPU_HBM - foreach (feat NATIVE - SSE42 - AVX AVX2 BMI2 AVX_VNNI FMA F16C - AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 - AMX_TILE AMX_INT8 AMX_BF16) - set(GGML_${feat} OFF) - endforeach() - - foreach (feat ${ARGN}) - set(GGML_${feat} ON) - endforeach() + if (GGML_SYSTEM_ARCH STREQUAL "x86") + foreach (feat NATIVE + SSE42 + AVX AVX2 BMI2 AVX_VNNI FMA F16C + AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 + AMX_TILE AMX_INT8 AMX_BF16) + set(GGML_${feat} OFF) + endforeach() + + foreach (feat ${ARGN}) + set(GGML_${feat} ON) + endforeach() + elseif (GGML_SYSTEM_ARCH STREQUAL "ARM") + foreach (feat ${ARGN}) + set(GGML_INTERNAL_${feat} ON) + endforeach() + endif() ggml_add_cpu_backend_variant_impl(${tag_name}) endfunction() @@ -290,6 +296,8 @@ ggml_add_backend(CPU) if (GGML_CPU_ALL_VARIANTS) if (NOT GGML_BACKEND_DL) message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS requires GGML_BACKEND_DL") + elseif (GGML_CPU_ARM_ARCH) + message(FATAL_ERROR "Cannot use both GGML_CPU_ARM_ARCH and GGML_CPU_ALL_VARIANTS") endif() if (GGML_SYSTEM_ARCH STREQUAL "x86") ggml_add_cpu_backend_variant(x64) @@ -303,8 +311,20 @@ if (GGML_CPU_ALL_VARIANTS) # MSVC doesn't support AMX ggml_add_cpu_backend_variant(sapphirerapids SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) endif() + elseif(GGML_SYSTEM_ARCH STREQUAL "ARM" AND CMAKE_SYSTEM_NAME MATCHES "Linux") + # Many of these features are optional so we build versions with popular + # combinations and name the backends based on the version they were + # first released with + ggml_add_cpu_backend_variant(armv8.0_1) + ggml_add_cpu_backend_variant(armv8.2_1 DOTPROD) + ggml_add_cpu_backend_variant(armv8.2_2 DOTPROD FP16_VECTOR_ARITHMETIC) + ggml_add_cpu_backend_variant(armv8.2_3 DOTPROD FP16_VECTOR_ARITHMETIC SVE) + ggml_add_cpu_backend_variant(armv8.6_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8) + ggml_add_cpu_backend_variant(armv8.6_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2) + ggml_add_cpu_backend_variant(armv9.2_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SME) + ggml_add_cpu_backend_variant(armv9.2_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2 SME) else() - message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS not yet supported on ${GGML_SYSTEM_ARCH}") + message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS not yet supported with ${GGML_SYSTEM_ARCH} on ${CMAKE_SYSTEM_NAME}") endif() elseif (GGML_CPU) ggml_add_cpu_backend_variant_impl("") diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index 77dfc10df2057..e4c0fa8d0240c 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -1,3 +1,17 @@ +function(ggml_add_cpu_backend_features cpu_name arch) + # The feature detection code is compiled as a separate target so that + # it can be built without the architecture flags + # Since multiple variants of the CPU backend may be included in the same + # build, using set_source_files_properties() to set 
the arch flags is not possible + set(GGML_CPU_FEATS_NAME ${cpu_name}-feats) + add_library(${GGML_CPU_FEATS_NAME} OBJECT ggml-cpu/arch/${arch}/cpu-feats.cpp) + target_include_directories(${GGML_CPU_FEATS_NAME} PRIVATE . .. ../include) + target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE ${ARGN}) + target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE GGML_BACKEND_DL GGML_BACKEND_BUILD GGML_BACKEND_SHARED) + set_target_properties(${GGML_CPU_FEATS_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON) + target_link_libraries(${cpu_name} PRIVATE ${GGML_CPU_FEATS_NAME}) +endfunction() + function(ggml_add_cpu_backend_variant_impl tag_name) if (tag_name) set(GGML_CPU_NAME ggml-cpu-${tag_name}) @@ -143,6 +157,49 @@ function(ggml_add_cpu_backend_variant_impl tag_name) else() if (GGML_CPU_ARM_ARCH) list(APPEND ARCH_FLAGS -march=${GGML_CPU_ARM_ARCH}) + elseif(GGML_CPU_ALL_VARIANTS) + if (CMAKE_SYSTEM_NAME MATCHES "Linux") + # Begin with the lowest baseline + set(ARM_MCPU "armv8-a") + set(ARCH_TAGS "") + set(ARCH_DEFINITIONS "") + + # When a feature is selected, bump the MCPU to the first + # version that supported it + if (GGML_INTERNAL_DOTPROD) + set(ARM_MCPU "armv8.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+dotprod") + list(APPEND ARCH_DEFINITIONS GGML_USE_DOTPROD) + endif() + if (GGML_INTERNAL_FP16_VECTOR_ARITHMETIC) + set(ARM_MCPU "armv8.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+fp16") + list(APPEND ARCH_DEFINITIONS GGML_USE_FP16_VECTOR_ARITHMETIC) + endif() + if (GGML_INTERNAL_SVE) + set(ARM_MCPU "armv8.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+sve") + list(APPEND ARCH_DEFINITIONS GGML_USE_SVE) + endif() + if (GGML_INTERNAL_MATMUL_INT8) + set(ARM_MCPU "armv8.6-a") + set(ARCH_TAGS "${ARCH_TAGS}+i8mm") + list(APPEND ARCH_DEFINITIONS GGML_USE_MATMUL_INT8) + endif() + if (GGML_INTERNAL_SVE2) + set(ARM_MCPU "armv8.6-a") + set(ARCH_TAGS "${ARCH_TAGS}+sve2") + list(APPEND ARCH_DEFINITIONS GGML_USE_SVE2) + endif() + if (GGML_INTERNAL_SME) + set(ARM_MCPU "armv9.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+sme") + list(APPEND ARCH_DEFINITIONS GGML_USE_SME) + endif() + + list(APPEND ARCH_FLAGS "-march=${ARM_MCPU}${ARCH_TAGS}") + ggml_add_cpu_backend_features(${GGML_CPU_NAME} arm ${ARCH_DEFINITIONS}) + endif() endif() endif() @@ -306,18 +363,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) # the feature check relies on ARCH_DEFINITIONS, but it is not set with GGML_NATIVE message(FATAL_ERROR "GGML_NATIVE is not compatible with GGML_BACKEND_DL, consider using GGML_CPU_ALL_VARIANTS") endif() - - # The feature detection code is compiled as a separate target so that - # it can be built without the architecture flags - # Since multiple variants of the CPU backend may be included in the same - # build, using set_source_files_properties() to set the arch flags is not possible - set(GGML_CPU_FEATS_NAME ${GGML_CPU_NAME}-feats) - add_library(${GGML_CPU_FEATS_NAME} OBJECT ggml-cpu/arch/x86/cpu-feats.cpp) - target_include_directories(${GGML_CPU_FEATS_NAME} PRIVATE . .. 
../include) - target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE ${ARCH_DEFINITIONS}) - target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE GGML_BACKEND_DL GGML_BACKEND_BUILD GGML_BACKEND_SHARED) - set_target_properties(${GGML_CPU_FEATS_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON) - target_link_libraries(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_FEATS_NAME}) + ggml_add_cpu_backend_features(${GGML_CPU_NAME} x86 ${ARCH_DEFINITIONS}) endif() elseif (GGML_SYSTEM_ARCH STREQUAL "PowerPC") message(STATUS "PowerPC detected") diff --git a/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp b/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp new file mode 100644 index 0000000000000..67369147ce851 --- /dev/null +++ b/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp @@ -0,0 +1,94 @@ +#include "ggml-backend-impl.h" + +#if defined(__aarch64__) + +#if defined(__linux__) +#include +#elif defined(__APPLE__) +#include +#endif + +#if !defined(HWCAP2_I8MM) +#define HWCAP2_I8MM (1 << 13) +#endif + +#if !defined(HWCAP2_SME) +#define HWCAP2_SME (1 << 23) +#endif + +struct aarch64_features { + // has_neon not needed, aarch64 has NEON guaranteed + bool has_dotprod = false; + bool has_fp16_va = false; + bool has_sve = false; + bool has_sve2 = false; + bool has_i8mm = false; + bool has_sme = false; + + aarch64_features() { +#if defined(__linux__) + uint32_t hwcap = getauxval(AT_HWCAP); + uint32_t hwcap2 = getauxval(AT_HWCAP2); + + has_dotprod = !!(hwcap & HWCAP_ASIMDDP); + has_fp16_va = !!(hwcap & HWCAP_FPHP); + has_sve = !!(hwcap & HWCAP_SVE); + has_sve2 = !!(hwcap2 & HWCAP2_SVE2); + has_i8mm = !!(hwcap2 & HWCAP2_I8MM); + has_sme = !!(hwcap2 & HWCAP2_SME); +#elif defined(__APPLE__) + int oldp = 0; + size_t size = sizeof(oldp); + + if (sysctlbyname("hw.optional.arm.FEAT_DotProd", &oldp, &size, NULL, 0) == 0) { + has_dotprod = static_cast(oldp); + } + + if (sysctlbyname("hw.optional.arm.FEAT_I8MM", &oldp, &size, NULL, 0) == 0) { + has_i8mm = static_cast(oldp); + } + + if (sysctlbyname("hw.optional.arm.FEAT_SME", &oldp, &size, NULL, 0) == 0) { + has_sme = static_cast(oldp); + } + + // Apple apparently does not implement SVE yet +#endif + } +}; + +static int ggml_backend_cpu_aarch64_score() { + int score = 1; + aarch64_features af; + +#ifdef GGML_USE_DOTPROD + if (!af.has_dotprod) { return 0; } + score += 1<<1; +#endif +#ifdef GGML_USE_FP16_VECTOR_ARITHMETIC + if (!af.has_fp16_va) { return 0; } + score += 1<<2; +#endif +#ifdef GGML_USE_SVE + if (!af.has_sve) { return 0; } + score += 1<<3; +#endif +#ifdef GGML_USE_MATMUL_INT8 + if (!af.has_i8mm) { return 0; } + score += 1<<4; +#endif +#ifdef GGML_USE_SVE2 + if (!af.has_sve2) { return 0; } + score += 1<<5; +#endif +#ifdef GGML_USE_SME + if (!af.has_sme) { return 0; } + score += 1<<6; +#endif + + return score; +} + +GGML_BACKEND_DL_SCORE_IMPL(ggml_backend_cpu_aarch64_score) + +# endif // defined(__aarch64__) From 3472d6a7706c6223f8698c209a260e5e6c4d6ed5 Mon Sep 17 00:00:00 2001 From: bandoti <141645996+bandoti@users.noreply.github.com> Date: Wed, 11 Jun 2025 17:19:44 -0300 Subject: [PATCH 034/192] common: fix issue with regex_escape routine on windows (#14133) --- common/common.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/common.cpp b/common/common.cpp index 218f1e1dc0e4d..e23887c70770c 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -466,7 +466,7 @@ size_t string_find_partial_stop(const std::string_view & str, const std::string_ std::string regex_escape(const std::string & s) { static const std::regex special_chars("[.^$|()*+?\\[\\]{}\\\\]"); - 
return std::regex_replace(s, special_chars, "\\$0"); + return std::regex_replace(s, special_chars, "\\$&"); } std::string string_join(const std::vector & values, const std::string & separator) { From bbe1d8629dfff356cee13d78de6935d6836258eb Mon Sep 17 00:00:00 2001 From: compilade Date: Thu, 12 Jun 2025 02:56:04 -0400 Subject: [PATCH 035/192] context : round n_tokens to next multiple of n_seqs when reserving (#14140) This fixes RWKV inference which otherwise failed when the worst case ubatch.n_seq_tokens rounded to 0. --- src/llama-context.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama-context.cpp b/src/llama-context.cpp index b130b484bcf6f..525a00d8adb95 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -1332,7 +1332,7 @@ ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, u LLAMA_LOG_DEBUG("%s: reserving a graph for ubatch with n_tokens = %4u, n_seqs = %2u, n_outputs = %4u\n", __func__, n_tokens, n_seqs, n_outputs); if (n_tokens % n_seqs != 0) { - n_tokens = (n_tokens / n_seqs) * n_seqs; + n_tokens = ((n_tokens + (n_seqs - 1)) / n_seqs) * n_seqs; // round to next multiple of n_seqs n_outputs = std::min(n_outputs, n_tokens); LLAMA_LOG_DEBUG("%s: making n_tokens a multiple of n_seqs - n_tokens = %u, n_seqs = %u, n_outputs = %u\n", __func__, n_tokens, n_seqs, n_outputs); From 00c933f91caa61320e6486004c97b76f4fdea1f0 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 12 Jun 2025 10:02:15 +0300 Subject: [PATCH 036/192] kv-cache : fix split_equal handling in unified implementation (#14130) ggml-ci --- src/llama-context.cpp | 2 + src/llama-kv-cache-unified-iswa.cpp | 71 ++++++++++++----- src/llama-kv-cache-unified.cpp | 114 +++++++++++++++++----------- 3 files changed, 122 insertions(+), 65 deletions(-) diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 525a00d8adb95..8cea21d6989ef 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -877,6 +877,8 @@ int llama_context::encode(llama_batch & inp_batch) { memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd)); // remember the sequence ids used during the encoding - needed for cross attention later + // TODO: the seuqence indexing here is likely not correct in the general case + // probably works only for split_simple cross.seq_ids_enc.resize(n_tokens); for (int32_t i = 0; i < n_tokens; i++) { cross.seq_ids_enc[i].clear(); diff --git a/src/llama-kv-cache-unified-iswa.cpp b/src/llama-kv-cache-unified-iswa.cpp index 28d1826547649..caa58ea9aa3b0 100644 --- a/src/llama-kv-cache-unified-iswa.cpp +++ b/src/llama-kv-cache-unified-iswa.cpp @@ -98,33 +98,66 @@ llama_pos llama_kv_cache_unified_iswa::seq_pos_max(llama_seq_id seq_id) const { llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled, bool logits_all) { GGML_UNUSED(embd_pooled); - // TODO: if we fail with split_simple, we should attempt different splitting strategies - // but to do that properly, we first have to refactor the batches to be more flexible + // first try simple split + do { + auto sbatch = llama_sbatch(batch, hparams.n_embd, true, logits_all); - auto sbatch = llama_sbatch(batch, hparams.n_embd, true, logits_all); + std::vector ubatches; - std::vector ubatches; + while (sbatch.n_tokens > 0) { + auto ubatch = sbatch.split_simple(n_ubatch); - while (sbatch.n_tokens > 0) { - auto ubatch = sbatch.split_simple(n_ubatch); + ubatches.push_back(ubatch); + } - ubatches.push_back(ubatch); - } + auto heads_base = 
kv_base->prepare(ubatches); + if (heads_base.empty()) { + break; + } - auto heads_base = kv_base->prepare(ubatches); - if (heads_base.empty()) { - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); - } + auto heads_swa = kv_swa->prepare(ubatches); + if (heads_swa.empty()) { + break; + } - auto heads_swa = kv_swa->prepare(ubatches); - if (heads_swa.empty()) { - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); - } + assert(heads_base.size() == heads_swa.size()); + + return std::make_unique( + this, std::move(sbatch), std::move(heads_base), std::move(heads_swa), std::move(ubatches)); + } while (false); + + // if it fails, try equal split + do { + auto sbatch = llama_sbatch(batch, hparams.n_embd, false, logits_all); + + std::vector ubatches; - assert(heads_base.size() == heads_swa.size()); + while (sbatch.n_tokens > 0) { + auto ubatch = sbatch.split_equal(n_ubatch); + + ubatches.push_back(ubatch); + } + + auto heads_base = kv_base->prepare(ubatches); + if (heads_base.empty()) { + break; + } + + auto heads_swa = kv_swa->prepare(ubatches); + if (heads_swa.empty()) { + break; + } + + assert(heads_base.size() == heads_swa.size()); + + return std::make_unique( + this, std::move(sbatch), std::move(heads_base), std::move(heads_swa), std::move(ubatches)); + } while (false); + + // TODO: if we fail again, we should attempt different splitting strategies + // but to do that properly, we first have to refactor the batches to be more flexible - return std::make_unique( - this, std::move(sbatch), std::move(heads_base), std::move(heads_swa), std::move(ubatches)); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); } llama_memory_state_ptr llama_kv_cache_unified_iswa::init_full() { diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index 1a9f4e3159f94..ddeb138f38fb9 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -314,20 +314,24 @@ llama_memory_state_ptr llama_kv_cache_unified::init_batch( bool logits_all) { GGML_UNUSED(embd_pooled); - auto sbatch = llama_sbatch(batch, hparams.n_embd, true, logits_all); + do { + auto sbatch = llama_sbatch(batch, hparams.n_embd, true, logits_all); - std::vector ubatches; - while (sbatch.n_tokens > 0) { - ubatches.push_back(sbatch.split_simple(n_ubatch)); - } + std::vector ubatches; + while (sbatch.n_tokens > 0) { + ubatches.push_back(sbatch.split_simple(n_ubatch)); + } - auto heads = prepare(ubatches); - if (heads.empty()) { - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); - } + auto heads = prepare(ubatches); + if (heads.empty()) { + break; + } - return std::make_unique( - this, std::move(sbatch), std::move(heads), std::move(ubatches)); + return std::make_unique( + this, std::move(sbatch), std::move(heads), std::move(ubatches)); + } while (false); + + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); } llama_memory_state_ptr llama_kv_cache_unified::init_full() { @@ -521,7 +525,6 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { } if (debug > 0) { - LLAMA_LOG_CONT("\n"); LLAMA_LOG_DEBUG("%s: n = %5d, used = %5d, head = %5d, size = %5d, n_swa = %5d\n", __func__, cells.used_max_p1(), cells.get_used(), head, get_size(), n_swa); if ((debug == 2 && n_swa > 0) || debug > 2) { @@ -530,7 +533,13 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { if (cells.is_empty(i)) { ss += '.'; } else { - ss += std::to_string(cells.seq_get(i)); + assert(cells.seq_count(i) >= 1); + + if (cells.seq_count(i) == 1) { + ss 
+= std::to_string(cells.seq_get(i)); + } else { + ss += 'M'; + } } if (i%256 == 255) { ss += " *"; @@ -636,6 +645,12 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { } void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch & ubatch) { + if (debug > 0) { + LLAMA_LOG_DEBUG("%s: ubatch info:\n", __func__); + LLAMA_LOG_DEBUG("%s: n_tokens = %d, equal_seqs = %d\n", __func__, ubatch.n_tokens, ubatch.equal_seqs); + LLAMA_LOG_DEBUG("%s: n_seq_tokens = %d, n_seqs = %d\n", __func__, ubatch.n_seq_tokens, ubatch.n_seqs); + } + // keep track of the max sequence position that we would overwrite with this ubatch // for non-SWA cache, this would be always empty llama_seq_id seq_pos_max_rm[LLAMA_MAX_PARALLEL_SEQUENCES]; @@ -643,22 +658,26 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch seq_pos_max_rm[s] = -1; } - for (uint32_t i = 0; i < ubatch.n_tokens; ++i) { - if (!cells.is_empty(head_cur + i)) { - assert(cells.seq_count(head_cur + i) == 1); + for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { + for (uint32_t j = 0; j < ubatch.n_seq_tokens; ++j) { + const uint32_t idx = s*ubatch.n_seq_tokens + j; - const llama_seq_id seq_id = cells.seq_get(head_cur + i); - const llama_pos pos = cells.pos_get(head_cur + i); + if (!cells.is_empty(head_cur + idx)) { + assert(cells.seq_count(head_cur + idx) == 1); - seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos); + const llama_seq_id seq_id = cells.seq_get(head_cur + idx); + const llama_pos pos = cells.pos_get(head_cur + idx); - cells.rm(head_cur + i); - } + seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos); + + cells.rm(head_cur + idx); + } - cells.pos_set(head_cur + i, ubatch.pos[i]); + cells.pos_set(head_cur + idx, ubatch.pos[idx]); - for (int32_t j = 0; j < ubatch.n_seq_id[i]; j++) { - cells.seq_add(head_cur + i, ubatch.seq_id[i][j]); + for (int32_t i = 0; i < ubatch.n_seq_id[s]; i++) { + cells.seq_add(head_cur + idx, ubatch.seq_id[s][i]); + } } } @@ -677,7 +696,6 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch seq_rm(s, cells.seq_pos_min(s), seq_pos_max_rm[s] + 1); } } - // move the head at the end of the slot head = head_cur + ubatch.n_tokens; } @@ -774,14 +792,14 @@ ggml_tensor * llama_kv_cache_unified::cpy_v(ggml_context * ctx, ggml_tensor * v_ } void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const { - const int64_t n_tokens = ubatch->n_tokens; - const int64_t n_seq_tokens = ubatch->n_seq_tokens; - const int64_t n_seqs = ubatch->n_seqs; + const uint32_t n_tokens = ubatch->n_tokens; + const uint32_t n_seq_tokens = ubatch->n_seq_tokens; + const uint32_t n_seqs = ubatch->n_seqs; GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer)); float * data = (float *) dst->data; - const auto n_kv = dst->ne[0]; + const int64_t n_kv = dst->ne[0]; // Use only the previous KV cells of the correct sequence for each token of the ubatch. // It's assumed that if a token in the batch has multiple sequences, they are equivalent. 
@@ -795,12 +813,14 @@ void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ub // xxxxx----- // xxxxx----- // To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615 - for (int h = 0; h < 1; ++h) { - for (int s = 0; s < n_seqs; ++s) { + for (uint32_t h = 0; h < 1; ++h) { + for (uint32_t s = 0; s < n_seqs; ++s) { const llama_seq_id seq_id = ubatch->seq_id[s][0]; - for (int j = 0; j < n_seq_tokens; ++j) { - const llama_pos p1 = ubatch->pos[s*n_seq_tokens + j]; + for (uint32_t j = 0; j < n_seq_tokens; ++j) { + const uint32_t idx = s*n_seq_tokens + j; + + const llama_pos p1 = ubatch->pos[idx]; for (uint32_t i = 0; i < n_kv; ++i) { float f = 0.0f; @@ -830,16 +850,16 @@ void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ub f = -INFINITY; } - data[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f; + data[h*(n_kv*n_tokens) + idx*n_kv + i] = f; } } } // mask padded tokens if (data) { - for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { - for (uint32_t j = 0; j < n_kv; ++j) { - data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; + for (uint32_t j = n_tokens; j < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++j) { + for (uint32_t i = 0; i < n_kv; ++i) { + data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; } } } @@ -1490,9 +1510,11 @@ bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell seq_rm(dest_seq_id, -1, -1); llama_sbatch sbatch; - llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false); + llama_ubatch ubatch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false); - batch.n_tokens = cell_count; + ubatch.n_tokens = cell_count; + ubatch.n_seq_tokens = cell_count; + ubatch.n_seqs = 1; for (uint32_t i = 0; i < cell_count; ++i) { llama_pos pos; @@ -1512,18 +1534,18 @@ bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell io.read_to(&seq_id, sizeof(seq_id)); } - batch.pos[i] = pos; - batch.n_seq_id[i] = n_seq_id; - batch.seq_id[i] = &dest_seq_id; + ubatch.pos[i] = pos; + ubatch.n_seq_id[i] = n_seq_id; + ubatch.seq_id[i] = &dest_seq_id; } - const auto head_cur = find_slot(batch); + const auto head_cur = find_slot(ubatch); if (head_cur < 0) { LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__); return false; } - apply_ubatch(head_cur, batch); + apply_ubatch(head_cur, ubatch); // keep the head at the old position because we will read the KV data into it in state_read_data() head = head_cur; @@ -1531,8 +1553,8 @@ bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell // DEBUG CHECK: head_cur should be our first cell, head_cur + cell_count - 1 should be our last cell (verify seq_id and pos values) // Assume that this is one contiguous block of cells GGML_ASSERT(head_cur + cell_count <= cells.size()); - GGML_ASSERT(cells.pos_get(head_cur) == batch.pos[0]); - GGML_ASSERT(cells.pos_get(head_cur + cell_count - 1) == batch.pos[cell_count - 1]); + GGML_ASSERT(cells.pos_get(head_cur) == ubatch.pos[0]); + GGML_ASSERT(cells.pos_get(head_cur + cell_count - 1) == ubatch.pos[cell_count - 1]); GGML_ASSERT(cells.seq_has(head_cur, dest_seq_id)); GGML_ASSERT(cells.seq_has(head_cur + cell_count - 1, dest_seq_id)); } else { From ae2e7f198c97f7807aec15f8a2ded6c882ea7e94 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 12 Jun 2025 10:14:24 +0300 Subject: [PATCH 037/192] cmake : handle whitepsaces in path during metal build (#14126) * cmake : handle whitepsaces in path during metal build ggml-ci * 
cont : proper fix ggml-ci --------- Co-authored-by: Daniel Bevenius --- ggml/src/ggml-metal/CMakeLists.txt | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/ggml/src/ggml-metal/CMakeLists.txt b/ggml/src/ggml-metal/CMakeLists.txt index e222327809c31..77187efc1756d 100644 --- a/ggml/src/ggml-metal/CMakeLists.txt +++ b/ggml/src/ggml-metal/CMakeLists.txt @@ -44,21 +44,22 @@ if (GGML_METAL_EMBED_LIBRARY) set(METALLIB_SOURCE_EMBED_TMP "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-embed.metal.tmp") add_custom_command( - OUTPUT ${METALLIB_EMBED_ASM} + OUTPUT "${METALLIB_EMBED_ASM}" COMMAND echo "Embedding Metal library" - COMMAND sed -e '/__embed_ggml-common.h__/r ${METALLIB_COMMON}' -e '/__embed_ggml-common.h__/d' < ${METALLIB_SOURCE} > ${METALLIB_SOURCE_EMBED_TMP} - COMMAND sed -e '/\#include \"ggml-metal-impl.h\"/r ${METALLIB_IMPL}' -e '/\#include \"ggml-metal-impl.h\"/d' < ${METALLIB_SOURCE_EMBED_TMP} > ${METALLIB_SOURCE_EMBED} - COMMAND echo ".section __DATA,__ggml_metallib" > ${METALLIB_EMBED_ASM} - COMMAND echo ".globl _ggml_metallib_start" >> ${METALLIB_EMBED_ASM} - COMMAND echo "_ggml_metallib_start:" >> ${METALLIB_EMBED_ASM} - COMMAND echo ".incbin \\\"${METALLIB_SOURCE_EMBED}\\\"" >> ${METALLIB_EMBED_ASM} - COMMAND echo ".globl _ggml_metallib_end" >> ${METALLIB_EMBED_ASM} - COMMAND echo "_ggml_metallib_end:" >> ${METALLIB_EMBED_ASM} + COMMAND sed -e "/__embed_ggml-common.h__/r ${METALLIB_COMMON}" -e "/__embed_ggml-common.h__/d" < "${METALLIB_SOURCE}" > "${METALLIB_SOURCE_EMBED_TMP}" + COMMAND sed -e "/\#include \"ggml-metal-impl.h\"/r ${METALLIB_IMPL}" -e "/\#include \"ggml-metal-impl.h\"/d" < "${METALLIB_SOURCE_EMBED_TMP}" > "${METALLIB_SOURCE_EMBED}" + COMMAND echo ".section __DATA,__ggml_metallib" > "${METALLIB_EMBED_ASM}" + COMMAND echo ".globl _ggml_metallib_start" >> "${METALLIB_EMBED_ASM}" + COMMAND echo "_ggml_metallib_start:" >> "${METALLIB_EMBED_ASM}" + COMMAND echo .incbin "\"${METALLIB_SOURCE_EMBED}\"" >> "${METALLIB_EMBED_ASM}" + COMMAND echo ".globl _ggml_metallib_end" >> "${METALLIB_EMBED_ASM}" + COMMAND echo "_ggml_metallib_end:" >> "${METALLIB_EMBED_ASM}" DEPENDS ../ggml-common.h ggml-metal.metal ggml-metal-impl.h COMMENT "Generate assembly for embedded Metal library" + VERBATIM ) - target_sources(ggml-metal PRIVATE ${METALLIB_EMBED_ASM}) + target_sources(ggml-metal PRIVATE "${METALLIB_EMBED_ASM}") else() if (GGML_METAL_SHADER_DEBUG) # custom command to do the following: From e7cb2bb607a6b26f4c05089a7aab8195e9cd1e39 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 12 Jun 2025 11:49:26 +0300 Subject: [PATCH 038/192] batch : remove logits_all flag (#14141) ggml-ci --- src/llama-batch.cpp | 10 ++-------- src/llama-batch.h | 4 +--- src/llama-context.cpp | 6 +++--- src/llama-kv-cache-recurrent.cpp | 4 ++-- src/llama-kv-cache-recurrent.h | 3 +-- src/llama-kv-cache-unified-iswa.cpp | 6 +++--- src/llama-kv-cache-unified-iswa.h | 3 +-- src/llama-kv-cache-unified.cpp | 5 ++--- src/llama-kv-cache-unified.h | 3 +-- src/llama-memory.h | 3 +-- 10 files changed, 17 insertions(+), 30 deletions(-) diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index 6a19a243118d3..58787fdba0d44 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -105,12 +105,7 @@ void llama_sbatch::add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & s ubatch.seq_id = batch->seq_id + seq.offset; } } - if (logits_all) { - for (size_t i = 0; i < length; ++i) { - ubatch.output[ubatch.n_tokens + i] = 1; - out_ids.push_back(ids[seq.offset + i]); - } - 
} else if (batch->logits) { + if (batch->logits) { if (ubatch.equal_seqs) { for (size_t i = 0; i < length; ++i) { size_t id = ids[seq.offset + i]; @@ -197,11 +192,10 @@ llama_ubatch llama_sbatch::split_seq(size_t n_ubatch) { return ubatch; } -llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split, bool logits_all) { +llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split) { GGML_ASSERT(batch.n_tokens >= 0); this->batch = &batch; this->n_embd = n_embd; - this->logits_all = logits_all; n_tokens = batch.n_tokens; ids.resize(n_tokens); diff --git a/src/llama-batch.h b/src/llama-batch.h index b8260b94fd2d0..989fb6cf9d95c 100644 --- a/src/llama-batch.h +++ b/src/llama-batch.h @@ -39,8 +39,6 @@ struct llama_sbatch { size_t n_embd; - bool logits_all; // TODO: remove once lctx.logits_all is removed too - // sorted indices into the batch std::vector ids; // batch indices of the output @@ -76,7 +74,7 @@ struct llama_sbatch { llama_ubatch split_seq(size_t n_ubatch); llama_sbatch() = default; - llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split = false, bool logits_all = false); + llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split = false); }; // temporary allocate memory for the input batch if needed diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 8cea21d6989ef..ebcba6993c471 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -764,7 +764,7 @@ int llama_context::encode(llama_batch & inp_batch) { const int64_t n_embd = hparams.n_embd; - llama_sbatch sbatch = llama_sbatch(batch, n_embd, /* simple_split */ true, /* logits_all */ true); + llama_sbatch sbatch = llama_sbatch(batch, n_embd, /* simple_split */ true); const llama_ubatch ubatch = sbatch.split_simple(n_tokens); @@ -976,7 +976,7 @@ int llama_context::decode(llama_batch & inp_batch) { llama_memory_state_ptr mstate; while (true) { - mstate = memory->init_batch(batch, cparams.n_ubatch, embd_pooled, /* logits_all */ n_outputs_all == n_tokens_all); + mstate = memory->init_batch(batch, cparams.n_ubatch, embd_pooled); if (!mstate) { return -2; } @@ -2080,7 +2080,7 @@ void llama_context::opt_epoch_iter( int64_t n_outputs_all = n_tokens_all; - auto mstate = memory->init_batch(batch, cparams.n_ubatch, embd_pooled, /* logits_all */ true); + auto mstate = memory->init_batch(batch, cparams.n_ubatch, embd_pooled); if (!mstate || mstate->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) { LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__); break; diff --git a/src/llama-kv-cache-recurrent.cpp b/src/llama-kv-cache-recurrent.cpp index f8cdd52808d7b..de23b4ad23bce 100644 --- a/src/llama-kv-cache-recurrent.cpp +++ b/src/llama-kv-cache-recurrent.cpp @@ -359,10 +359,10 @@ llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const { return result; } -llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled, bool logits_all) { +llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled) { GGML_UNUSED(embd_pooled); - auto sbatch = llama_sbatch(batch, hparams.n_embd, false, logits_all); + auto sbatch = llama_sbatch(batch, hparams.n_embd, false); std::vector ubatches; diff --git a/src/llama-kv-cache-recurrent.h b/src/llama-kv-cache-recurrent.h index 4b33bafd71cca..d7c02ea872160 100644 --- a/src/llama-kv-cache-recurrent.h +++ b/src/llama-kv-cache-recurrent.h @@ -32,8 +32,7 @@ class 
llama_kv_cache_recurrent : public llama_memory_i { llama_memory_state_ptr init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled, - bool logits_all) override; + bool embd_pooled) override; llama_memory_state_ptr init_full() override; diff --git a/src/llama-kv-cache-unified-iswa.cpp b/src/llama-kv-cache-unified-iswa.cpp index caa58ea9aa3b0..9814f76631203 100644 --- a/src/llama-kv-cache-unified-iswa.cpp +++ b/src/llama-kv-cache-unified-iswa.cpp @@ -95,12 +95,12 @@ llama_pos llama_kv_cache_unified_iswa::seq_pos_max(llama_seq_id seq_id) const { return kv_swa->seq_pos_max(seq_id); } -llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled, bool logits_all) { +llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled) { GGML_UNUSED(embd_pooled); // first try simple split do { - auto sbatch = llama_sbatch(batch, hparams.n_embd, true, logits_all); + auto sbatch = llama_sbatch(batch, hparams.n_embd, true); std::vector ubatches; @@ -128,7 +128,7 @@ llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch // if it fails, try equal split do { - auto sbatch = llama_sbatch(batch, hparams.n_embd, false, logits_all); + auto sbatch = llama_sbatch(batch, hparams.n_embd, false); std::vector ubatches; diff --git a/src/llama-kv-cache-unified-iswa.h b/src/llama-kv-cache-unified-iswa.h index 3dbf33ed7b960..d114c7378fbe9 100644 --- a/src/llama-kv-cache-unified-iswa.h +++ b/src/llama-kv-cache-unified-iswa.h @@ -34,8 +34,7 @@ class llama_kv_cache_unified_iswa : public llama_memory_i { llama_memory_state_ptr init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled, - bool logits_all) override; + bool embd_pooled) override; llama_memory_state_ptr init_full() override; diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index ddeb138f38fb9..89606c598fc4f 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -310,12 +310,11 @@ llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const { llama_memory_state_ptr llama_kv_cache_unified::init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled, - bool logits_all) { + bool embd_pooled) { GGML_UNUSED(embd_pooled); do { - auto sbatch = llama_sbatch(batch, hparams.n_embd, true, logits_all); + auto sbatch = llama_sbatch(batch, hparams.n_embd, true); std::vector ubatches; while (sbatch.n_tokens > 0) { diff --git a/src/llama-kv-cache-unified.h b/src/llama-kv-cache-unified.h index cf4c691babd1e..d6dcd19f2507e 100644 --- a/src/llama-kv-cache-unified.h +++ b/src/llama-kv-cache-unified.h @@ -59,8 +59,7 @@ class llama_kv_cache_unified : public llama_memory_i { llama_memory_state_ptr init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled, - bool logits_all) override; + bool embd_pooled) override; llama_memory_state_ptr init_full() override; diff --git a/src/llama-memory.h b/src/llama-memory.h index 991aae781ba57..42e226dc0ed61 100644 --- a/src/llama-memory.h +++ b/src/llama-memory.h @@ -73,8 +73,7 @@ struct llama_memory_i { virtual llama_memory_state_ptr init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled, - bool logits_all) = 0; + bool embd_pooled) = 0; // simulate full cache, used for allocating worst-case compute buffers virtual llama_memory_state_ptr init_full() = 0; From 96ee2f29dfb7eb3dcd0926a9a884e0da1961a087 Mon Sep 17 00:00:00 2001 From: Georgi 
Gerganov Date: Thu, 12 Jun 2025 11:50:01 +0300 Subject: [PATCH 039/192] context : simplify output counting logic during decode (#14142) * batch : remove logits_all flag ggml-ci * context : simplify output counting logic during decode ggml-ci * cont : fix comments --- src/llama-batch.cpp | 7 ++++--- src/llama-batch.h | 2 +- src/llama-context.cpp | 42 +++++++++++++++++++++++------------------- 3 files changed, 28 insertions(+), 23 deletions(-) diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index 58787fdba0d44..69e0d7549c334 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -306,9 +306,10 @@ llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0 batch.seq_id = seq_id.data(); } if (!batch.logits) { - logits.resize(batch.n_tokens); - logits[logits.size() - 1] = true; - batch.logits = logits.data(); + // by default return the output only for the last token + output.resize(batch.n_tokens); + output[output.size() - 1] = true; + batch.logits = output.data(); } } diff --git a/src/llama-batch.h b/src/llama-batch.h index 989fb6cf9d95c..7ad82b528b18b 100644 --- a/src/llama-batch.h +++ b/src/llama-batch.h @@ -85,7 +85,7 @@ struct llama_batch_allocr { std::vector pos; std::vector n_seq_id; std::vector seq_id; - std::vector logits; + std::vector output; // optionally fulfill the batch returned by llama_batch_get_one llama_batch_allocr(struct llama_batch in_batch, llama_pos p0); diff --git a/src/llama-context.cpp b/src/llama-context.cpp index ebcba6993c471..2e551bf6e111c 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -758,6 +758,7 @@ int llama_context::encode(llama_batch & inp_batch) { t_compute_start_us = ggml_time_us(); } + // TODO: this clear of the buffer can easily be forgotten - need something better embd_seq.clear(); n_queued_tokens += n_tokens; @@ -940,6 +941,25 @@ int llama_context::decode(llama_batch & inp_batch) { } } + // this indicates we are doing pooled embedding + const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; + + int64_t n_outputs_all = 0; + + // count outputs + for (uint32_t i = 0; i < n_tokens_all; ++i) { + n_outputs_all += batch.logits[i] != 0; + } + + if (embd_pooled) { + // require that all tokens are output + if (n_outputs_all != n_tokens_all) { + LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %" PRId64 ", n_tokens_all = %" PRId64 ")\n", + __func__, n_outputs_all, n_tokens_all); + return -1; + } + } + GGML_ASSERT(n_tokens_all <= cparams.n_batch); GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens"); @@ -949,25 +969,9 @@ int llama_context::decode(llama_batch & inp_batch) { } n_queued_tokens += n_tokens_all; - // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens - const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; - + // TODO: this clear of the buffer can easily be forgotten - need something better embd_seq.clear(); - int64_t n_outputs_all = 0; - - // count outputs - if (batch.logits && !embd_pooled) { - for (uint32_t i = 0; i < n_tokens_all; ++i) { - n_outputs_all += batch.logits[i] != 0; - } - } else if (embd_pooled) { - n_outputs_all = n_tokens_all; - } else { - // keep last output only - n_outputs_all = 1; - } - bool did_optimize = false; // handle any pending defrags/shifts @@ -1029,7 +1033,7 @@ int llama_context::decode(llama_batch & inp_batch) { do { const auto & 
ubatch = mstate->get_ubatch(); - // count the outputs in this u_batch + // count the outputs in this ubatch { int32_t n_outputs_new = 0; @@ -2073,7 +2077,7 @@ void llama_context::opt_epoch_iter( n_queued_tokens += n_tokens_all; - // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens + // this indicates we are doing pooled embedding const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; embd_seq.clear(); From f7919ab747ae4f2dd3b0dc67c2c7de0e94e1f5d0 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 12 Jun 2025 11:51:38 +0300 Subject: [PATCH 040/192] server : re-enable SWA speculative decoding (#14131) ggml-ci --- tools/server/server.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tools/server/server.cpp b/tools/server/server.cpp index 1b1cf439baa5f..8efb8b704141f 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -2017,11 +2017,6 @@ struct server_context { params_base.n_cache_reuse = 0; SRV_WRN("%s\n", "cache_reuse is not supported by this context, it will be disabled"); } - - if (!params_base.speculative.model.path.empty()) { - SRV_ERR("%s\n", "err: speculative decode is not supported by this context"); - return false; - } } return true; From aa8c1ee1cdf032616250e35f3f033c451baef97e Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 12 Jun 2025 14:43:09 +0300 Subject: [PATCH 041/192] readme : remove project status link (#14149) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 385ac04d84e56..928100f3c256a 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ [![Release](https://img.shields.io/github/v/release/ggml-org/llama.cpp)](https://github.com/ggml-org/llama.cpp/releases) [![Server](https://github.com/ggml-org/llama.cpp/actions/workflows/server.yml/badge.svg)](https://github.com/ggml-org/llama.cpp/actions/workflows/server.yml) -[Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggml-org/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggml-org/llama.cpp/discussions/205) / [ggml](https://github.com/ggml-org/ggml) +[Roadmap](https://github.com/users/ggerganov/projects/7) / [Manifesto](https://github.com/ggml-org/llama.cpp/discussions/205) / [ggml](https://github.com/ggml-org/ggml) Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others) in pure C/C++ From ee60236f0d7869741647e029420db81c4d14e41c Mon Sep 17 00:00:00 2001 From: Anton Mitkov Date: Thu, 12 Jun 2025 14:15:11 +0100 Subject: [PATCH 042/192] sycl: Remove not needed copy f16->f32 for dnnl mul mat (#14125) --- ggml/src/ggml-sycl/gemm.hpp | 3 +++ ggml/src/ggml-sycl/ggml-sycl.cpp | 9 +++------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ggml/src/ggml-sycl/gemm.hpp b/ggml/src/ggml-sycl/gemm.hpp index 6cbc7e0f6938c..5efe03d364b1b 100644 --- a/ggml/src/ggml-sycl/gemm.hpp +++ b/ggml/src/ggml-sycl/gemm.hpp @@ -65,6 +65,9 @@ class DnnlGemmWrapper { dnnl::primitive_attr primitive_attr; primitive_attr.set_scratchpad_mode(dnnl::scratchpad_mode::user); +#ifdef GGML_SYCL_F16 + primitive_attr.set_fpmath_mode(dnnl::fpmath_mode::f16); +#endif auto a_mem = dnnl::memory(a_in_md, eng, const_cast(a)); auto b_mem = dnnl::memory(b_in_md, eng, const_cast(b)); diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 3693b0a4337a5..feb30304fc092 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -2127,21 
+2127,18 @@ inline void ggml_sycl_op_mul_mat_sycl( const sycl::half *src1_ptr = src1->type == GGML_TYPE_F16 ? (const sycl::half *)src1->data + src1_padded_row_size : src1_as_f16.get(); - ggml_sycl_pool_alloc dst_f16(ctx.pool(), row_diff * src1_ncols); #if GGML_SYCL_DNNL if (!g_ggml_sycl_disable_dnn) { DnnlGemmWrapper::row_gemm(ctx, src1_ncols, row_diff, ne10, src1_ptr, DnnlGemmWrapper::to_dt(), src0_ptr, DnnlGemmWrapper::to_dt(), - dst_f16.get(), DnnlGemmWrapper::to_dt(), stream); - scope_op_debug_print scope_dbg_print(__func__, "/to_fp32_sycl", dst, /*num_src=*/2, - " : converting dst to fp32"); - const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(GGML_TYPE_F16, dst); - to_fp32_sycl(dst_f16.get(), dst_dd_i, row_diff* src1_ncols, stream); + dst_dd_i, DnnlGemmWrapper::to_dt(), stream); } else #endif { + ggml_sycl_pool_alloc dst_f16(ctx.pool(), row_diff * src1_ncols); + const sycl::half alpha_f16 = 1.0f; const sycl::half beta_f16 = 0.0f; SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm( From 8b8c7b579fe78aad1d99245f1f35d57a3a0a9b53 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 13 Jun 2025 08:03:54 +0300 Subject: [PATCH 043/192] vocab : prevent heap overflow when vocab is too small (#14145) ggml-ci --- src/llama-vocab.cpp | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index ba2e1864ec005..d8c9d9730a095 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -2768,26 +2768,26 @@ void llama_vocab::impl::print_info() const { LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (uint32_t) bpe_ranks.size()); // special tokens - if (special_bos_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, special_bos_id, id_to_token[special_bos_id].text.c_str() ); } - if (special_eos_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, special_eos_id, id_to_token[special_eos_id].text.c_str() ); } - if (special_eot_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: EOT token = %d '%s'\n", __func__, special_eot_id, id_to_token[special_eot_id].text.c_str() ); } - if (special_eom_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: EOM token = %d '%s'\n", __func__, special_eom_id, id_to_token[special_eom_id].text.c_str() ); } - if (special_unk_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, special_unk_id, id_to_token[special_unk_id].text.c_str() ); } - if (special_sep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, special_sep_id, id_to_token[special_sep_id].text.c_str() ); } - if (special_pad_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, special_pad_id, id_to_token[special_pad_id].text.c_str() ); } - if (special_mask_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: MASK token = %d '%s'\n", __func__, special_mask_id, id_to_token[special_mask_id].text.c_str() ); } - - if (linefeed_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, linefeed_id, id_to_token[linefeed_id].text.c_str() ); } - - if (special_fim_pre_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PRE token = %d '%s'\n", __func__, special_fim_pre_id, id_to_token[special_fim_pre_id].text.c_str() ); } - if (special_fim_suf_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SUF token = %d '%s'\n", __func__, special_fim_suf_id, id_to_token[special_fim_suf_id].text.c_str() ); } - if (special_fim_mid_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM MID token = %d '%s'\n", 
__func__, special_fim_mid_id, id_to_token[special_fim_mid_id].text.c_str() ); } - if (special_fim_pad_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PAD token = %d '%s'\n", __func__, special_fim_pad_id, id_to_token[special_fim_pad_id].text.c_str() ); } - if (special_fim_rep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM REP token = %d '%s'\n", __func__, special_fim_rep_id, id_to_token[special_fim_rep_id].text.c_str() ); } - if (special_fim_sep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SEP token = %d '%s'\n", __func__, special_fim_sep_id, id_to_token[special_fim_sep_id].text.c_str() ); } + if (special_bos_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, special_bos_id, id_to_token.at(special_bos_id).text.c_str() ); } + if (special_eos_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, special_eos_id, id_to_token.at(special_eos_id).text.c_str() ); } + if (special_eot_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: EOT token = %d '%s'\n", __func__, special_eot_id, id_to_token.at(special_eot_id).text.c_str() ); } + if (special_eom_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: EOM token = %d '%s'\n", __func__, special_eom_id, id_to_token.at(special_eom_id).text.c_str() ); } + if (special_unk_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, special_unk_id, id_to_token.at(special_unk_id).text.c_str() ); } + if (special_sep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, special_sep_id, id_to_token.at(special_sep_id).text.c_str() ); } + if (special_pad_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, special_pad_id, id_to_token.at(special_pad_id).text.c_str() ); } + if (special_mask_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: MASK token = %d '%s'\n", __func__, special_mask_id, id_to_token.at(special_mask_id).text.c_str() ); } + + if (linefeed_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, linefeed_id, id_to_token.at(linefeed_id).text.c_str() ); } + + if (special_fim_pre_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PRE token = %d '%s'\n", __func__, special_fim_pre_id, id_to_token.at(special_fim_pre_id).text.c_str() ); } + if (special_fim_suf_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SUF token = %d '%s'\n", __func__, special_fim_suf_id, id_to_token.at(special_fim_suf_id).text.c_str() ); } + if (special_fim_mid_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM MID token = %d '%s'\n", __func__, special_fim_mid_id, id_to_token.at(special_fim_mid_id).text.c_str() ); } + if (special_fim_pad_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PAD token = %d '%s'\n", __func__, special_fim_pad_id, id_to_token.at(special_fim_pad_id).text.c_str() ); } + if (special_fim_rep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM REP token = %d '%s'\n", __func__, special_fim_rep_id, id_to_token.at(special_fim_rep_id).text.c_str() ); } + if (special_fim_sep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SEP token = %d '%s'\n", __func__, special_fim_sep_id, id_to_token.at(special_fim_sep_id).text.c_str() ); } for (const auto & id : special_eog_ids) { - LLAMA_LOG_INFO( "%s: EOG token = %d '%s'\n", __func__, id, id_to_token[id].text.c_str() ); + LLAMA_LOG_INFO( "%s: EOG token = %d '%s'\n", __func__, id, id_to_token.at(id).text.c_str() ); } LLAMA_LOG_INFO("%s: max token length = %d\n", __func__, max_token_len); From 4973338aaceffa803c1d6a4dcfb791942817ee7a Mon Sep 17 00:00:00 2001 From: 
Christian Kastner Date: Fri, 13 Jun 2025 06:51:34 +0000 Subject: [PATCH 044/192] cmake : Improve build-info.cpp generation (#14156) * cmake: Simplify build-info.cpp generation The rebuild of build-info.cpp still gets triggered when .git/index gets changes. * cmake: generate build-info.cpp in build dir --- common/CMakeLists.txt | 24 +++++++----------------- common/cmake/build-info-gen-cpp.cmake | 24 ------------------------ 2 files changed, 7 insertions(+), 41 deletions(-) delete mode 100644 common/cmake/build-info-gen-cpp.cmake diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 8ba02c1462c91..f43a630c900ff 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -23,31 +23,21 @@ if(EXISTS "${PROJECT_SOURCE_DIR}/.git") endif() if(EXISTS "${GIT_DIR}/index") - set(GIT_INDEX "${GIT_DIR}/index") + # For build-info.cpp below + set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS "${GIT_DIR}/index") else() message(WARNING "Git index not found in git repository.") - set(GIT_INDEX "") endif() else() message(WARNING "Git repository not found; to enable automatic generation of build info, make sure Git is installed and the project is a Git repository.") - set(GIT_INDEX "") endif() -# Add a custom command to rebuild build-info.cpp when .git/index changes -add_custom_command( - OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/build-info.cpp" - COMMENT "Generating build details from Git" - COMMAND ${CMAKE_COMMAND} -DMSVC=${MSVC} -DCMAKE_C_COMPILER_VERSION=${CMAKE_C_COMPILER_VERSION} - -DCMAKE_C_COMPILER_ID=${CMAKE_C_COMPILER_ID} -DCMAKE_VS_PLATFORM_NAME=${CMAKE_VS_PLATFORM_NAME} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - -DCMAKE_SYSTEM_NAME=${CMAKE_SYSTEM_NAME} -DCMAKE_SYSTEM_PROCESSOR=${CMAKE_SYSTEM_PROCESSOR} - -P "${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info-gen-cpp.cmake" - WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}" - DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/build-info.cpp.in" ${GIT_INDEX} - VERBATIM -) +set(TEMPLATE_FILE "${CMAKE_CURRENT_SOURCE_DIR}/build-info.cpp.in") +set(OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/build-info.cpp") +configure_file(${TEMPLATE_FILE} ${OUTPUT_FILE}) + set(TARGET build_info) -add_library(${TARGET} OBJECT build-info.cpp) +add_library(${TARGET} OBJECT ${OUTPUT_FILE}) if (BUILD_SHARED_LIBS) set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON) endif() diff --git a/common/cmake/build-info-gen-cpp.cmake b/common/cmake/build-info-gen-cpp.cmake deleted file mode 100644 index fbc92b52cc4fe..0000000000000 --- a/common/cmake/build-info-gen-cpp.cmake +++ /dev/null @@ -1,24 +0,0 @@ -include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake) - -set(TEMPLATE_FILE "${CMAKE_CURRENT_SOURCE_DIR}/common/build-info.cpp.in") -set(OUTPUT_FILE "${CMAKE_CURRENT_SOURCE_DIR}/common/build-info.cpp") - -# Only write the build info if it changed -if(EXISTS ${OUTPUT_FILE}) - file(READ ${OUTPUT_FILE} CONTENTS) - string(REGEX MATCH "LLAMA_COMMIT = \"([^\"]*)\";" _ ${CONTENTS}) - set(OLD_COMMIT ${CMAKE_MATCH_1}) - string(REGEX MATCH "LLAMA_COMPILER = \"([^\"]*)\";" _ ${CONTENTS}) - set(OLD_COMPILER ${CMAKE_MATCH_1}) - string(REGEX MATCH "LLAMA_BUILD_TARGET = \"([^\"]*)\";" _ ${CONTENTS}) - set(OLD_TARGET ${CMAKE_MATCH_1}) - if ( - NOT OLD_COMMIT STREQUAL BUILD_COMMIT OR - NOT OLD_COMPILER STREQUAL BUILD_COMPILER OR - NOT OLD_TARGET STREQUAL BUILD_TARGET - ) - configure_file(${TEMPLATE_FILE} ${OUTPUT_FILE}) - endif() -else() - configure_file(${TEMPLATE_FILE} ${OUTPUT_FILE}) -endif() From bd245aa763374cef3a374b21df73f06b8caf0cc4 Mon Sep 17 00:00:00 2001 From: Ewan 
Crawford Date: Fri, 13 Jun 2025 08:45:37 +0100 Subject: [PATCH 045/192] SYCL: Bump oneMath commit (#14152) Update oneMath commit to merged PR https://github.com/uxlfoundation/oneMath/pull/669 which adds SYCL-Graph support for recording CUDA BLAS commands. With this change the `MUL_MAT` tests now pass on DPC++ CUDA backends with SYCL-Graph enabled. Prior to this change, an error would be thrown. ``` $ GGML_SYCL_DISABLE_GRAPH=0 ./bin/test-backend-ops -b SYCL0 -o MUL_MAT -p type_a=f16,type_b=f32,m=16,n=1,k=256,bs=\\[1,1\\],nr=\\[2 UR CUDA ERROR: Value: 700 Name: CUDA_ERROR_ILLEGAL_ADDRESS Description: an illegal memory access was encountered Function: operator() Source Location: $HOME/dpcpp/unified-runtime/source/adapters/cuda/queue.cpp:154 Native API failed. Native API returns: 2147483646 (UR_RESULT_ERROR_UNKNOWN) Exception caught at file:$HOME/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp, line:3598, func:operator() SYCL error: CHECK_TRY_ERROR((stream)->wait()): Meet error in this line code! in function ggml_backend_sycl_synchronize at $HOME/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp:3598 $HOME/llama.cpp/ggml/src/ggml-sycl/../ggml-sycl/common.hpp:118: SYCL error Could not attach to process. If your uid matches the uid of the target process, check the setting of /proc/sys/kernel/yama/ptrace_scope, or try again as the root user. For more details, see /etc/sysctl.d/10-ptrace.conf ptrace: Operation not permitted. No stack. The program is not being run. ``` --- ggml/src/ggml-sycl/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/ggml-sycl/CMakeLists.txt b/ggml/src/ggml-sycl/CMakeLists.txt index 2a0045bcc158e..efd78b912cc65 100644 --- a/ggml/src/ggml-sycl/CMakeLists.txt +++ b/ggml/src/ggml-sycl/CMakeLists.txt @@ -142,7 +142,7 @@ else() FetchContent_Declare( ONEMATH GIT_REPOSITORY https://github.com/uxlfoundation/oneMath.git - GIT_TAG c255b1b4c41e2ee3059455c1f96a965d6a62568a + GIT_TAG 8efe85f5aaebb37f1d8c503b7af66315feabf142 ) FetchContent_MakeAvailable(ONEMATH) # Create alias to match with find_package targets name From cc04b82f42ae03090a4c617df0e7769cddb6c54a Mon Sep 17 00:00:00 2001 From: Anton Mitkov Date: Fri, 13 Jun 2025 08:51:39 +0100 Subject: [PATCH 046/192] sycl: Adding additional cpy dbg print output (#14034) --- ggml/src/ggml-sycl/common.hpp | 41 +++++++++++++++----------------- ggml/src/ggml-sycl/cpy.cpp | 3 +-- ggml/src/ggml-sycl/ggml-sycl.cpp | 26 ++++++++++---------- 3 files changed, 33 insertions(+), 37 deletions(-) diff --git a/ggml/src/ggml-sycl/common.hpp b/ggml/src/ggml-sycl/common.hpp index 4f17699a5fcc1..753b4af143622 100644 --- a/ggml/src/ggml-sycl/common.hpp +++ b/ggml/src/ggml-sycl/common.hpp @@ -513,9 +513,9 @@ constexpr size_t ceil_div(const size_t m, const size_t n) { bool gpu_has_xmx(sycl::device &dev); -template void debug_print_array(const std::string & prefix, const T array[N]) { +template std::string debug_get_array_str(const std::string & prefix, const T array[N]) { if (LIKELY(!g_ggml_sycl_debug)) { - return; + return ""; } std::stringstream ss; ss << prefix << "=["; @@ -526,29 +526,26 @@ template void debug_print_array(const std::string & prefix, con ss << array[N - 1]; } ss << "]"; - GGML_SYCL_DEBUG("%s", ss.str().c_str()); + return ss.str(); } -inline void debug_print_tensor(const std::string & prefix, const ggml_tensor * tensor, - const std::string & suffix = "") { - if (LIKELY(!g_ggml_sycl_debug)) { - return; - } - GGML_SYCL_DEBUG("%s=", prefix.c_str()); +inline std::string debug_get_tensor_str(const std::string &prefix, + 
const ggml_tensor *tensor, const std::string &suffix = "") { + std::stringstream ss; + if (LIKELY(!g_ggml_sycl_debug)) { return ss.str(); } + ss << prefix.c_str() << "="; if (tensor) { - GGML_SYCL_DEBUG("'%s':type=%s", tensor->name, ggml_type_name(tensor->type)); - debug_print_array(";ne", tensor->ne); - debug_print_array(";nb", tensor->nb); - if (!ggml_is_contiguous(tensor)) { - GGML_SYCL_DEBUG(";strided"); - } - if (ggml_is_permuted(tensor)) { - GGML_SYCL_DEBUG(";permuted"); - } + ss << "'" << tensor->name << "':type=" << ggml_type_name(tensor->type); + ss << debug_get_array_str(";ne", tensor->ne); + ss << debug_get_array_str(";nb", tensor->nb); + + if (!ggml_is_contiguous(tensor)) { ss << ";strided"; } + if (ggml_is_permuted(tensor)) { ss << ";permuted"; } } else { - GGML_SYCL_DEBUG("nullptr"); + ss << "nullptr"; } - GGML_SYCL_DEBUG("%s", suffix.c_str()); + ss << suffix; + return ss.str(); } // Use scope_op_debug_print to log operations coming from running a model @@ -564,10 +561,10 @@ struct scope_op_debug_print { return; } GGML_SYCL_DEBUG("[SYCL][OP] call %s%s:", func.data(), func_suffix.data()); - debug_print_tensor(" dst", dst); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(" dst", dst).c_str()); if (dst) { for (std::size_t i = 0; i < num_src; ++i) { - debug_print_tensor("\tsrc" + std::to_string(i), dst->src[i]); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str("\tsrc" + std::to_string(i), dst->src[i]).c_str()); } } GGML_SYCL_DEBUG("%s\n", suffix.data()); diff --git a/ggml/src/ggml-sycl/cpy.cpp b/ggml/src/ggml-sycl/cpy.cpp index 56373b4d085d5..bec1371401955 100644 --- a/ggml/src/ggml-sycl/cpy.cpp +++ b/ggml/src/ggml-sycl/cpy.cpp @@ -723,8 +723,7 @@ static void ggml_cpy_q4_1_q4_1(const char * cx, char * cdst, const int ne, const void ggml_sycl_cpy(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1) try { // Unlike other operators ggml_sycl_cpy takes 2 distinct tensors instead of a dst ggml_tensor and rely on its src field - scope_op_debug_print scope_dbg_print(__func__, src1, /*num_src=*/0, - std::string(" src0 type=") + ggml_type_name(src0->type)); + scope_op_debug_print scope_dbg_print(__func__, src1, /*num_src=*/0, debug_get_tensor_str("\tsrc0", src0)); const int64_t ne = ggml_nelements(src0); GGML_ASSERT(ne == ggml_nelements(src1)); diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index feb30304fc092..4b7610362b608 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -347,7 +347,7 @@ static enum ggml_status ggml_backend_sycl_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor *tensor) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); - debug_print_tensor(": tensor=", tensor, "\n"); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor, "\n").c_str()); ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *)buffer->context; if (tensor->view_src != NULL) { @@ -385,7 +385,7 @@ static void ggml_backend_sycl_buffer_set_tensor(ggml_backend_buffer_t buffer, const void *data, size_t offset, size_t size) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); - debug_print_tensor(": tensor=", tensor); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str()); GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset); ggml_backend_sycl_buffer_context * ctx = ( ggml_backend_sycl_buffer_context *)buffer->context; ggml_sycl_set_device(ctx->device); @@ -413,7 +413,7 @@ static void ggml_backend_sycl_buffer_get_tensor(ggml_backend_buffer_t buffer, void 
*data, size_t offset, size_t size) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); - debug_print_tensor(": tensor=", tensor); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str()); GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset); ggml_backend_sycl_buffer_context * ctx = ( ggml_backend_sycl_buffer_context *)buffer->context; @@ -444,8 +444,8 @@ ggml_backend_sycl_buffer_cpy_tensor(ggml_backend_buffer_t buffer, ggml_tensor *dst) try { bool is_cpy_supported = ggml_backend_buffer_is_sycl(src->buffer); GGML_SYCL_DEBUG("[SYCL] call %s", __func__); - debug_print_tensor(": dst=", dst); - debug_print_tensor(" src=", src); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": dst", dst).c_str()); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(" src", src).c_str()); GGML_SYCL_DEBUG(" is_cpy_supported=%d\n", is_cpy_supported); if (is_cpy_supported) { ggml_backend_sycl_buffer_context * src_ctx = (ggml_backend_sycl_buffer_context *)src->buffer->context; @@ -525,7 +525,7 @@ catch (sycl::exception const &exc) { static void ggml_backend_sycl_buffer_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); - debug_print_tensor(": tensor=", tensor); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str()); GGML_SYCL_DEBUG(" size=%zu offset=%zu value=%u\n", size, offset, value); ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *) buffer->context; SYCL_CHECK(ggml_sycl_set_device(ctx->device)); @@ -805,7 +805,7 @@ static enum ggml_status ggml_backend_sycl_split_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor *tensor) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); - debug_print_tensor(": tensor=", tensor, "\n"); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor, "\n").c_str()); GGML_ASSERT(tensor->view_src == nullptr); // views of split tensors are not supported ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context; @@ -891,7 +891,7 @@ ggml_backend_sycl_split_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor *tensor, const void *data, size_t offset, size_t size) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); - debug_print_tensor(": tensor=", tensor); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str()); GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset); // split tensors must always be set in their entirety at once GGML_ASSERT(offset == 0); @@ -947,7 +947,7 @@ ggml_backend_sycl_split_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor *tensor, void *data, size_t offset, size_t size) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); - debug_print_tensor(": tensor=", tensor); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str()); GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset); // split tensors must always be set in their entirety at once GGML_ASSERT(offset == 0); @@ -3863,7 +3863,7 @@ static void ggml_backend_sycl_set_tensor_async(ggml_backend_t backend, const void *data, size_t offset, size_t size) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); - debug_print_tensor(": tensor=", tensor); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str()); GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset); ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context; ggml_backend_buffer_t buf = tensor->view_src ? 
tensor->view_src->buffer : tensor->buffer; @@ -3884,7 +3884,7 @@ static void ggml_backend_sycl_get_tensor_async(ggml_backend_t backend, void *data, size_t offset, size_t size) try { GGML_SYCL_DEBUG("[SYCL] call %s", __func__); - debug_print_tensor(": tensor=", tensor); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str()); GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset); ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context; ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; @@ -3907,8 +3907,8 @@ static bool ggml_backend_sycl_cpy_tensor_async(ggml_backend_t backend, bool is_cpy_supported = dst->buffer->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device) && ggml_backend_buffer_is_sycl(src->buffer); GGML_SYCL_DEBUG("[SYCL] call %s", __func__); - debug_print_tensor(": dst=", dst); - debug_print_tensor(" src=", src); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": dst", dst).c_str()); + GGML_SYCL_DEBUG("%s", debug_get_tensor_str(" src", src).c_str()); GGML_SYCL_DEBUG(" is_cpy_supported=%d\n", is_cpy_supported); if (is_cpy_supported) { /* From 6e9863adbd887df43ac29a19ae5c942be4e8c411 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 13 Jun 2025 11:18:25 +0300 Subject: [PATCH 047/192] server : fix SWA condition for full context reprocess (#14163) ggml-ci --- tools/server/server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/server/server.cpp b/tools/server/server.cpp index 8efb8b704141f..b439d8b19dda3 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -3217,7 +3217,7 @@ struct server_context { } const auto n_swa = llama_model_n_swa(model); - if (pos_min > slot.n_past - n_swa) { + if (pos_min > std::max(0, slot.n_past - n_swa)) { SLT_WRN(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d, n_swa = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min, n_swa); SLT_WRN(slot, "forcing full prompt re-processing due to lack of cache data (likely due to SWA, see %s)\n", "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055"); From 11f5ff18d0b2946ea4cb8fb714ae010d734aed0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90inh=20Tr=E1=BB=8Dng=20Huy?= <77562200+huydt84@users.noreply.github.com> Date: Fri, 13 Jun 2025 17:34:08 +0900 Subject: [PATCH 048/192] pooling : make cls_b and cls_out_b optional (#14165) Co-authored-by: dinhhuy --- src/llama-graph.cpp | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index e74c9ff53b05a..4493fb164eb12 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -1556,23 +1556,30 @@ void llm_graph_context::build_pooling( ggml_tensor * inp_cls = build_inp_cls(); inp = ggml_get_rows(ctx0, inp, inp_cls); - if (cls != nullptr && cls_b != nullptr) { + if (cls) { // classification head // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566 - cur = ggml_add(ctx0, ggml_mul_mat(ctx0, cls, inp), cls_b); + cur = ggml_mul_mat(ctx0, cls, inp); + if (cls_b) { + cur = ggml_add(ctx0, cur, cls_b); + } cur = ggml_tanh(ctx0, cur); // some models don't have `cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en // https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896 if (cls_out) { - GGML_ASSERT(cls_out_b != 
nullptr); - cur = ggml_add(ctx0, ggml_mul_mat(ctx0, cls_out, cur), cls_out_b); + cur = ggml_mul_mat(ctx0, cls_out, cur); + if (cls_out_b) { + cur = ggml_add(ctx0, cur, cls_out_b); + } } } else if (cls_out) { // Single layer classification head (direct projection) // https://github.com/huggingface/transformers/blob/f4fc42216cd56ab6b68270bf80d811614d8d59e4/src/transformers/models/bert/modeling_bert.py#L1476 - GGML_ASSERT(cls_out_b != nullptr); - cur = ggml_add(ctx0, ggml_mul_mat(ctx0, cls_out, inp), cls_out_b); + cur = ggml_mul_mat(ctx0, cls_out, inp); + if (cls_out_b) { + cur = ggml_add(ctx0, cur, cls_out_b); + } } else { GGML_ABORT("RANK pooling requires either cls+cls_b or cls_out+cls_out_b"); } From 8cdc20c24027e3da01aed748f40cc0ef6cb3ba01 Mon Sep 17 00:00:00 2001 From: Christian Kastner Date: Fri, 13 Jun 2025 08:38:52 +0000 Subject: [PATCH 049/192] cmake: Add ability to pass in LLAMA_BUILD_NUMBER/COMMIT (#14167) * cmake: Add ability to pass in LLAMA_BUILD_NUMBER/COMMIT * cmake: Pass on LLAMA_BUILD_* to GGML_BUILD_* --- CMakeLists.txt | 14 ++++++++++---- common/build-info.cpp.in | 4 ++-- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f73470dffd106..50801cdc637bd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -89,6 +89,14 @@ option(LLAMA_LLGUIDANCE "llama-common: include LLGuidance library for structured include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake) include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/common.cmake) +if (NOT DEFINED LLAMA_BUILD_NUMBER) + set(LLAMA_BUILD_NUMBER ${BUILD_NUMBER}) +endif() +if (NOT DEFINED LLAMA_BUILD_COMMIT) + set(LLAMA_BUILD_COMMIT ${BUILD_COMMIT}) +endif() +set(LLAMA_INSTALL_VERSION 0.0.${BUILD_NUMBER}) + # override ggml options set(GGML_ALL_WARNINGS ${LLAMA_ALL_WARNINGS}) set(GGML_FATAL_WARNINGS ${LLAMA_FATAL_WARNINGS}) @@ -155,6 +163,8 @@ if (LLAMA_USE_SYSTEM_GGML) endif() if (NOT TARGET ggml AND NOT LLAMA_USE_SYSTEM_GGML) + set(GGML_BUILD_NUMBER ${LLAMA_BUILD_NUMBER}) + set(GGML_BUILD_COMMIT ${LLAMA_BUILD_COMMIT}) add_subdirectory(ggml) # ... 
otherwise assume ggml is added by a parent CMakeLists.txt endif() @@ -204,10 +214,6 @@ endif() include(GNUInstallDirs) include(CMakePackageConfigHelpers) -set(LLAMA_BUILD_NUMBER ${BUILD_NUMBER}) -set(LLAMA_BUILD_COMMIT ${BUILD_COMMIT}) -set(LLAMA_INSTALL_VERSION 0.0.${BUILD_NUMBER}) - set(LLAMA_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files") set(LLAMA_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files") set(LLAMA_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files") diff --git a/common/build-info.cpp.in b/common/build-info.cpp.in index 0b945aa68fff3..aee9d7eafd681 100644 --- a/common/build-info.cpp.in +++ b/common/build-info.cpp.in @@ -1,4 +1,4 @@ -int LLAMA_BUILD_NUMBER = @BUILD_NUMBER@; -char const *LLAMA_COMMIT = "@BUILD_COMMIT@"; +int LLAMA_BUILD_NUMBER = @LLAMA_BUILD_NUMBER@; +char const *LLAMA_COMMIT = "@LLAMA_BUILD_COMMIT@"; char const *LLAMA_COMPILER = "@BUILD_COMPILER@"; char const *LLAMA_BUILD_TARGET = "@BUILD_TARGET@"; From 0e48242af92d77e7141ee4b40567b7cd83b4b8a8 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 13 Jun 2025 11:55:44 +0300 Subject: [PATCH 050/192] readme : remove survey link (#14168) --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 928100f3c256a..90c7364dfcba0 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,6 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others) ## Hot topics - 🔥 Multimodal support arrived in `llama-server`: [#12898](https://github.com/ggml-org/llama.cpp/pull/12898) | [documentation](./docs/multimodal.md) -- **GGML developer experience survey (organized and reviewed by NVIDIA):** [link](https://forms.gle/Gasw3cRgyhNEnrwK9) - A new binary `llama-mtmd-cli` is introduced to replace `llava-cli`, `minicpmv-cli`, `gemma3-cli` ([#13012](https://github.com/ggml-org/llama.cpp/pull/13012)) and `qwen2vl-cli` ([#13141](https://github.com/ggml-org/llama.cpp/pull/13141)), `libllava` will be deprecated - VS Code extension for FIM completions: https://github.com/ggml-org/llama.vscode - Universal [tool call support](./docs/function-calling.md) in `llama-server` https://github.com/ggml-org/llama.cpp/pull/9639 From 33e0a79d1a044e8373bb8d37bf44ece3791e1de8 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 13 Jun 2025 13:47:55 +0300 Subject: [PATCH 051/192] batch : rework llama_batch_allocr (#14153) * batch : rework llama_batch_allocr ggml-ci * cont : move validation inside class ggml-ci * cont : move output counting to class ggml-ci * cont : minor ggml-ci * batch : add TODOs ggml-ci --- src/llama-batch.cpp | 68 +++++++++++++++- src/llama-batch.h | 27 ++++-- src/llama-context.cpp | 145 +++++++++++++-------------------- src/llama-context.h | 14 ++-- src/llama-graph.cpp | 7 ++ src/llama-graph.h | 6 +- src/llama-kv-cache-unified.cpp | 1 + 7 files changed, 162 insertions(+), 106 deletions(-) diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index 69e0d7549c334..9066d5a9b274d 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -1,5 +1,9 @@ #include "llama-batch.h" +#include "llama-impl.h" +#include "llama-cparams.h" +#include "llama-vocab.h" + #include #include #include @@ -279,9 +283,42 @@ llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple ); } -llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0) { - batch = in_batch; +llama_batch_allocr::llama_batch_allocr() = default; + +bool llama_batch_allocr::init(const 
llama_batch & batch_inp, const llama_vocab & vocab, llama_pos p0) { + clear(); + + batch = batch_inp; + GGML_ASSERT(batch.n_tokens > 0); + + if (!batch.pos) { + if (batch.seq_id) { + LLAMA_LOG_ERROR("%s: pos == NULL, but seq_id != NULL\n", __func__); + return false; + } + } + + if (batch.token) { + for (int32_t i = 0; i < batch.n_tokens; ++i) { + if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= vocab.n_tokens()) { + LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]); + return false; + } + } + } + + if (batch.seq_id) { + for (int32_t i = 0; i < batch.n_tokens; ++i) { + for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { + if (batch.seq_id && (batch.seq_id[i][s] < 0 || batch.seq_id[i][s] >= LLAMA_MAX_PARALLEL_SEQUENCES)) { + LLAMA_LOG_ERROR("%s: invalid seq_id[%d][%d] = %d > %d\n", __func__, i, s, batch.seq_id[i][s], LLAMA_MAX_PARALLEL_SEQUENCES); + return false; + } + } + } + } + if (!batch.pos) { assert(p0 >= 0); pos.resize(batch.n_tokens); @@ -290,6 +327,7 @@ llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0 } batch.pos = pos.data(); } + if (!batch.n_seq_id) { n_seq_id.resize(batch.n_tokens); for (int32_t i = 0; i < batch.n_tokens; i++) { @@ -297,6 +335,7 @@ llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0 } batch.n_seq_id = n_seq_id.data(); } + if (!batch.seq_id) { seq_id.resize(batch.n_tokens + 1); seq_id[batch.n_tokens] = NULL; @@ -305,12 +344,37 @@ llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0 } batch.seq_id = seq_id.data(); } + if (!batch.logits) { // by default return the output only for the last token output.resize(batch.n_tokens); output[output.size() - 1] = true; batch.logits = output.data(); } + + for (int32_t i = 0; i < batch.n_tokens; ++i) { + n_outputs += batch.logits[i] != 0; + } + + return true; +} + +const llama_batch & llama_batch_allocr::get_batch() const { + return batch; +} + +uint32_t llama_batch_allocr::get_n_outputs() const { + return n_outputs; +} + +void llama_batch_allocr::clear() { + n_outputs = 0; + + batch = {}; + pos.clear(); + n_seq_id.clear(); + seq_id.clear(); + output.clear(); } // diff --git a/src/llama-batch.h b/src/llama-batch.h index 7ad82b528b18b..24340b00f2702 100644 --- a/src/llama-batch.h +++ b/src/llama-batch.h @@ -18,8 +18,8 @@ struct llama_ubatch { llama_token * token; // [n_tokens] float * embd; // [n_embd, n_tokens] llama_pos * pos; // [n_tokens] - int32_t * n_seq_id; // [n_seqs] // TODO: remove, should belong to only 1 sequence - llama_seq_id ** seq_id; // [n_seqs] // TODO: become llama_seq_id * seq_id; + int32_t * n_seq_id; // [n_seqs] + llama_seq_id ** seq_id; // [n_seqs] int8_t * output; // [n_tokens] }; @@ -78,15 +78,28 @@ struct llama_sbatch { }; // temporary allocate memory for the input batch if needed -struct llama_batch_allocr { - struct llama_batch batch; +class llama_batch_allocr { +public: + llama_batch_allocr(); + + // optionally fulfill the batch returned by llama_batch_get_one + bool init(const llama_batch & batch_inp, const llama_vocab & vocab, llama_pos p0); + + const llama_batch & get_batch() const; + + uint32_t get_n_outputs() const; + +private: + void clear(); + + llama_batch batch; + + uint32_t n_outputs; std::array seq_id_0 = { 0 }; // default sequence id + std::vector pos; std::vector n_seq_id; std::vector seq_id; std::vector output; - - // optionally fulfill the batch returned by llama_batch_get_one - llama_batch_allocr(struct llama_batch in_batch, llama_pos p0); }; diff --git 
a/src/llama-context.cpp b/src/llama-context.cpp index 2e551bf6e111c..ec1e1189b219a 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -1,6 +1,7 @@ #include "llama-context.h" #include "llama-impl.h" +#include "llama-batch.h" #include "llama-io.h" #include "llama-memory.h" #include "llama-mmap.h" @@ -18,7 +19,8 @@ llama_context::llama_context( const llama_model & model, llama_context_params params) : - model(model) { + model(model), + batch_allocr(std::make_unique()) { LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__); t_start_us = model.t_start_us; @@ -494,7 +496,7 @@ float * llama_context::get_logits() { } float * llama_context::get_logits_ith(int32_t i) { - int32_t j = -1; + int64_t j = -1; try { if (logits == nullptr) { @@ -517,7 +519,7 @@ float * llama_context::get_logits_ith(int32_t i) { } if (j >= n_outputs) { // This should not happen - throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, n_outputs)); + throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs)); } return logits + j*model.vocab.n_tokens(); @@ -536,7 +538,7 @@ float * llama_context::get_embeddings() { } float * llama_context::get_embeddings_ith(int32_t i) { - int32_t j = -1; + int64_t j = -1; try { if (embd == nullptr) { @@ -559,7 +561,7 @@ float * llama_context::get_embeddings_ith(int32_t i) { } if (j >= n_outputs) { // This should not happen - throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, n_outputs)); + throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs)); } return embd + j*model.hparams.n_embd; @@ -719,40 +721,27 @@ llm_graph_result_ptr llama_context::process_ubatch(const llama_ubatch & ubatch, return res; } -int llama_context::encode(llama_batch & inp_batch) { - if (inp_batch.n_tokens == 0) { +int llama_context::encode(const llama_batch & batch_inp) { + if (batch_inp.n_tokens == 0) { LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); return -1; } // temporary allocate memory for the input batch if needed // note: during encode, we always pass the full sequence starting from pos = 0 - llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : 0); + if (!batch_allocr->init(batch_inp, model.vocab, batch_inp.pos ? 
-1 : 0)) { + LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); + return -1; + } - const llama_batch & batch = batch_allocr.batch; - const int32_t n_tokens = batch.n_tokens; + const llama_batch & batch = batch_allocr->get_batch(); - const auto & hparams = model.hparams; + const uint32_t n_tokens = batch.n_tokens; GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT - // TODO: move the validation to the llama_batch_allocr - if (batch.token) { - for (int32_t i = 0; i < n_tokens; ++i) { - if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= model.vocab.n_tokens()) { - LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]); - return -1; - } - - if (batch.seq_id && (batch.seq_id[i][0] < 0 || batch.seq_id[i][0] >= LLAMA_MAX_PARALLEL_SEQUENCES)) { - LLAMA_LOG_ERROR("%s: invalid seq_id[%d] = %d > %d\n", __func__, i, batch.seq_id[i][0], LLAMA_MAX_PARALLEL_SEQUENCES); - throw -1; - } - } - } - // micro-batching is not possible for non-causal encoding, so we process the batch in a single shot - GGML_ASSERT(cparams.n_ubatch >= (uint32_t) n_tokens && "encoder requires n_ubatch >= n_tokens"); + GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens"); if (t_compute_start_us == 0) { t_compute_start_us = ggml_time_us(); @@ -763,6 +752,8 @@ int llama_context::encode(llama_batch & inp_batch) { n_queued_tokens += n_tokens; + const auto & hparams = model.hparams; + const int64_t n_embd = hparams.n_embd; llama_sbatch sbatch = llama_sbatch(batch, n_embd, /* simple_split */ true); @@ -775,7 +766,7 @@ int llama_context::encode(llama_batch & inp_batch) { return -2; }; - for (int32_t i = 0; i < n_tokens; ++i) { + for (uint32_t i = 0; i < n_tokens; ++i) { output_ids[i] = i; } @@ -831,7 +822,8 @@ int llama_context::encode(llama_batch & inp_batch) { GGML_ASSERT(!ubatch.equal_seqs); // TODO: handle equal splits - for (int32_t i = 0; i < n_tokens; i++) { + // TODO: fix indexing [UBATCH_IDX] + for (uint32_t i = 0; i < n_tokens; i++) { const llama_seq_id seq_id = ubatch.seq_id[i][0]; if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { continue; @@ -846,6 +838,7 @@ int llama_context::encode(llama_batch & inp_batch) { auto & embd_seq_out = embd_seq; const uint32_t n_cls_out = hparams.n_cls_out; + // TODO: fix indexing [UBATCH_IDX] for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { const llama_seq_id seq_id = ubatch.seq_id[s][0]; if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { @@ -878,13 +871,11 @@ int llama_context::encode(llama_batch & inp_batch) { memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd)); // remember the sequence ids used during the encoding - needed for cross attention later - // TODO: the seuqence indexing here is likely not correct in the general case - // probably works only for split_simple cross.seq_ids_enc.resize(n_tokens); - for (int32_t i = 0; i < n_tokens; i++) { + for (uint32_t i = 0; i < n_tokens; i++) { cross.seq_ids_enc[i].clear(); - for (int s = 0; s < ubatch.n_seq_id[i]; s++) { - llama_seq_id seq_id = ubatch.seq_id[i][s]; + for (int s = 0; s < batch.n_seq_id[i]; s++) { + llama_seq_id seq_id = batch.seq_id[i][s]; cross.seq_ids_enc[i].insert(seq_id); } } @@ -893,68 +884,44 @@ int llama_context::encode(llama_batch & inp_batch) { return 0; } -int llama_context::decode(llama_batch & inp_batch) { +int llama_context::decode(const llama_batch & batch_inp) { if (!memory) { LLAMA_LOG_DEBUG("%s: cannot decode batches with this context (calling encode() instead)\n", __func__); - return encode(inp_batch); 
+ return encode(batch_inp); } - if (inp_batch.n_tokens == 0) { + if (batch_inp.n_tokens == 0) { LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); return -1; } - if (!inp_batch.pos) { - if (inp_batch.seq_id) { - LLAMA_LOG_ERROR("%s: pos == NULL, but seq_id != NULL\n", __func__); - return -1; - } - } - // temporary allocate memory for the input batch if needed - llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : memory->seq_pos_max(0) + 1); + if (!batch_allocr->init(batch_inp, model.vocab, batch_inp.pos ? -1 : memory->seq_pos_max(0) + 1)) { + LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); + return -1; + } - const llama_batch & batch = batch_allocr.batch; + const llama_batch & batch = batch_allocr->get_batch(); const auto & vocab = model.vocab; const auto & hparams = model.hparams; const int32_t n_vocab = vocab.n_tokens(); + const int64_t n_embd = hparams.n_embd; - const int64_t n_tokens_all = batch.n_tokens; - const int64_t n_embd = hparams.n_embd; + const uint32_t n_tokens_all = batch.n_tokens; GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT - // TODO: move the validation to the llama_batch_allocr - if (batch.token) { - for (int64_t i = 0; i < n_tokens_all; ++i) { - if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= model.vocab.n_tokens()) { - LLAMA_LOG_ERROR("%s: invalid token[%" PRId64 "] = %d\n", __func__, i, batch.token[i]); - return -1; - } - - if (batch.seq_id && (batch.seq_id[i][0] < 0 || batch.seq_id[i][0] >= LLAMA_MAX_PARALLEL_SEQUENCES)) { - LLAMA_LOG_ERROR("%s: invalid seq_id[%" PRId64 "] = %d >= %d\n", __func__, i, batch.seq_id[i][0], LLAMA_MAX_PARALLEL_SEQUENCES); - return -1; - } - } - } - // this indicates we are doing pooled embedding const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; - int64_t n_outputs_all = 0; - - // count outputs - for (uint32_t i = 0; i < n_tokens_all; ++i) { - n_outputs_all += batch.logits[i] != 0; - } + const uint32_t n_outputs_all = batch_allocr->get_n_outputs(); if (embd_pooled) { // require that all tokens are output if (n_outputs_all != n_tokens_all) { - LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %" PRId64 ", n_tokens_all = %" PRId64 ")\n", + LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n", __func__, n_outputs_all, n_tokens_all); return -1; } @@ -1024,7 +991,7 @@ int llama_context::decode(llama_batch & inp_batch) { // reserve output buffer if (output_reserve(n_outputs_all) < n_outputs_all) { - LLAMA_LOG_ERROR("%s: could not reserve space for batch with %" PRId64 " outputs\n", __func__, n_outputs_all); + LLAMA_LOG_ERROR("%s: could not reserve space for batch with %d outputs\n", __func__, n_outputs_all); return -2; }; @@ -1063,6 +1030,7 @@ int llama_context::decode(llama_batch & inp_batch) { pos_min[s] = std::numeric_limits::max(); } + // TODO: fix sequence indexing for (uint32_t i = 0; i < ubatch.n_tokens; ++i) { const auto & seq_id = ubatch.seq_id[i][0]; @@ -1176,14 +1144,14 @@ int llama_context::decode(llama_batch & inp_batch) { n_outputs = n_outputs_all; // set output mappings - { + if (n_outputs > 0) { bool sorted_output = true; auto & out_ids = mstate->out_ids(); - GGML_ASSERT(out_ids.size() == (size_t) n_outputs_all); + GGML_ASSERT(out_ids.size() == (size_t) n_outputs); - for (int64_t i = 0; i < n_outputs_all; ++i) { + for (int64_t i = 0; i < n_outputs; ++i) { int64_t out_id = out_ids[i]; output_ids[out_id] = i; if 
(out_id != i) { @@ -1195,20 +1163,22 @@ int llama_context::decode(llama_batch & inp_batch) { // note: this is mostly relevant for recurrent models atm if (!sorted_output) { const uint32_t n_vocab = model.vocab.n_tokens(); - const uint32_t n_embd = model.hparams.n_embd; + const uint64_t n_embd = model.hparams.n_embd; GGML_ASSERT((size_t) n_outputs == out_ids.size()); // TODO: is there something more efficient which also minimizes swaps? // selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort) - for (int32_t i = 0; i < n_outputs - 1; ++i) { - int32_t j_min = i; - for (int32_t j = i + 1; j < n_outputs; ++j) { + for (uint32_t i = 0; i < n_outputs - 1; ++i) { + uint32_t j_min = i; + for (uint32_t j = i + 1; j < n_outputs; ++j) { if (out_ids[j] < out_ids[j_min]) { j_min = j; } } - if (j_min == i) { continue; } + if (j_min == i) { + continue; + } std::swap(out_ids[i], out_ids[j_min]); if (logits_size > 0) { for (uint32_t k = 0; k < n_vocab; k++) { @@ -1221,8 +1191,10 @@ int llama_context::decode(llama_batch & inp_batch) { } } } + std::fill(output_ids.begin(), output_ids.end(), -1); - for (int32_t i = 0; i < n_outputs; ++i) { + + for (uint32_t i = 0; i < n_outputs; ++i) { output_ids[out_ids[i]] = i; } } @@ -1242,7 +1214,7 @@ int llama_context::decode(llama_batch & inp_batch) { // output // -int32_t llama_context::output_reserve(int32_t n_outputs) { +uint32_t llama_context::output_reserve(int32_t n_outputs) { const auto & hparams = model.hparams; const auto & vocab = model.vocab; @@ -1308,8 +1280,7 @@ int32_t llama_context::output_reserve(int32_t n_outputs) { // set all ids as invalid (negative) std::fill(output_ids.begin(), output_ids.end(), -1); - this->n_outputs = 0; - this->n_outputs_max = n_outputs_max; + this->n_outputs = 0; return n_outputs_max; } @@ -1800,14 +1771,12 @@ size_t llama_context::state_write_data(llama_io_write_i & io) { std::vector w_output_pos; - GGML_ASSERT(n_outputs <= n_outputs_max); - w_output_pos.resize(n_outputs); // build a more compact representation of the output ids for (size_t i = 0; i < n_batch(); ++i) { // map an output id to a position in the batch - int32_t pos = output_ids[i]; + int64_t pos = output_ids[i]; if (pos >= 0) { GGML_ASSERT(pos < n_outputs); w_output_pos[pos] = i; @@ -2082,7 +2051,7 @@ void llama_context::opt_epoch_iter( embd_seq.clear(); - int64_t n_outputs_all = n_tokens_all; + uint32_t n_outputs_all = n_tokens_all; auto mstate = memory->init_batch(batch, cparams.n_ubatch, embd_pooled); if (!mstate || mstate->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) { @@ -2092,7 +2061,7 @@ void llama_context::opt_epoch_iter( // reserve output buffer if (output_reserve(n_outputs_all) < n_outputs_all) { - LLAMA_LOG_ERROR("%s: could not reserve space for batch with %" PRId64 " outputs\n", __func__, n_outputs_all); + LLAMA_LOG_ERROR("%s: could not reserve space for batch with %d outputs\n", __func__, n_outputs_all); GGML_ABORT("TODO: handle this error"); }; diff --git a/src/llama-context.h b/src/llama-context.h index 2e0da8c83bd59..040f03ae42e65 100644 --- a/src/llama-context.h +++ b/src/llama-context.h @@ -1,7 +1,6 @@ #pragma once #include "llama.h" -#include "llama-batch.h" #include "llama-cparams.h" #include "llama-graph.h" #include "llama-adapter.h" @@ -13,6 +12,7 @@ #include struct llama_model; +class llama_batch_allocr; class llama_io_read_i; class llama_io_write_i; @@ -102,8 +102,8 @@ struct llama_context { llama_memory_state_i * mstate, ggml_status & ret); - int encode(llama_batch & inp_batch); - int decode(llama_batch & 
inp_batch); + int encode(const llama_batch & batch_inp); + int decode(const llama_batch & batch_inp); // // state save/load @@ -181,7 +181,7 @@ struct llama_context { // Make sure enough space is available for outputs. // Returns max number of outputs for which space was reserved. - int32_t output_reserve(int32_t n_outputs); + uint32_t output_reserve(int32_t n_outputs); // // graph @@ -246,8 +246,10 @@ struct llama_context { // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE std::map> embd_seq; - int32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch - int32_t n_outputs_max = 0; // capacity (of tokens positions) for the output buffers + // reuse the batch_allocr to avoid unnecessary memory allocations + std::unique_ptr batch_allocr; + + uint32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch std::vector output_ids; // map batch token positions to ids of the logits and embd buffers diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 4493fb164eb12..337fb5cb0df36 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -139,6 +139,7 @@ void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) { std::vector sum(n_tokens, 0); + // TODO: fix indexing [UBATCH_IDX] for (int s = 0; s < n_seqs; ++s) { const llama_seq_id seq_id = ubatch->seq_id[s][0]; @@ -156,6 +157,7 @@ void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) { } } + // TODO: fix indexing [UBATCH_IDX] for (int s = 0; s < n_seqs; ++s) { const llama_seq_id seq_id = ubatch->seq_id[s][0]; @@ -180,6 +182,7 @@ void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) { uint32_t * data = (uint32_t *) cls->data; memset(cls->data, 0, n_tokens * ggml_element_size(cls)); + // TODO: fix indexing [UBATCH_IDX] for (int s = 0; s < n_seqs; ++s) { const llama_seq_id seq_id = ubatch->seq_id[s][0]; @@ -210,6 +213,7 @@ void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) { std::vector last_pos(n_tokens, -1); std::vector last_row(n_tokens, -1); + // TODO: fix indexing [UBATCH_IDX] for (int s = 0; s < n_seqs; ++s) { const llama_seq_id seq_id = ubatch->seq_id[s][0]; @@ -283,6 +287,7 @@ void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) { const int32_t ti = s0*n_seq_tokens + i; float f = -INFINITY; + // TODO: fix indexing [UBATCH_IDX] for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) { if (ubatch->seq_id[s0][s] == seq_id && ubatch->pos[ti] <= ubatch->pos[tj]) { if (hparams.use_alibi) { @@ -322,6 +327,7 @@ void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) { const int32_t ti = s0*n_seq_tokens + i; float f = -INFINITY; + // TODO: fix indexing [UBATCH_IDX] for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) { if (ubatch->seq_id[s0][s] == seq_id) { if (hparams.use_alibi) { @@ -377,6 +383,7 @@ void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) { for (int j = 0; j < n_tokens; ++j) { for (int i = 0; i < n_enc; ++i) { float f = -INFINITY; + // TODO: fix indexing [UBATCH_IDX] for (int s = 0; s < ubatch->n_seq_id[j]; ++s) { const llama_seq_id seq_id = ubatch->seq_id[j][s]; if (cross->seq_ids_enc[i].find(seq_id) != cross->seq_ids_enc[i].end()) { diff --git a/src/llama-graph.h b/src/llama-graph.h index 88fb77f1ddc9a..87813119b1a3c 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -378,7 +378,7 @@ struct llm_graph_params { const llama_memory_state_i * mstate; const llama_cross * cross; - int32_t n_outputs; + uint32_t n_outputs; const llm_graph_cb & 
cb; }; @@ -412,8 +412,8 @@ struct llm_graph_context { const float norm_eps; const float norm_rms_eps; - const int32_t n_tokens; - const int32_t n_outputs; + const int64_t n_tokens; + const int64_t n_outputs; const int32_t n_ctx_orig; // yarn const enum llama_pooling_type pooling_type; diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index 89606c598fc4f..d4e92eab3a179 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -674,6 +674,7 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch cells.pos_set(head_cur + idx, ubatch.pos[idx]); + // TODO: fix indexing [UBATCH_IDX] for (int32_t i = 0; i < ubatch.n_seq_id[s]; i++) { cells.seq_add(head_cur + idx, ubatch.seq_id[s][i]); } From 8e7c3d1d1ae998b2b1cf4b94d6a079a3970461f7 Mon Sep 17 00:00:00 2001 From: ddpasa <112642920+ddpasa@users.noreply.github.com> Date: Fri, 13 Jun 2025 15:17:53 +0200 Subject: [PATCH 052/192] docs : Update multimodal.md (#14122) * Update multimodal.md * Update multimodal.md --- docs/multimodal.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/multimodal.md b/docs/multimodal.md index e849c2a0b8ba1..edbd081df7969 100644 --- a/docs/multimodal.md +++ b/docs/multimodal.md @@ -107,3 +107,7 @@ NOTE: some models may require large context window, for example: `-c 8192` (tool_name) -hf ggml-org/Qwen2.5-Omni-3B-GGUF (tool_name) -hf ggml-org/Qwen2.5-Omni-7B-GGUF ``` + +## Finding more models: + +GGUF models on Huggingface with vision capabilities can be found here: https://huggingface.co/models?pipeline_tag=image-text-to-text&sort=trending&search=gguf From b7c230562b3366dfc84b6be3dddc5e1f3d525e42 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 13 Jun 2025 18:35:00 +0300 Subject: [PATCH 053/192] batch : add LLAMA_BATCH_DEBUG environment variable (#14172) * batch : add LLAMA_BATCH_DEBUG environment variable ggml-ci * cont : improve seq_id display --- src/llama-batch.cpp | 53 ++++++++++++++++++++++++++++++++++++++++++++- src/llama-batch.h | 2 ++ 2 files changed, 54 insertions(+), 1 deletion(-) diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index 9066d5a9b274d..bdbf766266f90 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -7,6 +7,7 @@ #include #include #include +#include llama_ubatch llama_sbatch::reserve_ubatch(size_t n_ubatch, bool has_embd) { // clear empty sequences @@ -283,7 +284,10 @@ llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple ); } -llama_batch_allocr::llama_batch_allocr() = default; +llama_batch_allocr::llama_batch_allocr() { + const char * LLAMA_BATCH_DEBUG = getenv("LLAMA_BATCH_DEBUG"); + debug = LLAMA_BATCH_DEBUG ? 
atoi(LLAMA_BATCH_DEBUG) : 0; +} bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab & vocab, llama_pos p0) { clear(); @@ -356,6 +360,53 @@ bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab & n_outputs += batch.logits[i] != 0; } + if (debug > 0) { + LLAMA_LOG_DEBUG("%s: input batch info (p0 = %d):\n", __func__, p0); + LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, batch.n_tokens); + LLAMA_LOG_DEBUG("%s: token = %p\n", __func__, (void *) batch.token); + LLAMA_LOG_DEBUG("%s: embd = %p\n", __func__, (void *) batch.embd); + LLAMA_LOG_DEBUG("%s: pos = %p\n", __func__, (void *) batch.pos); + LLAMA_LOG_DEBUG("%s: n_seq_id = %p\n", __func__, (void *) batch.n_seq_id); + LLAMA_LOG_DEBUG("%s: seq_id = %p\n", __func__, (void *) batch.seq_id); + LLAMA_LOG_DEBUG("%s: logits = %p\n", __func__, (void *) batch.logits); + LLAMA_LOG_DEBUG("%s: n_outputs = %d\n", __func__, n_outputs); + + if (debug > 1) { + int seq_id_max = 0; + for (int32_t i = 0; i < batch.n_tokens; ++i) { + for (int s = 0; s < batch.n_seq_id[i]; ++s) { + for (int s = 0; s < batch.n_seq_id[i]; ++s) { + seq_id_max = std::max(seq_id_max, batch.seq_id[i][s]); + } + } + } + ++seq_id_max; + + LLAMA_LOG_DEBUG("%s: token = [\n", __func__); + for (int32_t i = 0; i < batch.n_tokens; ++i) { + std::vector seq_id(seq_id_max); + + for (int s = 0; s < batch.n_seq_id[i]; ++s) { + seq_id[batch.seq_id[i][s]] = 1; + } + + std::stringstream ss; + for (int s = 0; s < seq_id_max; ++s) { + if (seq_id[s]) { + ss << s%10; + } else { + ss << "."; + } + } + + LLAMA_LOG_DEBUG("%s: %4d: id = %6d (%16s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n", + __func__, i, batch.token[i], vocab.token_to_piece(batch.token[i]).c_str(), + batch.pos[i], batch.n_seq_id[i], ss.str().c_str(), batch.logits[i]); + } + LLAMA_LOG_DEBUG("%s: ]\n", __func__); + } + } + return true; } diff --git a/src/llama-batch.h b/src/llama-batch.h index 24340b00f2702..1e0be8ac2c6ce 100644 --- a/src/llama-batch.h +++ b/src/llama-batch.h @@ -102,4 +102,6 @@ class llama_batch_allocr { std::vector n_seq_id; std::vector seq_id; std::vector output; + + int debug; }; From cdc7c966df04638d75d84e368905976005b9f2f3 Mon Sep 17 00:00:00 2001 From: Guy Goldenberg Date: Fri, 13 Jun 2025 19:20:25 +0300 Subject: [PATCH 054/192] Merge commit from fork * vocab : prevent integer overflow during load * Add static cast and GGML_ABORT --------- Co-authored-by: Georgi Gerganov --- src/llama-vocab.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index d8c9d9730a095..07e692208b0eb 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -19,6 +19,7 @@ #include #include #include +#include // // helpers @@ -2572,6 +2573,10 @@ int32_t llama_vocab::impl::token_to_piece(llama_token token, char * buf, int32_t // copy piece chars to output text buffer // skip up to 'lstrip' leading spaces before copying auto _try_copy = [=] (const char * token, size_t size) -> int32_t { + if (size >= static_cast(std::numeric_limits::max())) { + GGML_ABORT("invalid token size: %zu exceeds int32_t limit", size); + } + for (int32_t i = 0; i < lstrip && size && *token == ' '; ++i) { token++; size--; From d3b5f17dc98ab69edde935683f9f16c0d359889e Mon Sep 17 00:00:00 2001 From: Svetlozar Georgiev <55534064+sgeor255@users.noreply.github.com> Date: Fri, 13 Jun 2025 17:32:56 +0100 Subject: [PATCH 055/192] sycl: fix docker image (#14144) --- .devops/intel.Dockerfile | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 
deletions(-) diff --git a/.devops/intel.Dockerfile b/.devops/intel.Dockerfile index 8cad660523ecc..9ce80a71eb950 100644 --- a/.devops/intel.Dockerfile +++ b/.devops/intel.Dockerfile @@ -49,19 +49,23 @@ COPY --from=build /app/full /app WORKDIR /app -RUN apt-get update \ - && apt-get install -y \ - git \ - python3 \ - python3-pip \ - && pip install --upgrade pip setuptools wheel \ - && pip install -r requirements.txt \ - && apt autoremove -y \ - && apt clean -y \ - && rm -rf /tmp/* /var/tmp/* \ - && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \ - && find /var/cache -type f -delete - +RUN apt-get update && \ + apt-get install -y \ + git \ + python3 \ + python3-pip \ + python3-venv && \ + python3 -m venv /opt/venv && \ + . /opt/venv/bin/activate && \ + pip install --upgrade pip setuptools wheel && \ + pip install -r requirements.txt && \ + apt autoremove -y && \ + apt clean -y && \ + rm -rf /tmp/* /var/tmp/* && \ + find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete && \ + find /var/cache -type f -delete + +ENV PATH="/opt/venv/bin:$PATH" ENTRYPOINT ["/app/tools.sh"] From 1d03941752662bdab4eb3a5f3dff4e7f242ff3ad Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 13 Jun 2025 20:03:05 +0300 Subject: [PATCH 056/192] vocab : fix build (#14175) ggml-ci --- src/llama-vocab.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index 07e692208b0eb..905d7c4281d9c 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -9,17 +9,16 @@ #include #include +#include #include -#include #include #include #include +#include #include #include #include #include -#include -#include // // helpers From ea76fac68c29c7d70def7a5e5ad5f33726b36840 Mon Sep 17 00:00:00 2001 From: Aman Gupta Date: Sat, 14 Jun 2025 16:34:20 +0800 Subject: [PATCH 057/192] compare-llama-bench: add option to plot (#14169) * compare llama-bench: add option to plot * Address review comments: convert case + add type hints * Add matplotlib to requirements * fix tests * Improve comment and fix assert condition for test * Add back default test_name, add --plot_log_scale * use log_scale regardless of x_values --- .../requirements-compare-llama-bench.txt | 1 + scripts/compare-llama-bench.py | 169 +++++++++++++++++- 2 files changed, 169 insertions(+), 1 deletion(-) diff --git a/requirements/requirements-compare-llama-bench.txt b/requirements/requirements-compare-llama-bench.txt index e0aaa32043ce2..d87e897e17199 100644 --- a/requirements/requirements-compare-llama-bench.txt +++ b/requirements/requirements-compare-llama-bench.txt @@ -1,2 +1,3 @@ tabulate~=0.9.0 GitPython~=3.1.43 +matplotlib~=3.10.0 diff --git a/scripts/compare-llama-bench.py b/scripts/compare-llama-bench.py index a1013c3b7a66d..30e3cf8649e8a 100755 --- a/scripts/compare-llama-bench.py +++ b/scripts/compare-llama-bench.py @@ -19,6 +19,7 @@ print("the following Python libraries are required: GitPython, tabulate.") # noqa: NP100 raise e + logger = logging.getLogger("compare-llama-bench") # All llama-bench SQL fields @@ -122,11 +123,15 @@ parser.add_argument("--check", action="store_true", help="check if all required Python libraries are installed") parser.add_argument("-s", "--show", help=help_s) parser.add_argument("--verbose", action="store_true", help="increase output verbosity") +parser.add_argument("--plot", help="generate a performance comparison plot and save to specified file (e.g., plot.png)") +parser.add_argument("--plot_x", help="parameter to 
use as x axis for plotting (default: n_depth)", default="n_depth") +parser.add_argument("--plot_log_scale", action="store_true", help="use log scale for x axis in plots (off by default)") known_args, unknown_args = parser.parse_known_args() logging.basicConfig(level=logging.DEBUG if known_args.verbose else logging.INFO) + if known_args.check: # Check if all required Python libraries are installed. Would have failed earlier if not. sys.exit(0) @@ -499,7 +504,6 @@ def valid_format(data_files: list[str]) -> bool: name_compare = bench_data.get_commit_name(hexsha8_compare) - # If the user provided columns to group the results by, use them: if known_args.show is not None: show = known_args.show.split(",") @@ -544,6 +548,14 @@ def valid_format(data_files: list[str]) -> bool: show.remove(prop) except ValueError: pass + + # Add plot_x parameter to parameters to show if it's not already present: + if known_args.plot: + for k, v in PRETTY_NAMES.items(): + if v == known_args.plot_x and k not in show: + show.append(k) + break + rows_show = bench_data.get_rows(show, hexsha8_baseline, hexsha8_compare) if not rows_show: @@ -600,6 +612,161 @@ def valid_format(data_files: list[str]) -> bool: headers = [PRETTY_NAMES[p] for p in show] headers += ["Test", f"t/s {name_baseline}", f"t/s {name_compare}", "Speedup"] +if known_args.plot: + def create_performance_plot(table_data: list[list[str]], headers: list[str], baseline_name: str, compare_name: str, output_file: str, plot_x_param: str, log_scale: bool = False): + try: + import matplotlib.pyplot as plt + import matplotlib + matplotlib.use('Agg') + except ImportError as e: + logger.error("matplotlib is required for --plot.") + raise e + + data_headers = headers[:-4] # Exclude the last 4 columns (Test, baseline t/s, compare t/s, Speedup) + plot_x_index = None + plot_x_label = plot_x_param + + if plot_x_param not in ["n_prompt", "n_gen", "n_depth"]: + pretty_name = PRETTY_NAMES.get(plot_x_param, plot_x_param) + if pretty_name in data_headers: + plot_x_index = data_headers.index(pretty_name) + plot_x_label = pretty_name + elif plot_x_param in data_headers: + plot_x_index = data_headers.index(plot_x_param) + plot_x_label = plot_x_param + else: + logger.error(f"Parameter '{plot_x_param}' not found in current table columns. 
Available columns: {', '.join(data_headers)}") + return + + grouped_data = {} + + for i, row in enumerate(table_data): + group_key_parts = [] + test_name = row[-4] + + base_test = "" + x_value = None + + if plot_x_param in ["n_prompt", "n_gen", "n_depth"]: + for j, val in enumerate(row[:-4]): + header_name = data_headers[j] + if val is not None and str(val).strip(): + group_key_parts.append(f"{header_name}={val}") + + if plot_x_param == "n_prompt" and "pp" in test_name: + base_test = test_name.split("@")[0] + x_value = base_test + elif plot_x_param == "n_gen" and "tg" in test_name: + x_value = test_name.split("@")[0] + elif plot_x_param == "n_depth" and "@d" in test_name: + base_test = test_name.split("@d")[0] + x_value = int(test_name.split("@d")[1]) + else: + base_test = test_name + + if base_test.strip(): + group_key_parts.append(f"Test={base_test}") + else: + for j, val in enumerate(row[:-4]): + if j != plot_x_index: + header_name = data_headers[j] + if val is not None and str(val).strip(): + group_key_parts.append(f"{header_name}={val}") + else: + x_value = val + + group_key_parts.append(f"Test={test_name}") + + group_key = tuple(group_key_parts) + + if group_key not in grouped_data: + grouped_data[group_key] = [] + + grouped_data[group_key].append({ + 'x_value': x_value, + 'baseline': float(row[-3]), + 'compare': float(row[-2]), + 'speedup': float(row[-1]) + }) + + if not grouped_data: + logger.error("No data available for plotting") + return + + def make_axes(num_groups, max_cols=2, base_size=(8, 4)): + from math import ceil + cols = 1 if num_groups == 1 else min(max_cols, num_groups) + rows = ceil(num_groups / cols) + + # Scale figure size by grid dimensions + w, h = base_size + fig, ax_arr = plt.subplots(rows, cols, + figsize=(w * cols, h * rows), + squeeze=False) + + axes = ax_arr.flatten()[:num_groups] + return fig, axes + + num_groups = len(grouped_data) + fig, axes = make_axes(num_groups) + + plot_idx = 0 + + for group_key, points in grouped_data.items(): + if plot_idx >= len(axes): + break + ax = axes[plot_idx] + + try: + points_sorted = sorted(points, key=lambda p: float(p['x_value']) if p['x_value'] is not None else 0) + x_values = [float(p['x_value']) if p['x_value'] is not None else 0 for p in points_sorted] + except ValueError: + points_sorted = sorted(points, key=lambda p: group_key) + x_values = [p['x_value'] for p in points_sorted] + + baseline_vals = [p['baseline'] for p in points_sorted] + compare_vals = [p['compare'] for p in points_sorted] + + ax.plot(x_values, baseline_vals, 'o-', color='skyblue', + label=f'{baseline_name}', linewidth=2, markersize=6) + ax.plot(x_values, compare_vals, 's--', color='lightcoral', alpha=0.8, + label=f'{compare_name}', linewidth=2, markersize=6) + + if log_scale: + ax.set_xscale('log', base=2) + unique_x = sorted(set(x_values)) + ax.set_xticks(unique_x) + ax.set_xticklabels([str(int(x)) for x in unique_x]) + + title_parts = [] + for part in group_key: + if '=' in part: + key, value = part.split('=', 1) + title_parts.append(f"{key}: {value}") + + title = ', '.join(title_parts) if title_parts else "Performance comparison" + + ax.set_xlabel(plot_x_label, fontsize=12, fontweight='bold') + ax.set_ylabel('Tokens per second (t/s)', fontsize=12, fontweight='bold') + ax.set_title(title, fontsize=12, fontweight='bold') + ax.legend(loc='best', fontsize=10) + ax.grid(True, alpha=0.3) + + plot_idx += 1 + + for i in range(plot_idx, len(axes)): + axes[i].set_visible(False) + + fig.suptitle(f'Performance comparison: {compare_name} vs. 
{baseline_name}', +                 fontsize=14, fontweight='bold') +    fig.subplots_adjust(top=1) + +    plt.tight_layout() +    plt.savefig(output_file, dpi=300, bbox_inches='tight') +    plt.close() + +    create_performance_plot(table, headers, name_baseline, name_compare, known_args.plot, known_args.plot_x, known_args.plot_log_scale) + print(tabulate( # noqa: NP100 table, headers=headers, From dbf9c07e42340888252e447d31cc35e510e1122b Mon Sep 17 00:00:00 2001 From: Piotr Date: Sat, 14 Jun 2025 18:25:15 +0200 Subject: [PATCH 058/192] llama-chat : Do not throw when tool parsing fails (#14012) Currently, when a model generates output which looks like a tool call but is invalid, an exception is thrown and not handled, causing the CLI or llama-server to bail. Instead, handle the chat parser exception and simply return the generated text in such cases. Signed-off-by: Piotr Stankiewicz --- common/chat-parser.cpp | 5 +++++ common/chat-parser.h | 2 ++ common/chat.cpp | 4 +++- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/common/chat-parser.cpp b/common/chat-parser.cpp index 65b664cb37da4..18a30e49aa578 100644 --- a/common/chat-parser.cpp +++ b/common/chat-parser.cpp @@ -49,6 +49,7 @@ bool common_chat_msg_parser::add_tool_call(const std::string & name, const std:: // LOG_DBG("Tool call arguments:\n\traw: %s\n\tresult: %s\n", arguments.c_str(), tool_call.arguments.c_str()); result_.tool_calls.emplace_back(tool_call); + return true; } bool common_chat_msg_parser::add_tool_call(const json & tool_call) { @@ -378,3 +379,7 @@ std::optional common_chat_msg_parse /* .is_partial = */ found_healing_marker, }; } + +void common_chat_msg_parser::clear_tools() { + result_.tool_calls.clear(); +} diff --git a/common/chat-parser.h b/common/chat-parser.h index 7ee355056b30a..0e64c341a50aa 100644 --- a/common/chat-parser.h +++ b/common/chat-parser.h @@ -115,4 +115,6 @@ class common_chat_msg_parser { const std::vector> & args_paths = {}, const std::vector> & content_paths = {} ); + + void clear_tools(); }; diff --git a/common/chat.cpp b/common/chat.cpp index 1d6974a8c563b..0dad14fba9ba5 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -1921,7 +1921,9 @@ common_chat_msg common_chat_parse(const std::string & input, bool is_partial, co } catch (const common_chat_msg_partial_exception & ex) { LOG_DBG("Partial parse: %s\n", ex.what()); if (!is_partial) { - throw std::runtime_error(ex.what()); + builder.clear_tools(); + builder.move_to(0); + common_chat_parse_content_only(builder); } } auto msg = builder.result(); From 05747f9f8c9ff78d4564ecae7ccbaab91a8e8543 Mon Sep 17 00:00:00 2001 From: Pepijn de Vos Date: Sun, 15 Jun 2025 08:06:37 +0200 Subject: [PATCH 059/192] docs : remove WIP since PR has been merged (#13912) --- docs/function-calling.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/function-calling.md b/docs/function-calling.md index fd3db9bd16a92..37eacaf3100c1 100644 --- a/docs/function-calling.md +++ b/docs/function-calling.md @@ -11,7 +11,7 @@ Function calling is supported for all models (see https://github.com/ggml-org/ll - Llama 3.1 / 3.3 (including builtin tools support - tool names for `wolfram_alpha`, `web_search` / `brave_search`, `code_interpreter`), Llama 3.2 - Functionary v3.1 / v3.2 - Hermes 2/3, Qwen 2.5 - - Qwen 2.5 Coder (WIP: https://github.com/ggml-org/llama.cpp/pull/12034) + - Qwen 2.5 Coder - Mistral Nemo - Firefunction v2 - Command R7B From 8e95c373310f4f46d897cc94161d0b81d013bc0d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 15 Jun 2025 09:18:37 +0300 Subject: 
[PATCH 060/192] batch : auto-gen positions + verify multi-sequence input (#14177) * batch : verify multi-sequence input batches ggml-ci * cont : auto-gen positions + verify multi-seq input ggml-ci * cont : first print debug info, then perform validation ggml-ci * cont : fix position auto-gen + add comments ggml-ci --- include/llama.h | 4 +- src/llama-batch.cpp | 153 +++++++++++++++++++++++++++++++++++++----- src/llama-batch.h | 17 ++++- src/llama-context.cpp | 6 +- src/llama-cparams.h | 1 + 5 files changed, 155 insertions(+), 26 deletions(-) diff --git a/include/llama.h b/include/llama.h index 015a57898e22d..d5e4cef68c213 100644 --- a/include/llama.h +++ b/include/llama.h @@ -243,14 +243,14 @@ extern "C" { typedef bool (*llama_progress_callback)(float progress, void * user_data); - // Input data for llama_decode + // Input data for llama_encode/llama_decode // A llama_batch object can contain input about one or many sequences // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens // // - token : the token ids of the input (used when embd is NULL) // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL) // - pos : the positions of the respective token in the sequence - // (if set to NULL, the token position will be tracked automatically by llama_decode) + // (if set to NULL, the token position will be tracked automatically by llama_encode/llama_decode) // - seq_id : the sequence to which the respective token belongs // (if set to NULL, the sequence ID will be assumed to be 0) // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index bdbf766266f90..2265db9b235b8 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -3,6 +3,7 @@ #include "llama-impl.h" #include "llama-cparams.h" #include "llama-vocab.h" +#include "llama-memory.h" #include #include @@ -287,21 +288,27 @@ llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple llama_batch_allocr::llama_batch_allocr() { const char * LLAMA_BATCH_DEBUG = getenv("LLAMA_BATCH_DEBUG"); debug = LLAMA_BATCH_DEBUG ? 
atoi(LLAMA_BATCH_DEBUG) : 0; + + seq_pos.resize(LLAMA_MAX_PARALLEL_SEQUENCES); + seq_cpl.resize(LLAMA_MAX_PARALLEL_SEQUENCES); + for (auto & cur : seq_cpl) { + cur.resize(LLAMA_MAX_PARALLEL_SEQUENCES); + } } -bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab & vocab, llama_pos p0) { +bool llama_batch_allocr::init( + const llama_batch & batch_inp, + const llama_vocab & vocab, + const llama_memory_i * memory) { clear(); batch = batch_inp; GGML_ASSERT(batch.n_tokens > 0); - if (!batch.pos) { - if (batch.seq_id) { - LLAMA_LOG_ERROR("%s: pos == NULL, but seq_id != NULL\n", __func__); - return false; - } - } + // + // validate input batch + // if (batch.token) { for (int32_t i = 0; i < batch.n_tokens; ++i) { @@ -323,14 +330,9 @@ bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab & } } - if (!batch.pos) { - assert(p0 >= 0); - pos.resize(batch.n_tokens); - for (int32_t i = 0; i < batch.n_tokens; i++) { - pos[i] = p0 + i; - } - batch.pos = pos.data(); - } + // + // auto-generate missing fields + // if (!batch.n_seq_id) { n_seq_id.resize(batch.n_tokens); @@ -349,6 +351,32 @@ bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab & batch.seq_id = seq_id.data(); } + if (!batch.pos) { + pos.resize(batch.n_tokens); + + // initialize the starting position for each sequence based on the positions in the memory + llama_pos p0[LLAMA_MAX_PARALLEL_SEQUENCES]; + for (int32_t s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + if (!memory) { + p0[s] = 0; + } else { + p0[s] = memory->seq_pos_max(s) + 1; + } + } + + for (int32_t i = 0; i < batch.n_tokens; i++) { + const llama_seq_id seq_id = batch.seq_id[i][0]; + + pos[i] = p0[seq_id]; + + for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { + p0[batch.seq_id[i][s]] = pos[i] + 1; + } + } + + batch.pos = pos.data(); + } + if (!batch.logits) { // by default return the output only for the last token output.resize(batch.n_tokens); @@ -356,13 +384,36 @@ bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab & batch.logits = output.data(); } + // + // compute stats + // + for (int32_t i = 0; i < batch.n_tokens; ++i) { n_outputs += batch.logits[i] != 0; } + // determine coupled sequences + // these are pairs of sequences that have at least one token in the input batch that is assigned to both of them + for (int32_t i = 0; i < batch.n_tokens; ++i) { + for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { + seq_pos[batch.seq_id[i][s]].insert(batch.pos[i]); + + if (s > 0) { + const llama_seq_id s0 = batch.seq_id[i][0]; + const llama_seq_id s1 = batch.seq_id[i][s]; + + // mark that sequence s1 is coupled to s0 + seq_cpl[s1][s0] = true; + + // note: the other way around is not necessary for now + //seq_cpl[s0][s1] = true; + } + } + } + if (debug > 0) { - LLAMA_LOG_DEBUG("%s: input batch info (p0 = %d):\n", __func__, p0); - LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, batch.n_tokens); + LLAMA_LOG_DEBUG("%s: input batch info:\n", __func__); + LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, batch.n_tokens); LLAMA_LOG_DEBUG("%s: token = %p\n", __func__, (void *) batch.token); LLAMA_LOG_DEBUG("%s: embd = %p\n", __func__, (void *) batch.embd); LLAMA_LOG_DEBUG("%s: pos = %p\n", __func__, (void *) batch.pos); @@ -404,6 +455,58 @@ bool llama_batch_allocr::init(const llama_batch & batch_inp, const llama_vocab & batch.pos[i], batch.n_seq_id[i], ss.str().c_str(), batch.logits[i]); } LLAMA_LOG_DEBUG("%s: ]\n", __func__); + + LLAMA_LOG_DEBUG("%s: seq = [\n", __func__); + for (int s0 = 0; 
s0 < (int) seq_pos.size(); ++s0) { + if (seq_pos[s0].empty()) { + continue; + } + + std::stringstream ss; + for (int s1 = 0; s1 < (int) seq_cpl[s0].size(); ++s1) { + if (seq_cpl[s0][s1]) { + ss << s1 << " "; + } + } + + LLAMA_LOG_DEBUG("%s: %4d: pos = [%4d, %4d], cpl = %s\n", + __func__, s0, seq_pos_min(s0), seq_pos_max(s0), ss.str().empty() ? "-" : ss.str().c_str()); + } + LLAMA_LOG_DEBUG("%s: ]\n", __func__); + } + } + + // + // consistency checks + // + + for (int32_t s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + if (seq_pos[s].empty()) { + continue; + } + + if (memory && seq_pos_min(s) != memory->seq_pos_max(s) + 1) { + LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s); + return false; + } + + if (seq_pos_max(s) - seq_pos_min(s) + 1 > (int) seq_pos[s].size()) { + LLAMA_LOG_ERROR("%s: sequence %d positions are not continuous\n", __func__, s); + return false; + } + } + + if (memory) { + for (int32_t s0 = 0; s0 < LLAMA_MAX_PARALLEL_SEQUENCES; ++s0) { + for (int32_t s1 = 0; s1 < LLAMA_MAX_PARALLEL_SEQUENCES; ++s1) { + if (seq_cpl[s0][s1]) { + if (memory->seq_pos_min(s0) != memory->seq_pos_min(s1) || + memory->seq_pos_max(s0) != memory->seq_pos_max(s1)) { + LLAMA_LOG_ERROR("%s: sequence %d is coupled to %d in the input batch, but have divereged\n", __func__, s0, s1); + return false; + } + } + } } } @@ -418,6 +521,14 @@ uint32_t llama_batch_allocr::get_n_outputs() const { return n_outputs; } +llama_pos llama_batch_allocr::seq_pos_min(llama_seq_id seq_id) const { + return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].begin(); +} + +llama_pos llama_batch_allocr::seq_pos_max(llama_seq_id seq_id) const { + return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].rbegin(); +} + void llama_batch_allocr::clear() { n_outputs = 0; @@ -426,6 +537,14 @@ void llama_batch_allocr::clear() { n_seq_id.clear(); seq_id.clear(); output.clear(); + + for (auto & cur : seq_pos) { + cur.clear(); + } + + for (auto & cur : seq_cpl) { + std::fill(cur.begin(), cur.end(), false); + } } // diff --git a/src/llama-batch.h b/src/llama-batch.h index 1e0be8ac2c6ce..04501ce5d424c 100644 --- a/src/llama-batch.h +++ b/src/llama-batch.h @@ -4,6 +4,7 @@ #include #include +#include // very similar to llama_batch, // but has more metadata about sequences @@ -77,18 +78,25 @@ struct llama_sbatch { llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split = false); }; -// temporary allocate memory for the input batch if needed +// a helper for sanitizing and fulfilling a batch class llama_batch_allocr { public: llama_batch_allocr(); - // optionally fulfill the batch returned by llama_batch_get_one - bool init(const llama_batch & batch_inp, const llama_vocab & vocab, llama_pos p0); + // sanitize and auto-gen missing data in the input batch + // memory is optional. 
if provided will be used to check for sequence continuity and to determine the positions + bool init( + const llama_batch & batch_inp, + const llama_vocab & vocab, + const llama_memory_i * memory); const llama_batch & get_batch() const; uint32_t get_n_outputs() const; + llama_pos seq_pos_min(llama_seq_id seq_id) const; + llama_pos seq_pos_max(llama_seq_id seq_id) const; + private: void clear(); @@ -103,5 +111,8 @@ class llama_batch_allocr { std::vector seq_id; std::vector output; + std::vector> seq_pos; // seq_pos[s]: the set of positions in sequence s + std::vector> seq_cpl; // seq_cpl[s0][s1]: if sequence s0 is coupled to sequence s1 + int debug; }; diff --git a/src/llama-context.cpp b/src/llama-context.cpp index ec1e1189b219a..47c60e960dc01 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -727,9 +727,8 @@ int llama_context::encode(const llama_batch & batch_inp) { return -1; } - // temporary allocate memory for the input batch if needed // note: during encode, we always pass the full sequence starting from pos = 0 - if (!batch_allocr->init(batch_inp, model.vocab, batch_inp.pos ? -1 : 0)) { + if (!batch_allocr->init(batch_inp, model.vocab, nullptr)) { LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); return -1; } @@ -895,8 +894,7 @@ int llama_context::decode(const llama_batch & batch_inp) { return -1; } - // temporary allocate memory for the input batch if needed - if (!batch_allocr->init(batch_inp, model.vocab, batch_inp.pos ? -1 : memory->seq_pos_max(0) + 1)) { + if (!batch_allocr->init(batch_inp, model.vocab, memory.get())) { LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); return -1; } diff --git a/src/llama-cparams.h b/src/llama-cparams.h index 2871031ef0961..51ebe5d17efa7 100644 --- a/src/llama-cparams.h +++ b/src/llama-cparams.h @@ -4,6 +4,7 @@ #include +// TODO: rename to something shorter #define LLAMA_MAX_PARALLEL_SEQUENCES 64 struct llama_cparams { From 4d775263f6a2fc40cb22fc459aff77d1d3ede38e Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 15 Jun 2025 10:08:58 +0300 Subject: [PATCH 061/192] cparams : rename LLAMA_MAX_PARALLEL_SEQUENCES to LLAMA_MAX_SEQ (#14188) ggml-ci --- src/llama-batch.cpp | 20 ++++++++++---------- src/llama-context.cpp | 10 +++++----- src/llama-cparams.cpp | 2 +- src/llama-cparams.h | 3 +-- src/llama-kv-cache-unified.cpp | 8 ++++---- src/llama-kv-cells.h | 16 ++++++++-------- 6 files changed, 29 insertions(+), 30 deletions(-) diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index 2265db9b235b8..a9f4a3d4c45c5 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -289,10 +289,10 @@ llama_batch_allocr::llama_batch_allocr() { const char * LLAMA_BATCH_DEBUG = getenv("LLAMA_BATCH_DEBUG"); debug = LLAMA_BATCH_DEBUG ? 
atoi(LLAMA_BATCH_DEBUG) : 0; - seq_pos.resize(LLAMA_MAX_PARALLEL_SEQUENCES); - seq_cpl.resize(LLAMA_MAX_PARALLEL_SEQUENCES); + seq_pos.resize(LLAMA_MAX_SEQ); + seq_cpl.resize(LLAMA_MAX_SEQ); for (auto & cur : seq_cpl) { - cur.resize(LLAMA_MAX_PARALLEL_SEQUENCES); + cur.resize(LLAMA_MAX_SEQ); } } @@ -322,8 +322,8 @@ bool llama_batch_allocr::init( if (batch.seq_id) { for (int32_t i = 0; i < batch.n_tokens; ++i) { for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { - if (batch.seq_id && (batch.seq_id[i][s] < 0 || batch.seq_id[i][s] >= LLAMA_MAX_PARALLEL_SEQUENCES)) { - LLAMA_LOG_ERROR("%s: invalid seq_id[%d][%d] = %d > %d\n", __func__, i, s, batch.seq_id[i][s], LLAMA_MAX_PARALLEL_SEQUENCES); + if (batch.seq_id && (batch.seq_id[i][s] < 0 || batch.seq_id[i][s] >= LLAMA_MAX_SEQ)) { + LLAMA_LOG_ERROR("%s: invalid seq_id[%d][%d] = %d > %d\n", __func__, i, s, batch.seq_id[i][s], LLAMA_MAX_SEQ); return false; } } @@ -355,8 +355,8 @@ bool llama_batch_allocr::init( pos.resize(batch.n_tokens); // initialize the starting position for each sequence based on the positions in the memory - llama_pos p0[LLAMA_MAX_PARALLEL_SEQUENCES]; - for (int32_t s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + llama_pos p0[LLAMA_MAX_SEQ]; + for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { if (!memory) { p0[s] = 0; } else { @@ -480,7 +480,7 @@ bool llama_batch_allocr::init( // consistency checks // - for (int32_t s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { if (seq_pos[s].empty()) { continue; } @@ -497,8 +497,8 @@ bool llama_batch_allocr::init( } if (memory) { - for (int32_t s0 = 0; s0 < LLAMA_MAX_PARALLEL_SEQUENCES; ++s0) { - for (int32_t s1 = 0; s1 < LLAMA_MAX_PARALLEL_SEQUENCES; ++s1) { + for (int32_t s0 = 0; s0 < LLAMA_MAX_SEQ; ++s0) { + for (int32_t s1 = 0; s1 < LLAMA_MAX_SEQ; ++s1) { if (seq_cpl[s0][s1]) { if (memory->seq_pos_min(s0) != memory->seq_pos_min(s1) || memory->seq_pos_max(s0) != memory->seq_pos_max(s1)) { diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 47c60e960dc01..3a113d1bcfb2a 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -29,8 +29,8 @@ llama_context::llama_context( const auto & hparams = model.hparams; cparams.n_seq_max = std::max(1u, params.n_seq_max); - if (cparams.n_seq_max > LLAMA_MAX_PARALLEL_SEQUENCES) { - throw std::runtime_error("n_seq_max must be <= " + std::to_string(LLAMA_MAX_PARALLEL_SEQUENCES)); + if (cparams.n_seq_max > LLAMA_MAX_SEQ) { + throw std::runtime_error("n_seq_max must be <= " + std::to_string(LLAMA_MAX_SEQ)); } cparams.n_threads = params.n_threads; @@ -1023,8 +1023,8 @@ int llama_context::decode(const llama_batch & batch_inp) { if (!res) { // the last ubatch failed or was aborted -> remove all positions of that ubatch from the KV cache - llama_pos pos_min[LLAMA_MAX_PARALLEL_SEQUENCES]; - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + llama_pos pos_min[LLAMA_MAX_SEQ]; + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { pos_min[s] = std::numeric_limits::max(); } @@ -1035,7 +1035,7 @@ int llama_context::decode(const llama_batch & batch_inp) { pos_min[seq_id] = std::min(pos_min[seq_id], ubatch.pos[i]); } - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (pos_min[s] == std::numeric_limits::max()) { continue; } diff --git a/src/llama-cparams.cpp b/src/llama-cparams.cpp index f7b36590fe3e3..a3e7a37ee36d7 100644 --- a/src/llama-cparams.cpp +++ b/src/llama-cparams.cpp @@ -1,5 +1,5 @@ #include "llama-cparams.h" size_t 
llama_max_parallel_sequences(void) { - return LLAMA_MAX_PARALLEL_SEQUENCES; + return LLAMA_MAX_SEQ; } diff --git a/src/llama-cparams.h b/src/llama-cparams.h index 51ebe5d17efa7..118615d5bd2d5 100644 --- a/src/llama-cparams.h +++ b/src/llama-cparams.h @@ -4,8 +4,7 @@ #include -// TODO: rename to something shorter -#define LLAMA_MAX_PARALLEL_SEQUENCES 64 +#define LLAMA_MAX_SEQ 64 struct llama_cparams { uint32_t n_ctx; // context size used during inference diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index d4e92eab3a179..03107057079ca 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -572,7 +572,7 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { LLAMA_LOG_DEBUG("\n%s\n", ss.c_str()); } - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (cells.seq_pos_min(s) < 0) { continue; } @@ -652,8 +652,8 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch // keep track of the max sequence position that we would overwrite with this ubatch // for non-SWA cache, this would be always empty - llama_seq_id seq_pos_max_rm[LLAMA_MAX_PARALLEL_SEQUENCES]; - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + llama_seq_id seq_pos_max_rm[LLAMA_MAX_SEQ]; + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { seq_pos_max_rm[s] = -1; } @@ -684,7 +684,7 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch // note: we want to preserve the invariant that all positions between [pos_min, pos_max] for each sequence // will be present in the cache. so we have to purge any position which is less than those we would overwrite // ref: https://github.com/ggml-org/llama.cpp/pull/13746#issuecomment-2916057092 - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (seq_pos_max_rm[s] == -1) { continue; } diff --git a/src/llama-kv-cells.h b/src/llama-kv-cells.h index acf30aebec69b..1d4e70f4d3212 100644 --- a/src/llama-kv-cells.h +++ b/src/llama-kv-cells.h @@ -23,7 +23,7 @@ class llama_kv_cells_unified { used.clear(); - for (uint32_t s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { seq_pos[s].clear(); } } @@ -240,7 +240,7 @@ class llama_kv_cells_unified { llama_seq_id seq_get(uint32_t i) const { assert(seq[i].count() == 1); - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (seq[i].test(s)) { return s; } @@ -253,7 +253,7 @@ class llama_kv_cells_unified { // return -1 if the sequence is not present llama_pos seq_pos_min(llama_seq_id seq_id) const { assert(seq_id >= 0); - assert(seq_id < LLAMA_MAX_PARALLEL_SEQUENCES); + assert(seq_id < LLAMA_MAX_SEQ); if (seq_pos[seq_id].empty()) { return -1; @@ -266,7 +266,7 @@ class llama_kv_cells_unified { // return -1 if the sequence is not present llama_pos seq_pos_max(llama_seq_id seq_id) const { assert(seq_id >= 0); - assert(seq_id < LLAMA_MAX_PARALLEL_SEQUENCES); + assert(seq_id < LLAMA_MAX_SEQ); if (seq_pos[seq_id].empty()) { return -1; @@ -384,20 +384,20 @@ class llama_kv_cells_unified { // std::vector shift; - using bits_t = std::bitset; + using bits_t = std::bitset; // the bitset seq[i] tells us which sequences are currently occupying the i-th cell std::vector seq; // the set seq_pos[s] tells us which positions are currently present for sequence s // this way seq_pos[s].begin() and seq_pos[s].rbegin() give us the min/max positions 
currently in the cache - std::set seq_pos[LLAMA_MAX_PARALLEL_SEQUENCES]; + std::set seq_pos[LLAMA_MAX_SEQ]; // helper functions for updating `seq_pos`, once cell at a time: // remove cell i void seq_pos_rm(uint32_t i) { - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (seq[i].test(s)) { seq_pos[s].erase(pos[i]); } @@ -406,7 +406,7 @@ class llama_kv_cells_unified { // add cell i void seq_pos_add(uint32_t i) { - for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (seq[i].test(s)) { seq_pos[s].insert(pos[i]); } From 313c61aa4341258b46c69316607f071564afd00f Mon Sep 17 00:00:00 2001 From: Mikko Juola Date: Sun, 15 Jun 2025 00:52:06 -0700 Subject: [PATCH 062/192] model : add dots.llm1 architecture support (#14044) (#14118) Adds: * Dots1Model to convert_hf_to_gguf.py * Computation graph code to llama-model.cpp * Chat template to llama-chat.cpp to detect this model's template. --- The model is called "dots.llm1" (I decided to shorten it to dots1 or DOTS1 in the code generally) architecture. The only models that exist as of writing of this commit that follow this architecture are "dots.llm1.inst" and "dots.llm1.base" from here: * https://huggingface.co/rednote-hilab/dots.llm1.inst * https://huggingface.co/rednote-hilab/dots.llm1.base The model architecture is a combination of Qwen and Deepseek parts, as seen here: https://github.com/huggingface/transformers/blob/ffe12627b4e84489d2ab91dd0ec00614855edc79/src/transformers/models/dots1/modular_dots1.py --- convert_hf_to_gguf.py | 28 +++++ gguf-py/gguf/constants.py | 26 ++++ gguf-py/gguf/tensor_mapping.py | 2 +- src/llama-arch.cpp | 29 +++++ src/llama-arch.h | 1 + src/llama-chat.cpp | 17 +++ src/llama-chat.h | 1 + src/llama-model.cpp | 222 +++++++++++++++++++++++++++++++++ src/llama-model.h | 1 + 9 files changed, 326 insertions(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 173a103badc60..cff72c85fab69 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -5262,6 +5262,34 @@ def prepare_tensors(self): raise ValueError(f"Unprocessed experts: {experts}") +@ModelBase.register("Dots1ForCausalLM") +class Dots1Model(Qwen2MoeModel): + model_arch = gguf.MODEL_ARCH.DOTS1 + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.hparams["num_experts"] = self.hparams["n_routed_experts"] + + def set_gguf_parameters(self): + super().set_gguf_parameters() + self.gguf_writer.add_leading_dense_block_count(self.hparams["first_k_dense_replace"]) + self.gguf_writer.add_expert_shared_count(self.hparams["n_shared_experts"]) + self.gguf_writer.add_expert_weights_scale(self.hparams["routed_scaling_factor"]) + self.gguf_writer.add_expert_weights_norm(self.hparams["norm_topk_prob"]) + + if self.hparams["scoring_func"] == "noaux_tc": + self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID) + else: + raise ValueError(f"Unsupported scoring_func value: {self.hparams['scoring_func']}") + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None): + if name.endswith("e_score_correction_bias"): + name = name.replace("e_score_correction_bias", "e_score_correction.bias") + if "shared_experts" in name: + return [(self.map_tensor_name(name), data_torch)] + return super().modify_tensors(data_torch, name, bid) + + @ModelBase.register("PLMForCausalLM") class PLMModel(TextModel): model_arch = gguf.MODEL_ARCH.PLM diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py 
index 3ee2b2064e1b4..8de2f7a531967 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -343,6 +343,7 @@ class MODEL_ARCH(IntEnum): WAVTOKENIZER_DEC = auto() PLM = auto() BAILINGMOE = auto() + DOTS1 = auto() class VISION_PROJECTOR_TYPE(IntEnum): @@ -623,6 +624,7 @@ class MODEL_TENSOR(IntEnum): MODEL_ARCH.WAVTOKENIZER_DEC: "wavtokenizer-dec", MODEL_ARCH.PLM: "plm", MODEL_ARCH.BAILINGMOE: "bailingmoe", + MODEL_ARCH.DOTS1: "dots1" } VISION_PROJECTOR_TYPE_NAMES: dict[VISION_PROJECTOR_TYPE, str] = { @@ -2044,6 +2046,30 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_DOWN_SHEXP, MODEL_TENSOR.FFN_UP_SHEXP, ], + MODEL_ARCH.DOTS1: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_Q_NORM, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_K_NORM, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_EXP_PROBS_B, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_GATE_SHEXP, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_DOWN_SHEXP, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_UP_EXP, + MODEL_TENSOR.FFN_UP_SHEXP, + ], # TODO } diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 439fc1afeeb0c..5e3f01754bf07 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -305,7 +305,7 @@ class TensorNameMap: ), MODEL_TENSOR.FFN_EXP_PROBS_B: ( - "model.layers.{bid}.mlp.gate.e_score_correction", # deepseek-v3 + "model.layers.{bid}.mlp.gate.e_score_correction", # deepseek-v3 dots1 ), # Feed-forward up diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 43fa60a8070b7..f8f76eedd4fa6 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -72,6 +72,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" }, { LLM_ARCH_PLM, "plm" }, { LLM_ARCH_BAILINGMOE, "bailingmoe" }, + { LLM_ARCH_DOTS1, "dots1" }, { LLM_ARCH_UNKNOWN, "(unknown)" }, }; @@ -1555,6 +1556,34 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, }, }, + { + LLM_ARCH_DOTS1, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" }, + } + }, { LLM_ARCH_UNKNOWN, { diff --git a/src/llama-arch.h b/src/llama-arch.h index f3825528aefdb..18f6d6b94f137 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -76,6 +76,7 @@ enum 
llm_arch { LLM_ARCH_WAVTOKENIZER_DEC, LLM_ARCH_PLM, LLM_ARCH_BAILINGMOE, + LLM_ARCH_DOTS1, LLM_ARCH_UNKNOWN, }; diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index d12743e6b9a0c..bc4fa05a74ef4 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -183,6 +183,8 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { return LLM_CHAT_TEMPLATE_BAILING; } else if (tmpl_contains("<|header_start|>") && tmpl_contains("<|header_end|>")) { return LLM_CHAT_TEMPLATE_LLAMA4; + } else if (tmpl_contains("<|endofuserprompt|>")) { + return LLM_CHAT_TEMPLATE_DOTS1; } return LLM_CHAT_TEMPLATE_UNKNOWN; } @@ -643,6 +645,21 @@ int32_t llm_chat_apply_template( if (add_ass) { ss << "Assistant:"; } + } else if (tmpl == LLM_CHAT_TEMPLATE_DOTS1) { + // dots.llm1.inst (DOTS1) + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << "<|system|>" << message->content << "<|endofsystem|>"; + } else if (role == "user") { + ss << "<|userprompt|>" << message->content << "<|endofuserprompt|>"; + } else { + ss << "<|response|>" << message->content << "<|endofresponse|>"; + } + } + if (add_ass) { + ss << "<|response|>"; + } } else { // template not supported return -1; diff --git a/src/llama-chat.h b/src/llama-chat.h index db24ade21e2ad..38800010ae48b 100644 --- a/src/llama-chat.h +++ b/src/llama-chat.h @@ -43,6 +43,7 @@ enum llm_chat_template { LLM_CHAT_TEMPLATE_BAILING, LLM_CHAT_TEMPLATE_LLAMA4, LLM_CHAT_TEMPLATE_SMOLVLM, + LLM_CHAT_TEMPLATE_DOTS1, LLM_CHAT_TEMPLATE_UNKNOWN, }; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index c64bf9de939f4..fdd5fefd6e778 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -80,6 +80,7 @@ const char * llm_type_name(llm_type type) { case LLM_TYPE_40B: return "40B"; case LLM_TYPE_65B: return "65B"; case LLM_TYPE_70B: return "70B"; + case LLM_TYPE_142B: return "142B"; case LLM_TYPE_236B: return "236B"; case LLM_TYPE_290B: return "290B"; case LLM_TYPE_314B: return "314B"; @@ -1444,6 +1445,20 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; + case LLM_ARCH_DOTS1: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false); + ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false); + switch (hparams.n_layer) { + case 62: type = LLM_TYPE_142B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; default: throw std::runtime_error("unsupported model architecture"); } @@ -4123,6 +4138,58 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0); } } break; + case LLM_ARCH_DOTS1: + { + const int64_t n_ff_exp = hparams.n_ff_exp; + const int64_t n_expert_shared = hparams.n_expert_shared; + + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = 
create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); + + layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0); + layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + if (i < (int) hparams.n_layer_dense_lead) { + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } else { + layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0); + layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED); + + if (n_expert == 0) { + throw std::runtime_error("n_expert must be > 0"); + } + if (n_expert_used == 0) { + throw std::runtime_error("n_expert_used must be > 0"); + } + + // MoE branch + layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0); + layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0); + layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0); + + // Shared expert branch + layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0); + layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_exp * n_expert_shared, n_embd}, 0); + layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0); + } + } + } break; default: throw std::runtime_error("unknown architecture"); } @@ -13194,6 +13261,156 @@ struct llm_build_bailingmoe : public llm_graph_context { } }; +struct llm_build_dots1 : public llm_graph_context { + llm_build_dots1(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self_attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, 
n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + cb(Qcur, "Qcur_normed", il); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); + cb(Kcur, "Kcur_normed", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // MoE branch + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + if ((uint32_t) il < hparams.n_layer_dense_lead) { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } else { + ggml_tensor * moe_out = + build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + model.layers[il].ffn_exp_probs_b, + n_expert, n_expert_used, + LLM_FFN_SILU, hparams.expert_weights_norm, + true, hparams.expert_weights_scale, + (llama_expert_gating_func_type) hparams.expert_gating_func, + il); + cb(moe_out, "ffn_moe_out", il); + + { + ggml_tensor * ffn_shexp = build_ffn(cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(ffn_shexp, "ffn_shexp", il); + + cur = ggml_add(ctx0, moe_out, ffn_shexp); + cb(cur, "ffn_out", il); + } + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + llama_memory_i * llama_model::create_memory(const llama_memory_params & params, llama_cparams & cparams) const { llama_memory_i * res; @@ -13532,6 +13749,10 @@ llm_graph_result_ptr llama_model::build_graph( { llm = std::make_unique(*this, params, gf); } break; + case LLM_ARCH_DOTS1: + { + llm = std::make_unique(*this, params, gf); + } break; default: GGML_ABORT("fatal error"); } @@ -13714,6 +13935,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_NEMOTRON: case LLM_ARCH_EXAONE: case LLM_ARCH_MINICPM3: + case LLM_ARCH_DOTS1: return LLAMA_ROPE_TYPE_NEOX; case LLM_ARCH_QWEN2VL: diff --git a/src/llama-model.h 
b/src/llama-model.h index 18b714620bbcf..06e6c687943cc 100644 --- a/src/llama-model.h +++ b/src/llama-model.h @@ -73,6 +73,7 @@ enum llm_type { LLM_TYPE_40B, LLM_TYPE_65B, LLM_TYPE_70B, + LLM_TYPE_142B, LLM_TYPE_236B, LLM_TYPE_290B, LLM_TYPE_314B, From 595374eec946a893f5149d5e311ee11312d65342 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 15 Jun 2025 10:52:11 +0300 Subject: [PATCH 063/192] kv-cache : fix use-after-move of defrag info (#14189) ggml-ci --- src/llama-kv-cache-unified.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index 03107057079ca..b17936abdb4c6 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -1739,7 +1739,7 @@ llama_kv_cache_unified_state::llama_kv_cache_unified_state( llama_context * lctx, bool do_shift, defrag_info dinfo) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), lctx(lctx), do_shift(do_shift), dinfo(std::move(dinfo)) { - if (!do_shift && dinfo.empty()) { + if (!do_shift && this->dinfo.empty()) { status = LLAMA_MEMORY_STATUS_NO_UPDATE; } } From 0e68fabbaffe0e5ce31c19d8099745f281a4521e Mon Sep 17 00:00:00 2001 From: uvos Date: Sun, 15 Jun 2025 15:45:27 +0200 Subject: [PATCH 064/192] HIP: Replace usage of depricated preprocessor macro __AMDGCN_WAVEFRONT_SIZE__ (#14183) --- ggml/src/ggml-cuda/common.cuh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index a82ec26ee1a2d..563a7828bdd14 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -262,11 +262,11 @@ static bool cp_async_available(const int cc) { } static constexpr __device__ int ggml_cuda_get_physical_warp_size() { -#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) - return __AMDGCN_WAVEFRONT_SIZE; +#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && (defined(__GFX9__) || defined(__GFX8__)) + return 64; #else return 32; -#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) +#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && (defined(__GFX9__) || defined(__GFX8__)) } [[noreturn]] From 9fe2d8c492283ee3988b89fe9d45cf2db4562c6d Mon Sep 17 00:00:00 2001 From: uvos Date: Sun, 15 Jun 2025 17:30:13 +0200 Subject: [PATCH 065/192] CUDA/HIP: fix ssm_scan on devices where warp size is not 32 (#14196) --- ggml/src/ggml-cuda/ssm-scan.cu | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ggml/src/ggml-cuda/ssm-scan.cu b/ggml/src/ggml-cuda/ssm-scan.cu index 37ee208c09d46..2d34b836054f8 100644 --- a/ggml/src/ggml-cuda/ssm-scan.cu +++ b/ggml/src/ggml-cuda/ssm-scan.cu @@ -10,6 +10,8 @@ __global__ void __launch_bounds__(splitD, 2) float * __restrict__ dst, const int64_t L) { GGML_UNUSED(src1_nb0); GGML_UNUSED(src2_nb0); + + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); const int bidx = blockIdx.x; // split along B const int bidy = blockIdx.y; // split along D const int tid = threadIdx.x; @@ -44,16 +46,16 @@ __global__ void __launch_bounds__(splitD, 2) if (N == 16) { #pragma unroll for (size_t i = 0; i < splitD / 4; i += 2) { - float value = A_block[(wid * warpSize + i) * stride_A + wtid]; + float value = A_block[(wid * warp_size + i) * stride_A + wtid]; // todo: bank conflict // I am always confused with how to use the swizzling method to solve // bank conflit. Hoping somebody can tell me. - smem_A[(wid * warpSize + i) * stride_sA + wtid + ((wtid / 16) > 0 ? 
1 : 0)] = value; + smem_A[(wid * warp_size + i) * stride_sA + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; } #pragma unroll for (size_t i = 0; i < splitD / 4; i += 2) { - float value = s0_block[(wid * warpSize + i) * stride_s0 + wtid]; - smem_s0[(wid * warpSize + i) * stride_ss0 + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; + float value = s0_block[(wid * warp_size + i) * stride_s0 + wtid]; + smem_s0[(wid * warp_size + i) * stride_ss0 + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; } } From a3f26462621acc2c320b1fddc67a59a3e2df3c9c Mon Sep 17 00:00:00 2001 From: Ed Addario <29247825+EAddario@users.noreply.github.com> Date: Sun, 15 Jun 2025 17:53:45 +0100 Subject: [PATCH 066/192] quantize : change int to unsigned int for KV overrides (#14197) --- src/llama-quant.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 159b1307a4c5d..8cf45732fd6d4 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -585,7 +585,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) { gguf_set_val_f32(ctx_out.get(), o.key, o.val_f64); } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) { - gguf_set_val_i32(ctx_out.get(), o.key, o.val_i64); + // Setting type to UINT32. See https://github.com/ggml-org/llama.cpp/pull/14182 for context + gguf_set_val_u32(ctx_out.get(), o.key, (uint32_t)abs(o.val_i64)); } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) { gguf_set_val_bool(ctx_out.get(), o.key, o.val_bool); } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) { From 4a176287f93a07c31e15f22113bcb7bbd94f1267 Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Sun, 15 Jun 2025 23:36:22 +0200 Subject: [PATCH 067/192] server : When listening on a unix domain socket don't print http:// and port (#14180) Instead show something like this: main: server is listening on file.sock - starting the main loop Signed-off-by: Eric Curtin --- tools/server/server.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/server/server.cpp b/tools/server/server.cpp index b439d8b19dda3..626c58bd304ff 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -4878,7 +4878,9 @@ int main(int argc, char ** argv) { }; bool was_bound = false; + bool is_sock = false; if (string_ends_with(std::string(params.hostname), ".sock")) { + is_sock = true; LOG_INF("%s: setting address family to AF_UNIX\n", __func__); svr->set_address_family(AF_UNIX); // bind_to_port requires a second arg, any value other than 0 should @@ -4956,7 +4958,9 @@ int main(int argc, char ** argv) { SetConsoleCtrlHandler(reinterpret_cast(console_ctrl_handler), true); #endif - LOG_INF("%s: server is listening on http://%s:%d - starting the main loop\n", __func__, params.hostname.c_str(), params.port); + LOG_INF("%s: server is listening on %s - starting the main loop\n", __func__, + is_sock ? 
string_format("unix://%s", params.hostname.c_str()).c_str() : + string_format("http://%s:%d", params.hostname.c_str(), params.port).c_str()); // this call blocks the main thread until queue_tasks.terminate() is called ctx_server.queue_tasks.start_loop(); From d7d67ea370bbcf8be9084f01ccdc09ec7b5eaf75 Mon Sep 17 00:00:00 2001 From: Bartowski <3266127+bartowski1182@users.noreply.github.com> Date: Mon, 16 Jun 2025 00:04:06 +0100 Subject: [PATCH 068/192] model : Add support for Arcee AI's upcoming AFM model (#14185) * Add Arcee AFM support * Add draft update code * Fix linter and update URL, may still not be final * Update src/llama-model.cpp Co-authored-by: Xuan-Son Nguyen * Remote accidental blank line --------- Co-authored-by: Xuan-Son Nguyen --- convert_hf_to_gguf.py | 14 +++ convert_hf_to_gguf_update.py | 1 + gguf-py/gguf/constants.py | 19 +++- src/llama-arch.cpp | 19 ++++ src/llama-arch.h | 1 + src/llama-model.cpp | 181 +++++++++++++++++++++++++++++++++++ src/llama-vocab.cpp | 1 + 7 files changed, 235 insertions(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index cff72c85fab69..2232a7d82349e 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -2020,6 +2020,20 @@ def prepare_tensors(self): raise ValueError(f"Unprocessed experts: {experts}") +@ModelBase.register("ArceeForCausalLM") +class ArceeModel(LlamaModel): + model_arch = gguf.MODEL_ARCH.ARCEE + + def set_gguf_parameters(self): + super().set_gguf_parameters() + self._try_set_pooling_type() + rope_scaling = self.hparams.get("rope_scaling") or {} + if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling: + self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) + self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"]) + self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"]) + + @ModelBase.register( "LlavaForConditionalGeneration", # pixtral "Mistral3ForConditionalGeneration", # mistral small 3.1 diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py index 2f733f0973686..fae4f72605f46 100755 --- a/convert_hf_to_gguf_update.py +++ b/convert_hf_to_gguf_update.py @@ -128,6 +128,7 @@ class TOKENIZER_TYPE(IntEnum): {"name": "llama4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct", }, {"name": "pixtral", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistral-community/pixtral-12b", }, {"name": "seed-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base", }, + {"name": "arcee", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/arcee-ai/AFM-4.5B", }, # TODO confirm final URL ] # some models are known to be broken upstream, so we will skip them as exceptions diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 8de2f7a531967..9b2143c7c2eaa 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -344,6 +344,7 @@ class MODEL_ARCH(IntEnum): PLM = auto() BAILINGMOE = auto() DOTS1 = auto() + ARCEE = auto() class VISION_PROJECTOR_TYPE(IntEnum): @@ -624,7 +625,8 @@ class MODEL_TENSOR(IntEnum): MODEL_ARCH.WAVTOKENIZER_DEC: "wavtokenizer-dec", MODEL_ARCH.PLM: "plm", MODEL_ARCH.BAILINGMOE: "bailingmoe", - MODEL_ARCH.DOTS1: "dots1" + MODEL_ARCH.DOTS1: "dots1", + MODEL_ARCH.ARCEE: "arcee", } VISION_PROJECTOR_TYPE_NAMES: dict[VISION_PROJECTOR_TYPE, str] = { @@ -2070,6 +2072,21 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_UP_EXP, 
MODEL_TENSOR.FFN_UP_SHEXP, ], + MODEL_ARCH.ARCEE: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], # TODO } diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index f8f76eedd4fa6..a3e7c861ca02f 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -73,6 +73,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_PLM, "plm" }, { LLM_ARCH_BAILINGMOE, "bailingmoe" }, { LLM_ARCH_DOTS1, "dots1" }, + { LLM_ARCH_ARCEE, "arcee" }, { LLM_ARCH_UNKNOWN, "(unknown)" }, }; @@ -244,6 +245,24 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, }, }, + { + LLM_ARCH_ARCEE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_LLAMA4, { diff --git a/src/llama-arch.h b/src/llama-arch.h index 18f6d6b94f137..168fdcb401cfd 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -77,6 +77,7 @@ enum llm_arch { LLM_ARCH_PLM, LLM_ARCH_BAILINGMOE, LLM_ARCH_DOTS1, + LLM_ARCH_ARCEE, LLM_ARCH_UNKNOWN, }; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index fdd5fefd6e778..dcc8b0be72563 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -599,6 +599,16 @@ void llama_model::load_hparams(llama_model_loader & ml) { hparams.use_kq_norm = false; } } break; + case LLM_ARCH_ARCEE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + // Arcee uses the same structure as Llama + switch (hparams.n_layer) { + case 36: type = LLM_TYPE_4B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; case LLM_ARCH_DECI: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -4190,6 +4200,37 @@ bool llama_model::load_tensors(llama_model_loader & ml) { } } } break; + case LLM_ARCH_ARCEE: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); + + layer.ffn_norm = 
create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)); + + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; default: throw std::runtime_error("unknown architecture"); } @@ -13411,6 +13452,141 @@ struct llm_build_dots1 : public llm_graph_context { } }; +struct llm_build_arcee : public llm_graph_context { + llm_build_arcee(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // rope freq factors for llama3; may return nullptr for llama2 and other models + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); + cb(cur, "attn_out", il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + // ARCEE uses relu^2 instead of silu + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + 
model.layers[il].ffn_up, NULL, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_RELU_SQR, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + llama_memory_i * llama_model::create_memory(const llama_memory_params & params, llama_cparams & cparams) const { llama_memory_i * res; @@ -13753,6 +13929,10 @@ llm_graph_result_ptr llama_model::build_graph( { llm = std::make_unique(*this, params, gf); } break; + case LLM_ARCH_ARCEE: + { + llm = std::make_unique(*this, params, gf); + } break; default: GGML_ABORT("fatal error"); } @@ -13902,6 +14082,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_GRANITE_MOE: case LLM_ARCH_CHAMELEON: case LLM_ARCH_BAILINGMOE: + case LLM_ARCH_ARCEE: return LLAMA_ROPE_TYPE_NORM; // the pairs of head values are offset by n_rot/2 diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index 905d7c4281d9c..dd2251ef3cbef 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -1987,6 +1987,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { || t.first == "<|eom_id|>" || t.first == "" || t.first == "_" + || t.first == "<|end_of_text|>" ) { special_eog_ids.insert(t.second); if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) { From bd03b667957cdd5a5ed333342d09b7def373a0dd Mon Sep 17 00:00:00 2001 From: xctan Date: Mon, 16 Jun 2025 13:54:15 +0800 Subject: [PATCH 069/192] ggml-cpu : rework weak alias on apple targets (#14146) * ggml-cpu : rework weak alias on apple targets * fix powerpc detection * fix ppc detection * fix powerpc detection on darwin --- ggml/cmake/common.cmake | 3 +- ggml/src/ggml-cpu/apple-fallback.h | 88 ++++++++++++++++++++++++++++++ ggml/src/ggml-cpu/ggml-cpu-impl.h | 2 +- ggml/src/ggml-cpu/quants.c | 4 ++ ggml/src/ggml-cpu/quants.h | 27 --------- ggml/src/ggml-cpu/repack.cpp | 4 ++ ggml/src/ggml-cpu/repack.h | 18 +----- 7 files changed, 99 insertions(+), 47 deletions(-) create mode 100644 ggml/src/ggml-cpu/apple-fallback.h diff --git a/ggml/cmake/common.cmake b/ggml/cmake/common.cmake index bb1ec9b37a7f0..cb66388332040 100644 --- a/ggml/cmake/common.cmake +++ b/ggml/cmake/common.cmake @@ -36,8 +36,7 @@ function(ggml_get_system_arch) (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64|amd64)$")) set(GGML_SYSTEM_ARCH "x86" PARENT_SCOPE) - elseif ("${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "ppc64le " OR - "${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "powerpc ") + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc|power") set(GGML_SYSTEM_ARCH "PowerPC" PARENT_SCOPE) elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") set(GGML_SYSTEM_ARCH "loongarch64" PARENT_SCOPE) diff --git a/ggml/src/ggml-cpu/apple-fallback.h b/ggml/src/ggml-cpu/apple-fallback.h new file mode 100644 index 0000000000000..f477505d787a7 --- /dev/null +++ b/ggml/src/ggml-cpu/apple-fallback.h @@ -0,0 +1,88 @@ +#pragma once + +// Solve alias issue for Apple targets (currently PowerPC, x86, and ARM64). 
+// Mach-O has a weak alias equivalent but no practical compiler support can +// be found, so we need to do it manually. +// ref: https://stackoverflow.com/questions/42757744 +// +// This file is a complement to native implementations in the `arch` folder. +// A kernel in quants.c or repack.cpp is either: +// - implemented in the `arch` folder, or +// - defined in this file to remove the `_generic` suffix + +#if defined(GGML_CPU_GENERIC) +// quants.c +#define quantize_row_q8_0_generic quantize_row_q8_0 +#define quantize_row_q8_1_generic quantize_row_q8_1 +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_q4_0_q8_0_generic ggml_vec_dot_q4_0_q8_0 +#define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 +#define ggml_vec_dot_q5_0_q8_0_generic ggml_vec_dot_q5_0_q8_0 +#define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 +#define ggml_vec_dot_q8_0_q8_0_generic ggml_vec_dot_q8_0_q8_0 +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K +#define ggml_vec_dot_q3_K_q8_K_generic ggml_vec_dot_q3_K_q8_K +#define ggml_vec_dot_q4_K_q8_K_generic ggml_vec_dot_q4_K_q8_K +#define ggml_vec_dot_q5_K_q8_K_generic ggml_vec_dot_q5_K_q8_K +#define ggml_vec_dot_q6_K_q8_K_generic ggml_vec_dot_q6_K_q8_K +#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K +#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K +#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K +#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K +#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K +#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 +#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__aarch64__) || defined(__arm__) +// repack.cpp +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#elif defined(__x86_64__) || defined(__i386__) +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define 
ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__POWERPC__) +// ref: https://github.com/ggml-org/llama.cpp/pull/14146#issuecomment-2972561679 +// quants.c +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#endif diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h index 69415daa82025..9662e4d7b5a6a 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-impl.h +++ b/ggml/src/ggml-cpu/ggml-cpu-impl.h @@ -509,7 +509,7 @@ void ggml_barrier(struct ggml_threadpool * tp); #define GGML_DO_PRAGMA_(x) _Pragma (#x) #define GGML_DO_PRAGMA(x) GGML_DO_PRAGMA_(x) -#if defined(GGML_CPU_GENERIC) || defined(__HIPCC__) +#if defined(GGML_CPU_GENERIC) || defined(__HIPCC__) || defined(__APPLE__) // Note for Apple targets: // - clang: aliases are not supported on darwin // - all native kernels need to be implemented in both x86 and arm files diff --git a/ggml/src/ggml-cpu/quants.c b/ggml/src/ggml-cpu/quants.c index 1ca9c50e724a3..516c5b2ced06d 100644 --- a/ggml/src/ggml-cpu/quants.c +++ b/ggml/src/ggml-cpu/quants.c @@ -5,6 +5,10 @@ #include "ggml-quants.h" #include "quants.h" +#if defined(__APPLE__) +#include "apple-fallback.h" +#endif + #include #include #include diff --git a/ggml/src/ggml-cpu/quants.h b/ggml/src/ggml-cpu/quants.h index d729e07d633f5..dc4342c87f592 100644 --- a/ggml/src/ggml-cpu/quants.h +++ b/ggml/src/ggml-cpu/quants.h @@ -84,33 +84,6 @@ void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -#if defined(GGML_CPU_GENERIC) -#define quantize_row_q8_0_generic quantize_row_q8_0 -#define quantize_row_q8_1_generic quantize_row_q8_1 -#define quantize_row_q8_K_generic quantize_row_q8_K -#define ggml_vec_dot_q4_0_q8_0_generic ggml_vec_dot_q4_0_q8_0 -#define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 -#define ggml_vec_dot_q5_0_q8_0_generic ggml_vec_dot_q5_0_q8_0 -#define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 -#define ggml_vec_dot_q8_0_q8_0_generic ggml_vec_dot_q8_0_q8_0 -#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K 
-#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K -#define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K -#define ggml_vec_dot_q3_K_q8_K_generic ggml_vec_dot_q3_K_q8_K -#define ggml_vec_dot_q4_K_q8_K_generic ggml_vec_dot_q4_K_q8_K -#define ggml_vec_dot_q5_K_q8_K_generic ggml_vec_dot_q5_K_q8_K -#define ggml_vec_dot_q6_K_q8_K_generic ggml_vec_dot_q6_K_q8_K -#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K -#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K -#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K -#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K -#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K -#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K -#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K -#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 -#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K -#endif - #ifdef __cplusplus } #endif diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp index 628142d5f630a..604ccee907843 100644 --- a/ggml/src/ggml-cpu/repack.cpp +++ b/ggml/src/ggml-cpu/repack.cpp @@ -8,6 +8,10 @@ #include "ggml-cpu-impl.h" #include "traits.h" +#if defined(__APPLE__) +#include "apple-fallback.h" +#endif + #include #include #include diff --git a/ggml/src/ggml-cpu/repack.h b/ggml/src/ggml-cpu/repack.h index 8ee6e92ea96b8..b13d2d0c73495 100644 --- a/ggml/src/ggml-cpu/repack.h +++ b/ggml/src/ggml-cpu/repack.h @@ -67,7 +67,7 @@ extern "C" { // Workaround for clang: // clang++ complains: ``error: call to 'ggml_gemm_q4_0_4x4_q8_0' is ambiguous'' // repro: https://godbolt.org/z/oKdeWKonM (ICE), https://godbolt.org/z/1szq6P36v (ambiguous call) -#if defined(GGML_CPU_CLANG_WORKAROUND) || !(defined(__GNUC__) && defined(__clang__)) || defined(__HIPCC__) +#if defined(GGML_CPU_CLANG_WORKAROUND) || defined(__APPLE__) || !(defined(__GNUC__) && defined(__clang__)) || defined(__HIPCC__) void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_K_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); @@ -98,22 +98,6 @@ void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); -#if defined(GGML_CPU_GENERIC) -#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 -#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 -#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 -#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 -#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 -#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 -#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K -#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 -#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 -#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 -#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 -#define ggml_gemm_q4_K_8x8_q8_K_generic 
ggml_gemm_q4_K_8x8_q8_K -#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 -#endif - #if defined(__cplusplus) } // extern "C" #endif From 41efafc947040e05cee35e8c0341aa6d9b6cfc7b Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Mon, 16 Jun 2025 00:21:08 -0600 Subject: [PATCH 070/192] vulkan: mutex around vkQueueSubmit (#14127) This fixes the remaining crash in test-thread-safety on my system. --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 32d6407441535..8d62303aabd7f 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -168,6 +168,11 @@ struct vk_command_pool { vk_queue *q; }; +// Prevent simultaneous submissions to the same queue. +// This could be per vk_queue if we stopped having two vk_queue structures +// sharing the same vk::Queue. +static std::mutex queue_mutex; + struct vk_queue { uint32_t queue_family_index; vk::Queue queue; @@ -1266,6 +1271,7 @@ static vk::CommandBuffer ggml_vk_create_cmd_buffer(vk_device& device, vk_command static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) { if (ctx->seqs.empty()) { if (fence) { + std::lock_guard guard(queue_mutex); ctx->p->q->queue.submit({}, fence); } return; @@ -1335,6 +1341,7 @@ static void ggml_vk_submit(vk_context& ctx, vk::Fence fence) { } } + std::lock_guard guard(queue_mutex); ctx->p->q->queue.submit(submit_infos, fence); ctx->seqs.clear(); From c72974fd7ab074a8cf756943a221ea9c1af65eeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90inh=20Tr=E1=BB=8Dng=20Huy?= <77562200+huydt84@users.noreply.github.com> Date: Mon, 16 Jun 2025 16:20:59 +0900 Subject: [PATCH 071/192] gguf-py : allow key override when adding value to GGUFWriter (#14194) Co-authored-by: dinhhuy --- gguf-py/gguf/gguf_writer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index adc673e38ff07..54ca0c33fd336 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -271,7 +271,7 @@ def write_ti_data_to_file(self) -> None: def add_key_value(self, key: str, val: Any, vtype: GGUFValueType, sub_type: GGUFValueType | None = None) -> None: if any(key in kv_data for kv_data in self.kv_data): - raise ValueError(f'Duplicated key name {key!r}') + logger.warning(f'Duplicated key name {key!r}, overwriting it with new value {val!r} of type {vtype.name}') self.kv_data[0][key] = GGUFValue(value=val, type=vtype, sub_type=sub_type) From b4a42883952a234d26dca0b5cd37ee0dda84ac6b Mon Sep 17 00:00:00 2001 From: Bartowski <3266127+bartowski1182@users.noreply.github.com> Date: Mon, 16 Jun 2025 09:16:06 +0100 Subject: [PATCH 072/192] convert : remove arcee change in convert_hf_to_gguf_update.py (#14207) --- convert_hf_to_gguf_update.py | 1 - 1 file changed, 1 deletion(-) diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py index fae4f72605f46..2f733f0973686 100755 --- a/convert_hf_to_gguf_update.py +++ b/convert_hf_to_gguf_update.py @@ -128,7 +128,6 @@ class TOKENIZER_TYPE(IntEnum): {"name": "llama4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct", }, {"name": "pixtral", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistral-community/pixtral-12b", }, {"name": "seed-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base", }, - {"name": "arcee", "tokt": TOKENIZER_TYPE.BPE, 
"repo": "https://huggingface.co/arcee-ai/AFM-4.5B", }, # TODO confirm final URL ] # some models are known to be broken upstream, so we will skip them as exceptions From 1c4aea399d13e4109cd2646eab0827c5de31312f Mon Sep 17 00:00:00 2001 From: Charles Xu Date: Mon, 16 Jun 2025 11:47:57 +0200 Subject: [PATCH 073/192] ggml: Add Android support for GGML_CPU_ALL_VARIANTS (#14206) --- ggml/src/CMakeLists.txt | 34 +++++++++----- ggml/src/ggml-cpu/CMakeLists.txt | 79 +++++++++++++++----------------- 2 files changed, 60 insertions(+), 53 deletions(-) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index 726da5e048b18..17c9366f4a3cf 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -311,18 +311,28 @@ if (GGML_CPU_ALL_VARIANTS) # MSVC doesn't support AMX ggml_add_cpu_backend_variant(sapphirerapids SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) endif() - elseif(GGML_SYSTEM_ARCH STREQUAL "ARM" AND CMAKE_SYSTEM_NAME MATCHES "Linux") - # Many of these features are optional so we build versions with popular - # combinations and name the backends based on the version they were - # first released with - ggml_add_cpu_backend_variant(armv8.0_1) - ggml_add_cpu_backend_variant(armv8.2_1 DOTPROD) - ggml_add_cpu_backend_variant(armv8.2_2 DOTPROD FP16_VECTOR_ARITHMETIC) - ggml_add_cpu_backend_variant(armv8.2_3 DOTPROD FP16_VECTOR_ARITHMETIC SVE) - ggml_add_cpu_backend_variant(armv8.6_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8) - ggml_add_cpu_backend_variant(armv8.6_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2) - ggml_add_cpu_backend_variant(armv9.2_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SME) - ggml_add_cpu_backend_variant(armv9.2_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2 SME) + elseif(GGML_SYSTEM_ARCH STREQUAL "ARM") + if (CMAKE_SYSTEM_NAME MATCHES "Linux") + # Many of these features are optional so we build versions with popular + # combinations and name the backends based on the version they were + # first released with + ggml_add_cpu_backend_variant(armv8.0_1) + ggml_add_cpu_backend_variant(armv8.2_1 DOTPROD) + ggml_add_cpu_backend_variant(armv8.2_2 DOTPROD FP16_VECTOR_ARITHMETIC) + ggml_add_cpu_backend_variant(armv8.2_3 DOTPROD FP16_VECTOR_ARITHMETIC SVE) + ggml_add_cpu_backend_variant(armv8.6_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8) + ggml_add_cpu_backend_variant(armv8.6_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2) + ggml_add_cpu_backend_variant(armv9.2_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SME) + ggml_add_cpu_backend_variant(armv9.2_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2 SME) + elseif (CMAKE_SYSTEM_NAME MATCHES "Android") + # Android-specific backends with SoC-compatible feature sets + ggml_add_cpu_backend_variant(android_armv8.0_1) + ggml_add_cpu_backend_variant(android_armv8.2_1 DOTPROD) + ggml_add_cpu_backend_variant(android_armv8.2_2 DOTPROD FP16_VECTOR_ARITHMETIC) + ggml_add_cpu_backend_variant(android_armv8.6_1 DOTPROD FP16_VECTOR_ARITHMETIC MATMUL_INT8) + else() + message(FATAL_ERROR "Unsupported ARM target OS: ${CMAKE_SYSTEM_NAME}") + endif() else() message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS not yet supported with ${GGML_SYSTEM_ARCH} on ${CMAKE_SYSTEM_NAME}") endif() diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index e4c0fa8d0240c..3bd1b0507e2cb 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -158,48 +158,45 @@ function(ggml_add_cpu_backend_variant_impl tag_name) if 
(GGML_CPU_ARM_ARCH) list(APPEND ARCH_FLAGS -march=${GGML_CPU_ARM_ARCH}) elseif(GGML_CPU_ALL_VARIANTS) - if (CMAKE_SYSTEM_NAME MATCHES "Linux") - # Begin with the lowest baseline - set(ARM_MCPU "armv8-a") - set(ARCH_TAGS "") - set(ARCH_DEFINITIONS "") - - # When a feature is selected, bump the MCPU to the first - # version that supported it - if (GGML_INTERNAL_DOTPROD) - set(ARM_MCPU "armv8.2-a") - set(ARCH_TAGS "${ARCH_TAGS}+dotprod") - list(APPEND ARCH_DEFINITIONS GGML_USE_DOTPROD) - endif() - if (GGML_INTERNAL_FP16_VECTOR_ARITHMETIC) - set(ARM_MCPU "armv8.2-a") - set(ARCH_TAGS "${ARCH_TAGS}+fp16") - list(APPEND ARCH_DEFINITIONS GGML_USE_FP16_VECTOR_ARITHMETIC) - endif() - if (GGML_INTERNAL_SVE) - set(ARM_MCPU "armv8.2-a") - set(ARCH_TAGS "${ARCH_TAGS}+sve") - list(APPEND ARCH_DEFINITIONS GGML_USE_SVE) - endif() - if (GGML_INTERNAL_MATMUL_INT8) - set(ARM_MCPU "armv8.6-a") - set(ARCH_TAGS "${ARCH_TAGS}+i8mm") - list(APPEND ARCH_DEFINITIONS GGML_USE_MATMUL_INT8) - endif() - if (GGML_INTERNAL_SVE2) - set(ARM_MCPU "armv8.6-a") - set(ARCH_TAGS "${ARCH_TAGS}+sve2") - list(APPEND ARCH_DEFINITIONS GGML_USE_SVE2) - endif() - if (GGML_INTERNAL_SME) - set(ARM_MCPU "armv9.2-a") - set(ARCH_TAGS "${ARCH_TAGS}+sme") - list(APPEND ARCH_DEFINITIONS GGML_USE_SME) - endif() - - list(APPEND ARCH_FLAGS "-march=${ARM_MCPU}${ARCH_TAGS}") - ggml_add_cpu_backend_features(${GGML_CPU_NAME} arm ${ARCH_DEFINITIONS}) + # Begin with the lowest baseline + set(ARM_MCPU "armv8-a") + set(ARCH_TAGS "") + set(ARCH_DEFINITIONS "") + + # When a feature is selected, bump the MCPU to the first + # version that supported it + if (GGML_INTERNAL_DOTPROD) + set(ARM_MCPU "armv8.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+dotprod") + list(APPEND ARCH_DEFINITIONS GGML_USE_DOTPROD) + endif() + if (GGML_INTERNAL_FP16_VECTOR_ARITHMETIC) + set(ARM_MCPU "armv8.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+fp16") + list(APPEND ARCH_DEFINITIONS GGML_USE_FP16_VECTOR_ARITHMETIC) + endif() + if (GGML_INTERNAL_SVE) + set(ARM_MCPU "armv8.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+sve") + list(APPEND ARCH_DEFINITIONS GGML_USE_SVE) + endif() + if (GGML_INTERNAL_MATMUL_INT8) + set(ARM_MCPU "armv8.6-a") + set(ARCH_TAGS "${ARCH_TAGS}+i8mm") + list(APPEND ARCH_DEFINITIONS GGML_USE_MATMUL_INT8) + endif() + if (GGML_INTERNAL_SVE2) + set(ARM_MCPU "armv8.6-a") + set(ARCH_TAGS "${ARCH_TAGS}+sve2") + list(APPEND ARCH_DEFINITIONS GGML_USE_SVE2) + endif() + if (GGML_INTERNAL_SME) + set(ARM_MCPU "armv9.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+sme") + list(APPEND ARCH_DEFINITIONS GGML_USE_SME) endif() + list(APPEND ARCH_FLAGS "-march=${ARM_MCPU}${ARCH_TAGS}") + ggml_add_cpu_backend_features(${GGML_CPU_NAME} arm ${ARCH_DEFINITIONS}) endif() endif() From 6ce670ef904a519f861effb9f9aaf80b6d9a9ed5 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 16 Jun 2025 14:14:00 +0300 Subject: [PATCH 074/192] llama : rework embeddings logic (#14208) * llama : rework embeddings logic ggml-ci * cont : fix rerank ggml-ci * cont : engrish [no ci] * cont : fix rerank ggml-ci * server : support both embeddings and completions with single model ggml-ci * cont : avoid embeddings_org ggml-ci --- common/arg.cpp | 9 +-- common/common.cpp | 62 +++++++++--------- common/common.h | 1 - examples/gritlm/gritlm.cpp | 8 ++- include/llama.h | 12 ++-- src/llama-batch.cpp | 30 +++++++-- src/llama-batch.h | 3 +- src/llama-context.cpp | 26 ++++---- src/llama-kv-cache-recurrent.cpp | 8 +-- src/llama-kv-cache-recurrent.h | 2 +- src/llama-kv-cache-unified-iswa.cpp | 4 +- src/llama-kv-cache-unified-iswa.h | 2 +- 
src/llama-kv-cache-unified.cpp | 4 +- src/llama-kv-cache-unified.h | 2 +- src/llama-memory.h | 2 +- tools/server/server.cpp | 98 +++++++++++++++++++---------- 16 files changed, 159 insertions(+), 114 deletions(-) diff --git a/common/arg.cpp b/common/arg.cpp index 0d0daa3610105..231de227a9122 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -988,10 +988,6 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context params.tensor_buft_overrides.push_back({nullptr, nullptr}); } - if (params.reranking && params.embedding) { - throw std::invalid_argument("error: either --embedding or --reranking can be specified, but not both"); - } - if (!params.chat_template.empty() && !common_chat_verify_template(params.chat_template, params.use_jinja)) { throw std::runtime_error(string_format( "error: the supplied chat template is not supported: %s%s\n", @@ -2747,9 +2743,10 @@ common_params_context common_params_parser_init(common_params & params, llama_ex ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_EMBEDDINGS")); add_opt(common_arg( {"--reranking", "--rerank"}, - string_format("enable reranking endpoint on server (default: %s)", params.reranking ? "enabled" : "disabled"), + string_format("enable reranking endpoint on server (default: %s)", "disabled"), [](common_params & params) { - params.reranking = true; + params.embedding = true; + params.pooling_type = LLAMA_POOLING_TYPE_RANK; } ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_RERANKING")); add_opt(common_arg( diff --git a/common/common.cpp b/common/common.cpp index e23887c70770c..5b465150f0533 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -897,34 +897,6 @@ struct common_init_result common_init_from_params(common_params & params) { const llama_vocab * vocab = llama_model_get_vocab(model); - if (params.reranking) { - bool ok = true; - - if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) { - LOG_WRN("%s: warning: vocab does not have a BOS token, reranking will not work\n", __func__); - ok = false; - } - - bool has_eos = llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL; - bool has_sep = llama_vocab_sep(vocab) != LLAMA_TOKEN_NULL; - - if (!has_eos && !has_sep) { - LOG_WRN("%s: warning: vocab does not have an EOS token or SEP token, reranking will not work\n", __func__); - ok = false; - } else if (!has_eos) { - LOG_WRN("%s: warning: vocab does not have an EOS token, using SEP token as fallback\n", __func__); - } else if (!has_sep) { - LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__); - ok = false; - } - - if (!ok) { - llama_model_free(model); - - return iparams; - } - } - auto cparams = common_context_params_to_llama(params); llama_context * lctx = llama_init_from_model(model, cparams); @@ -966,6 +938,35 @@ struct common_init_result common_init_from_params(common_params & params) { } } + if (llama_pooling_type(lctx) == LLAMA_POOLING_TYPE_RANK) { + bool ok = true; + + if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) { + LOG_WRN("%s: warning: vocab does not have a BOS token, reranking will not work\n", __func__); + ok = false; + } + + bool has_eos = llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL; + bool has_sep = llama_vocab_sep(vocab) != LLAMA_TOKEN_NULL; + + if (!has_eos && !has_sep) { + LOG_WRN("%s: warning: vocab does not have an EOS token or SEP token, reranking will not work\n", __func__); + ok = false; + } else if (!has_eos) { + LOG_WRN("%s: warning: vocab does not have an EOS token, using SEP token as fallback\n", __func__); + } else if (!has_sep) { + 
LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__); + ok = false; + } + + if (!ok) { + llama_free(lctx); + llama_model_free(model); + + return iparams; + } + } + // load and optionally apply lora adapters for (auto & la : params.lora_adapters) { llama_adapter_lora_ptr lora; @@ -1143,11 +1144,6 @@ struct llama_context_params common_context_params_to_llama(const common_params & cparams.op_offload = !params.no_op_offload; cparams.swa_full = params.swa_full; - if (params.reranking) { - cparams.embeddings = true; - cparams.pooling_type = LLAMA_POOLING_TYPE_RANK; - } - cparams.type_k = params.cache_type_k; cparams.type_v = params.cache_type_v; diff --git a/common/common.h b/common/common.h index f26724b6e1495..00b6ca03a20b4 100644 --- a/common/common.h +++ b/common/common.h @@ -355,7 +355,6 @@ struct common_params { int32_t embd_normalize = 2; // normalisation for embeddings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm) std::string embd_out = ""; // empty = default, "array" = [[],[]...], "json" = openai style, "json+" = same "json" + cosine similarity matrix std::string embd_sep = "\n"; // separator of embeddings - bool reranking = false; // enable reranking support on server // server params int32_t port = 8080; // server listens on this network port diff --git a/examples/gritlm/gritlm.cpp b/examples/gritlm/gritlm.cpp index 041da61c743c1..bdab052c3390f 100644 --- a/examples/gritlm/gritlm.cpp +++ b/examples/gritlm/gritlm.cpp @@ -41,12 +41,11 @@ static std::vector> encode(llama_context * ctx, const std::ve // add input to batch (this increments n_tokens) for (int32_t j = 0; j < n_toks; j++) { - common_batch_add(batch, inputs[j], j, { 0 }, j >= n_inst); + common_batch_add(batch, inputs[j], j, { 0 }, true); } // clear previous kv_cache values (irrelevant for embeddings) llama_memory_clear(llama_get_memory(ctx), true); - llama_set_embeddings(ctx, true); llama_set_causal_attn(ctx, false); // run model @@ -103,7 +102,6 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std llama_token eos_token = llama_vocab_eos(vocab); llama_memory_clear(llama_get_memory(ctx), true); - llama_set_embeddings(ctx, false); llama_set_causal_attn(ctx, true); llama_batch bat = llama_batch_init(llama_n_batch(ctx), 0, 1); @@ -166,6 +164,8 @@ int main(int argc, char * argv[]) { llama_model_params mparams = common_model_params_to_llama(params); llama_context_params cparams = common_context_params_to_llama(params); + cparams.embeddings = true; + llama_backend_init(); llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams); @@ -213,6 +213,8 @@ int main(int argc, char * argv[]) { std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[1].c_str(), documents[1].c_str(), cosine_sim_q1_d1); } + llama_set_embeddings(ctx, false); + // ### Generation ### // GritLM models are not finetuned with system prompts, as you can just include system-like instructions together with your user instruction { diff --git a/include/llama.h b/include/llama.h index d5e4cef68c213..b086b68e6d4ea 100644 --- a/include/llama.h +++ b/include/llama.h @@ -254,7 +254,10 @@ extern "C" { // - seq_id : the sequence to which the respective token belongs // (if set to NULL, the sequence ID will be assumed to be 0) // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output - // (if set to NULL, only the logits for last token will be returned) + // (if set to NULL: + // - if 
embeddings: all tokens are output + // - if not: only the last token is output + // ) // typedef struct llama_batch { int32_t n_tokens; @@ -262,8 +265,8 @@ extern "C" { llama_token * token; float * embd; llama_pos * pos; - int32_t * n_seq_id; // TODO: remove, should belong to only 1 sequence - llama_seq_id ** seq_id; // TODO: become llama_seq_id * seq_id; + int32_t * n_seq_id; + llama_seq_id ** seq_id; int8_t * logits; // TODO: rename this to "output" } llama_batch; @@ -961,8 +964,7 @@ extern "C" { // Get the number of threads used for prompt and batch processing (multiple token). LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx); - // Set whether the model is in embeddings mode or not - // If true, embeddings will be returned but logits will not + // Set whether the context outputs embeddings or not LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings); // Set whether to use causal attention or not diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index a9f4a3d4c45c5..8b6d14fe8813c 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -299,7 +299,8 @@ llama_batch_allocr::llama_batch_allocr() { bool llama_batch_allocr::init( const llama_batch & batch_inp, const llama_vocab & vocab, - const llama_memory_i * memory) { + const llama_memory_i * memory, + bool embd_all) { clear(); batch = batch_inp; @@ -378,10 +379,31 @@ bool llama_batch_allocr::init( } if (!batch.logits) { - // by default return the output only for the last token - output.resize(batch.n_tokens); - output[output.size() - 1] = true; + if (embd_all) { + // return the output for all tokens + output.resize(batch.n_tokens, true); + } else { + // return the output only for the last token + output.resize(batch.n_tokens, false); + output[output.size() - 1] = true; + } + batch.logits = output.data(); + } else if (embd_all) { + bool warn = false; + + for (int32_t i = 0; i < batch.n_tokens; ++i) { + if (batch.logits[i] == 0) { + warn = true; + } + } + + if (warn) { + LLAMA_LOG_WARN("%s: embeddings required but some input tokens were not marked as outputs -> overriding\n", __func__); + + output.resize(batch.n_tokens, true); + batch.logits = output.data(); + } } // diff --git a/src/llama-batch.h b/src/llama-batch.h index 04501ce5d424c..a555c157234be 100644 --- a/src/llama-batch.h +++ b/src/llama-batch.h @@ -88,7 +88,8 @@ class llama_batch_allocr { bool init( const llama_batch & batch_inp, const llama_vocab & vocab, - const llama_memory_i * memory); + const llama_memory_i * memory, + bool embd_all); const llama_batch & get_batch() const; diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 3a113d1bcfb2a..f56a58e9b6ec6 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -728,7 +728,7 @@ int llama_context::encode(const llama_batch & batch_inp) { } // note: during encode, we always pass the full sequence starting from pos = 0 - if (!batch_allocr->init(batch_inp, model.vocab, nullptr)) { + if (!batch_allocr->init(batch_inp, model.vocab, nullptr, true)) { LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); return -1; } @@ -894,7 +894,10 @@ int llama_context::decode(const llama_batch & batch_inp) { return -1; } - if (!batch_allocr->init(batch_inp, model.vocab, memory.get())) { + // when computing embeddings, all tokens are output + const bool embd_all = cparams.embeddings; + + if (!batch_allocr->init(batch_inp, model.vocab, memory.get(), embd_all)) { LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); return -1; } @@ -911,12 +914,9 @@ int 
llama_context::decode(const llama_batch & batch_inp) { GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT - // this indicates we are doing pooled embedding - const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; - const uint32_t n_outputs_all = batch_allocr->get_n_outputs(); - if (embd_pooled) { + if (embd_all) { // require that all tokens are output if (n_outputs_all != n_tokens_all) { LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n", @@ -945,7 +945,7 @@ int llama_context::decode(const llama_batch & batch_inp) { llama_memory_state_ptr mstate; while (true) { - mstate = memory->init_batch(batch, cparams.n_ubatch, embd_pooled); + mstate = memory->init_batch(batch, cparams.n_ubatch, embd_all); if (!mstate) { return -2; } @@ -1058,7 +1058,7 @@ int llama_context::decode(const llama_batch & batch_inp) { // ggml_graph_dump_dot(gf, NULL, "llama.dot"); //} - auto * t_logits = cparams.embeddings ? nullptr : res->get_logits(); + auto * t_logits = res->get_logits(); auto * t_embd = cparams.embeddings ? res->get_embd() : nullptr; if (t_embd && res->get_embd_pooled()) { @@ -1222,9 +1222,8 @@ uint32_t llama_context::output_reserve(int32_t n_outputs) { const auto n_vocab = vocab.n_tokens(); const auto n_embd = hparams.n_embd; - // TODO: use a per-batch flag for logits presence instead - bool has_logits = !cparams.embeddings; - bool has_embd = cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE); + bool has_logits = true; + bool has_embd = cparams.embeddings; // TODO: hacky enc-dec support if (model.arch == LLM_ARCH_T5) { @@ -2044,14 +2043,11 @@ void llama_context::opt_epoch_iter( n_queued_tokens += n_tokens_all; - // this indicates we are doing pooled embedding - const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; - embd_seq.clear(); uint32_t n_outputs_all = n_tokens_all; - auto mstate = memory->init_batch(batch, cparams.n_ubatch, embd_pooled); + auto mstate = memory->init_batch(batch, cparams.n_ubatch, true); if (!mstate || mstate->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) { LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__); break; diff --git a/src/llama-kv-cache-recurrent.cpp b/src/llama-kv-cache-recurrent.cpp index de23b4ad23bce..8f6f120f682b7 100644 --- a/src/llama-kv-cache-recurrent.cpp +++ b/src/llama-kv-cache-recurrent.cpp @@ -359,9 +359,7 @@ llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const { return result; } -llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled) { - GGML_UNUSED(embd_pooled); - +llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) { auto sbatch = llama_sbatch(batch, hparams.n_embd, false); std::vector ubatches; @@ -369,8 +367,8 @@ llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & while (sbatch.n_tokens > 0) { llama_ubatch ubatch; - if (embd_pooled) { - // Pooled embeddings cannot be split across ubatches (yet) + if (embd_all) { + // if all tokens are output, split by sequence ubatch = sbatch.split_seq(n_ubatch); } else { ubatch = sbatch.split_equal(n_ubatch); diff --git a/src/llama-kv-cache-recurrent.h b/src/llama-kv-cache-recurrent.h index d7c02ea872160..f9b01a6513393 100644 --- a/src/llama-kv-cache-recurrent.h +++ b/src/llama-kv-cache-recurrent.h @@ -32,7 +32,7 @@ class 
llama_kv_cache_recurrent : public llama_memory_i { llama_memory_state_ptr init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled) override; + bool embd_all) override; llama_memory_state_ptr init_full() override; diff --git a/src/llama-kv-cache-unified-iswa.cpp b/src/llama-kv-cache-unified-iswa.cpp index 9814f76631203..a4a4c2b1b859d 100644 --- a/src/llama-kv-cache-unified-iswa.cpp +++ b/src/llama-kv-cache-unified-iswa.cpp @@ -95,8 +95,8 @@ llama_pos llama_kv_cache_unified_iswa::seq_pos_max(llama_seq_id seq_id) const { return kv_swa->seq_pos_max(seq_id); } -llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled) { - GGML_UNUSED(embd_pooled); +llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) { + GGML_UNUSED(embd_all); // first try simple split do { diff --git a/src/llama-kv-cache-unified-iswa.h b/src/llama-kv-cache-unified-iswa.h index d114c7378fbe9..6e941e1a41b88 100644 --- a/src/llama-kv-cache-unified-iswa.h +++ b/src/llama-kv-cache-unified-iswa.h @@ -34,7 +34,7 @@ class llama_kv_cache_unified_iswa : public llama_memory_i { llama_memory_state_ptr init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled) override; + bool embd_all) override; llama_memory_state_ptr init_full() override; diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index b17936abdb4c6..3b37679859d39 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -310,8 +310,8 @@ llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const { llama_memory_state_ptr llama_kv_cache_unified::init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled) { - GGML_UNUSED(embd_pooled); + bool embd_all) { + GGML_UNUSED(embd_all); do { auto sbatch = llama_sbatch(batch, hparams.n_embd, true); diff --git a/src/llama-kv-cache-unified.h b/src/llama-kv-cache-unified.h index d6dcd19f2507e..d96571d952b81 100644 --- a/src/llama-kv-cache-unified.h +++ b/src/llama-kv-cache-unified.h @@ -59,7 +59,7 @@ class llama_kv_cache_unified : public llama_memory_i { llama_memory_state_ptr init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled) override; + bool embd_all) override; llama_memory_state_ptr init_full() override; diff --git a/src/llama-memory.h b/src/llama-memory.h index 42e226dc0ed61..24668f861b976 100644 --- a/src/llama-memory.h +++ b/src/llama-memory.h @@ -73,7 +73,7 @@ struct llama_memory_i { virtual llama_memory_state_ptr init_batch( const llama_batch & batch, uint32_t n_ubatch, - bool embd_pooled) = 0; + bool embd_all) = 0; // simulate full cache, used for allocating worst-case compute buffers virtual llama_memory_state_ptr init_full() = 0; diff --git a/tools/server/server.cpp b/tools/server/server.cpp index 626c58bd304ff..c08e421255fce 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -88,6 +88,26 @@ enum error_type { ERROR_TYPE_NOT_SUPPORTED, // custom error }; +static bool server_task_type_need_embd(server_task_type task_type) { + switch (task_type) { + case SERVER_TASK_TYPE_EMBEDDING: + case SERVER_TASK_TYPE_RERANK: + return true; + default: + return false; + } +} + +static bool server_task_type_need_logits(server_task_type task_type) { + switch (task_type) { + case SERVER_TASK_TYPE_COMPLETION: + case SERVER_TASK_TYPE_INFILL: + return true; + default: + return false; + } +} + struct slot_params { bool stream = true; bool cache_prompt = 
true; // remember the prompt to avoid reprocessing all prompt @@ -1330,13 +1350,16 @@ struct server_slot { n_draft_accepted = 0; } - bool is_non_causal() const { - return task_type == SERVER_TASK_TYPE_EMBEDDING || task_type == SERVER_TASK_TYPE_RERANK; + bool need_embd() const { + return server_task_type_need_embd(task_type); + } + + bool need_logits() const { + return server_task_type_need_logits(task_type); } bool can_batch_with(server_slot & other_slot) const { - return is_non_causal() == other_slot.is_non_causal() - && are_lora_equal(lora, other_slot.lora); + return task_type == other_slot.task_type && are_lora_equal(lora, other_slot.lora); } bool has_budget(const common_params & global_params) { @@ -1480,7 +1503,6 @@ struct server_slot { {"n_ctx", n_ctx}, {"speculative", can_speculate()}, {"is_processing", is_processing()}, - {"non_causal", is_non_causal()}, {"params", params.to_json()}, {"prompt", prompt_tokens.detokenize(ctx, true)}, {"next_token", @@ -1907,6 +1929,14 @@ struct server_context { llama_batch_free(batch); } + // if the context does not have a memory module then all embeddings have to be computed within a single ubatch + // also we cannot split if the pooling would require any past tokens + bool can_split() const { + return + !llama_get_embeddings(ctx) || + (llama_get_memory(ctx) && llama_pooling_type(ctx) == LLAMA_POOLING_TYPE_LAST); + } + bool load_model(const common_params & params) { SRV_INF("loading model '%s'\n", params.model.path.c_str()); @@ -2730,6 +2760,7 @@ struct server_context { queue_tasks.defer(std::move(task)); break; } + if (slot->is_processing()) { // if requested slot is unavailable, we defer this task for processing later SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id); @@ -3092,7 +3123,14 @@ struct server_context { continue; } - if (slot.is_non_causal()) { + // TODO: support memory-less logits computation + if (slot.need_logits() && !llama_get_memory(ctx)) { + slot.release(); + send_error(slot, "the current context does not logits computation. skipping", ERROR_TYPE_SERVER); + continue; + } + + if (!can_split()) { if (slot.n_prompt_tokens > n_ubatch) { slot.release(); send_error(slot, "input is too large to process. increase the physical batch size", ERROR_TYPE_SERVER); @@ -3227,8 +3265,7 @@ struct server_context { } if (slot.n_past == slot.n_prompt_tokens && slot.n_past > 0) { - // we have to evaluate at least 1 token to generate logits. 
- SLT_WRN(slot, "need to evaluate at least 1 token to generate logits, n_past = %d, n_prompt_tokens = %d\n", slot.n_past, slot.n_prompt_tokens); + SLT_WRN(slot, "need to evaluate at least 1 token for each active slot, n_past = %d, n_prompt_tokens = %d\n", slot.n_past, slot.n_prompt_tokens); slot.n_past--; } @@ -3236,8 +3273,7 @@ struct server_context { slot.n_prompt_tokens_processed = 0; } - // non-causal tasks require to fit the entire prompt in the physical batch - if (slot.is_non_causal()) { + if (!can_split()) { // cannot fit the prompt in the current batch - will try next iter if (batch.n_tokens + slot.n_prompt_tokens > n_batch) { continue; @@ -3259,8 +3295,7 @@ struct server_context { slot.cache_tokens.keep_first(slot.n_past); // check if we should process the image - if (slot.n_past < slot.n_prompt_tokens - && slot.prompt_tokens[slot.n_past] == LLAMA_TOKEN_NULL) { + if (slot.n_past < slot.n_prompt_tokens && slot.prompt_tokens[slot.n_past] == LLAMA_TOKEN_NULL) { // process the image int32_t new_n_past; int32_t res = slot.prompt_tokens.process_chunk(ctx, mctx, slot.n_past, slot.id, new_n_past); @@ -3291,8 +3326,8 @@ struct server_context { break; // end of text chunk } - // without pooling, we want to output the embeddings for all the tokens in the batch - const bool need_embd = slot.task_type == SERVER_TASK_TYPE_EMBEDDING && llama_pooling_type(slot.ctx) == LLAMA_POOLING_TYPE_NONE; + // embedding requires all tokens in the batch to be output + const bool need_embd = server_task_type_need_embd(slot.task_type); common_batch_add(batch, cur_tok, slot.n_past, { slot.id }, need_embd); slot.cache_tokens.push_back(cur_tok); @@ -3346,17 +3381,15 @@ struct server_context { SRV_DBG("decoding batch, n_tokens = %d\n", batch.n_tokens); if (slot_batched) { - // make sure we're in the right embedding mode - llama_set_embeddings(ctx, slot_batched->is_non_causal()); // apply lora, only need to do it once per batch common_set_adapter_lora(ctx, slot_batched->lora); - } - const bool do_encode = (params_base.embedding || params_base.reranking); + llama_set_embeddings(ctx, slot_batched->need_embd()); + } // pad the batch so that batch.n_tokens >= n_slots // TODO: temporary workaround for https://github.com/ggml-org/llama.cpp/issues/13689 - if (do_encode) { + if (slot_batched->need_embd()) { const int n_slots = slots.size(); if (batch.n_tokens < n_slots) { @@ -3378,8 +3411,11 @@ struct server_context { SRV_WRN("adding %d dummy tokens to the batch, seq_id = %d\n", n_add, seq_id); for (int j = 0; j < n_add; ++j) { - common_batch_add(batch, 0, j, { seq_id }, false); + common_batch_add(batch, 0, j, { seq_id }, true); } + + slots[seq_id].cache_tokens.clear(); + llama_memory_seq_rm(llama_get_memory(ctx), seq_id, -1, -1); } } @@ -4174,11 +4210,6 @@ int main(int argc, char ** argv) { oaicompat_type oaicompat) -> void { GGML_ASSERT(type == SERVER_TASK_TYPE_COMPLETION || type == SERVER_TASK_TYPE_INFILL); - if (ctx_server.params_base.embedding) { - res_error(res, format_error_response("This server does not support completions. 
Start it without `--embeddings`", ERROR_TYPE_NOT_SUPPORTED)); - return; - } - auto completion_id = gen_chatcmplid(); std::unordered_set task_ids; try { @@ -4433,12 +4464,8 @@ int main(int argc, char ** argv) { OAICOMPAT_TYPE_NONE); // infill is not OAI compatible }; - const auto handle_chat_completions = [&ctx_server, &res_error, &handle_completions_impl](const httplib::Request & req, httplib::Response & res) { + const auto handle_chat_completions = [&ctx_server, &handle_completions_impl](const httplib::Request & req, httplib::Response & res) { LOG_DBG("request: %s\n", req.body.c_str()); - if (ctx_server.params_base.embedding) { - res_error(res, format_error_response("This server does not support completions. Start it without `--embeddings`", ERROR_TYPE_NOT_SUPPORTED)); - return; - } auto body = json::parse(req.body); std::vector files; @@ -4566,13 +4593,18 @@ int main(int argc, char ** argv) { }; const auto handle_embeddings_impl = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res, oaicompat_type oaicompat) { - const json body = json::parse(req.body); + if (!ctx_server.params_base.embedding) { + res_error(res, format_error_response("This server does not support embeddings. Start it with `--embeddings`", ERROR_TYPE_NOT_SUPPORTED)); + return; + } if (oaicompat != OAICOMPAT_TYPE_NONE && llama_pooling_type(ctx_server.ctx) == LLAMA_POOLING_TYPE_NONE) { res_error(res, format_error_response("Pooling type 'none' is not OAI compatible. Please use a different pooling type", ERROR_TYPE_INVALID_REQUEST)); return; } + const json body = json::parse(req.body); + // for the shape of input/content, see tokenize_input_prompts() json prompt; if (body.count("input") != 0) { @@ -4662,8 +4694,8 @@ int main(int argc, char ** argv) { }; const auto handle_rerank = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) { - if (!ctx_server.params_base.reranking || ctx_server.params_base.embedding) { - res_error(res, format_error_response("This server does not support reranking. Start it with `--reranking` and without `--embedding`", ERROR_TYPE_NOT_SUPPORTED)); + if (!ctx_server.params_base.embedding || ctx_server.params_base.pooling_type != LLAMA_POOLING_TYPE_RANK) { + res_error(res, format_error_response("This server does not support reranking. 
Start it with `--reranking`", ERROR_TYPE_NOT_SUPPORTED)); return; } From 0efd1109464abf21d03d95ff177480f0a1e725ee Mon Sep 17 00:00:00 2001 From: uvos Date: Mon, 16 Jun 2025 13:47:38 +0200 Subject: [PATCH 075/192] HIP: disable rocwmma on gfx12 by default until rocm 7.0 (#14202) --- ggml/CMakeLists.txt | 1 + ggml/src/ggml-cuda/common.cuh | 4 ++-- ggml/src/ggml-hip/CMakeLists.txt | 4 ++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index 727139cf385b7..7b398ae8e30ed 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -172,6 +172,7 @@ option(GGML_HIP "ggml: use HIP" option(GGML_HIP_GRAPHS "ggml: use HIP graph, experimental, slow" OFF) option(GGML_HIP_NO_VMM "ggml: do not try to use HIP VMM" ON) option(GGML_HIP_ROCWMMA_FATTN "ggml: enable rocWMMA for FlashAttention" OFF) +option(GGML_HIP_FORCE_ROCWMMA_FATTN_GFX12 "ggml: enable rocWMMA FlashAttention on GFX12" OFF) option(GGML_VULKAN "ggml: use Vulkan" OFF) option(GGML_VULKAN_CHECK_RESULTS "ggml: run Vulkan op checks" OFF) option(GGML_VULKAN_DEBUG "ggml: enable Vulkan debug output" OFF) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 563a7828bdd14..c14a12f54a8d6 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -207,9 +207,9 @@ typedef float2 dfloat2; #define FP16_MMA_AVAILABLE #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA -#if defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || defined(RDNA4)) +#if defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || (defined(GGML_HIP_ROCWMMA_FATTN_GFX12) && defined(RDNA4))) #define FP16_MMA_AVAILABLE -#endif // defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || defined(RDNA4)) +#endif // defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || (defined(GGML_HIP_ROCWMMA_FATTN_GFX12) && defined(RDNA4))) #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING #define NEW_MMA_AVAILABLE diff --git a/ggml/src/ggml-hip/CMakeLists.txt b/ggml/src/ggml-hip/CMakeLists.txt index 1fe8fe3b8d079..e29df98560e07 100644 --- a/ggml/src/ggml-hip/CMakeLists.txt +++ b/ggml/src/ggml-hip/CMakeLists.txt @@ -113,6 +113,10 @@ if (GGML_HIP_ROCWMMA_FATTN) add_compile_definitions(GGML_HIP_ROCWMMA_FATTN) endif() +if (GGML_HIP_FORCE_ROCWMMA_FATTN_GFX12 OR ${hip_VERSION} VERSION_GREATER_EQUAL 7.0) + add_compile_definitions(GGML_HIP_ROCWMMA_FATTN_GFX12) +endif() + if (NOT GGML_CUDA_FA) add_compile_definitions(GGML_CUDA_NO_FA) endif() From aa257146ee381b0242cc290ec0ed5731c0550259 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90inh=20Tr=E1=BB=8Dng=20Huy?= <77562200+huydt84@users.noreply.github.com> Date: Mon, 16 Jun 2025 21:53:41 +0900 Subject: [PATCH 076/192] model : add NeoBERT (#14164) * convert neobert model to gguf * add inference graph * fix flake8 lint * followed reviewer suggestions Co-authored-by: Georgi Gerganov * follow reviewers suggestions Co-authored-by: Georgi Gerganov * override NeoBERT feed-forward length --------- Co-authored-by: dinhhuy Co-authored-by: Georgi Gerganov --- convert_hf_to_gguf.py | 30 ++++++- gguf-py/gguf/constants.py | 14 +++ gguf-py/gguf/tensor_mapping.py | 9 ++ src/llama-arch.cpp | 16 ++++ src/llama-arch.h | 1 + src/llama-model.cpp | 153 +++++++++++++++++++++++++++++++++ 6 files changed, 222 insertions(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 
2232a7d82349e..58e455ae645ed 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -519,7 +519,7 @@ def prepare_metadata(self, vocab_only: bool): def set_gguf_parameters(self): self.gguf_writer.add_block_count(self.block_count) - if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions"], optional=True)) is not None: + if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx", "n_positions", "max_length"], optional=True)) is not None: self.gguf_writer.add_context_length(n_ctx) logger.info(f"gguf: context length = {n_ctx}") @@ -4076,6 +4076,34 @@ def _is_tokenizer_xlmroberta(self) -> bool: raise ValueError(f"unknown tokenizer: {toktyp}") +@ModelBase.register("NeoBERT", "NeoBERTLMHead", "NeoBERTForSequenceClassification") +class NeoBert(BertModel): + model_arch = gguf.MODEL_ARCH.NEO_BERT + + def set_gguf_parameters(self): + super().set_gguf_parameters() + + # NeoBERT uses 2/3 of the intermediate size as feed forward length + self.gguf_writer.add_feed_forward_length(int(2 * self.hparams["intermediate_size"] / 3)) + self.gguf_writer.add_rope_freq_base(10000.0) # default value for NeoBERT + self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE) + + f_rms_eps = self.hparams.get("norm_eps", 1e-6) # default value for NeoBERT + self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps) + logger.info(f"gguf: rms norm epsilon = {f_rms_eps}") + + self.gguf_writer.add_pooling_type(gguf.PoolingType.CLS) # https://huggingface.co/chandar-lab/NeoBERT#how-to-use + + def modify_tensors(self, data_torch, name, bid): + if name.startswith("decoder."): + return [] + + if name.startswith("model."): + name = name[6:] + + return super().modify_tensors(data_torch, name, bid) + + @ModelBase.register("XLMRobertaModel", "XLMRobertaForSequenceClassification") class XLMRobertaModel(BertModel): model_arch = gguf.MODEL_ARCH.BERT diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 9b2143c7c2eaa..834a1d5e1a97e 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -291,6 +291,7 @@ class MODEL_ARCH(IntEnum): BERT = auto() NOMIC_BERT = auto() NOMIC_BERT_MOE = auto() + NEO_BERT = auto() JINA_BERT_V2 = auto() BLOOM = auto() STABLELM = auto() @@ -573,6 +574,7 @@ class MODEL_TENSOR(IntEnum): MODEL_ARCH.BERT: "bert", MODEL_ARCH.NOMIC_BERT: "nomic-bert", MODEL_ARCH.NOMIC_BERT_MOE: "nomic-bert-moe", + MODEL_ARCH.NEO_BERT: "neo-bert", MODEL_ARCH.JINA_BERT_V2: "jina-bert-v2", MODEL_ARCH.BLOOM: "bloom", MODEL_ARCH.STABLELM: "stablelm", @@ -1081,6 +1083,18 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_UP_EXP, MODEL_TENSOR.LAYER_OUT_NORM, ], + MODEL_ARCH.NEO_BERT: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.ENC_OUTPUT_NORM, + MODEL_TENSOR.CLS, + MODEL_TENSOR.CLS_OUT, + ], MODEL_ARCH.JINA_BERT_V2: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.TOKEN_EMBD_NORM, diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 5e3f01754bf07..79f044d2a5945 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -31,6 +31,7 @@ class TensorNameMap: "model.embeddings", # rwkv7 "model.word_embeddings", # bailingmoe "language_model.model.embed_tokens", # llama4 + "encoder", # neobert ), # Token type embeddings @@ -134,6 +135,7 @@ class TensorNameMap: "rwkv.blocks.{bid}.ln1", # rwkv6 "model.layers.{bid}.ln1", # rwkv7 "model.layers.{bid}.input_layernorm", # llama4 + 
"transformer_encoder.{bid}.attention_norm", # neobert ), # Attention norm 2 @@ -161,6 +163,7 @@ class TensorNameMap: "model.layers.{bid}.self_attn.qkv_proj", # phi3 "encoder.layers.{bid}.self_attention.query_key_value", # chatglm "transformer.layers.{bid}.attn.qkv_proj", # openelm + "transformer_encoder.{bid}.qkv", # neobert ), # Attention query @@ -236,6 +239,7 @@ class TensorNameMap: "transformer.layers.{bid}.attn.out_proj", # openelm "transformer.h.{bid}.attn.attention.out_proj", # exaone "model.layers.{bid}.self_attn.o_proj", # llama4 + "transformer_encoder.{bid}.wo", # neobert ), # Attention output norm @@ -276,6 +280,7 @@ class TensorNameMap: "encoder.layers.{bid}.post_attention_layernorm", # chatglm "transformer.layers.{bid}.ffn_norm", # openelm "model.layers.{bid}.post_attention_layernorm", # llama4 + "transformer_encoder.{bid}.ffn_norm", # neobert ), # Post feed-forward norm @@ -340,6 +345,7 @@ class TensorNameMap: "encoder.layers.{bid}.mlp.dense_h_to_4h", # chatglm "transformer.h.{bid}.mlp.c_fc_1", # exaone "model.layers.{bid}.feed_forward.up_proj", # llama4 + "transformer_encoder.{bid}.ffn.w12", # neobert ), MODEL_TENSOR.FFN_UP_EXP: ( @@ -422,6 +428,7 @@ class TensorNameMap: "encoder.layers.{bid}.mlp.dense_4h_to_h", # chatglm "model.layers.h.{bid}.mlp.c_proj", # exaone "model.layers.{bid}.feed_forward.down_proj", # llama4 + "transformer_encoder.{bid}.ffn.w3", # neobert ), MODEL_TENSOR.FFN_DOWN_EXP: ( @@ -832,12 +839,14 @@ class TensorNameMap: # TODO: these do not belong to block_mappings_cfg - move them to mappings_cfg MODEL_TENSOR.ENC_OUTPUT_NORM: ( "encoder.final_layer_norm", # t5 + "layer_norm", # neobert ), MODEL_TENSOR.CLS: ( "classifier", # jina "classifier.dense", # roberta "pre_classifier", # distillbert + "dense", # neobert ), MODEL_TENSOR.CLS_OUT: ( diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index a3e7c861ca02f..de8d289cf967e 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -20,6 +20,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_BERT, "bert" }, { LLM_ARCH_NOMIC_BERT, "nomic-bert" }, { LLM_ARCH_NOMIC_BERT_MOE, "nomic-bert-moe" }, + { LLM_ARCH_NEO_BERT, "neo-bert" }, { LLM_ARCH_JINA_BERT_V2, "jina-bert-v2" }, { LLM_ARCH_BLOOM, "bloom" }, { LLM_ARCH_STABLELM, "stablelm" }, @@ -514,6 +515,21 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, }, }, + { + LLM_ARCH_NEO_BERT, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" }, + { LLM_TENSOR_CLS, "cls" }, + { LLM_TENSOR_CLS_OUT, "cls.output" }, + }, + }, { LLM_ARCH_JINA_BERT_V2, { diff --git a/src/llama-arch.h b/src/llama-arch.h index 168fdcb401cfd..3e8a61da3c13e 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -24,6 +24,7 @@ enum llm_arch { LLM_ARCH_BERT, LLM_ARCH_NOMIC_BERT, LLM_ARCH_NOMIC_BERT_MOE, + LLM_ARCH_NEO_BERT, LLM_ARCH_JINA_BERT_V2, LLM_ARCH_BLOOM, LLM_ARCH_STABLELM, diff --git a/src/llama-model.cpp b/src/llama-model.cpp index dcc8b0be72563..a5eb122f998d8 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -749,6 +749,16 @@ void llama_model::load_hparams(llama_model_loader & ml) { } } } break; + case LLM_ARCH_NEO_BERT: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + 
ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type); + + if (hparams.n_layer == 28) { + type = LLM_TYPE_250M; + } + } break; case LLM_ARCH_BLOOM: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); @@ -2212,6 +2222,32 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd}, 0); } } break; + case LLM_ARCH_NEO_BERT: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + cls = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED); + cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"), {n_embd}, TENSOR_NOT_REQUIRED); + + cls_out = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED); + cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"), {hparams.n_cls_out}, TENSOR_NOT_REQUIRED); + + output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff*2}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); + } + } break; case LLM_ARCH_JINA_BERT_V2: { tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); // word_embeddings @@ -6182,6 +6218,117 @@ struct llm_build_bert : public llm_graph_context { } }; +struct llm_build_neo_bert : public llm_graph_context { + llm_build_neo_bert(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + ggml_tensor * inp_pos = build_inp_pos(); + + // construct input embeddings (token, type, position) + inpL = build_inp_embd(model.tok_embd); + cb(inpL, "inp_embd", -1); + + auto * inp_attn = build_attn_inp_no_cache(); + + // iterate layers + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * cur = inpL; + + ggml_tensor * Qcur; + ggml_tensor * Kcur; + ggml_tensor * Vcur; + + // pre-norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + + // self-attention + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + // RoPE + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, 
freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, nullptr, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + cb(cur, "kqv_out", il); + + if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // re-add the layer input + cur = ggml_add(ctx0, cur, inpL); + + ggml_tensor * ffn_inp = cur; + cb(ffn_inp, "ffn_inp", il); + + // pre-norm + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // feed-forward network + cur = build_ffn(cur, + model.layers[il].ffn_up, + NULL, NULL, NULL, NULL, NULL, + model.layers[il].ffn_down, + NULL, NULL, NULL, + LLM_FFN_SWIGLU, LLM_FFN_SEQ, il); + + // attentions bypass the intermediate layer + cur = ggml_add(ctx0, cur, ffn_inp); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm_enc, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_embd", -1); + res->t_embd = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + struct llm_build_bloom : public llm_graph_context { llm_build_bloom(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; @@ -13595,6 +13742,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params, case LLM_ARCH_JINA_BERT_V2: case LLM_ARCH_NOMIC_BERT: case LLM_ARCH_NOMIC_BERT_MOE: + case LLM_ARCH_NEO_BERT: case LLM_ARCH_WAVTOKENIZER_DEC: { res = nullptr; @@ -13703,6 +13851,10 @@ llm_graph_result_ptr llama_model::build_graph( { llm = std::make_unique(*this, params, gf); } break; + case LLM_ARCH_NEO_BERT: + { + llm = std::make_unique(*this, params, gf); + } break; case LLM_ARCH_BLOOM: { llm = std::make_unique(*this, params, gf); @@ -14082,6 +14234,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_GRANITE_MOE: case LLM_ARCH_CHAMELEON: case LLM_ARCH_BAILINGMOE: + case LLM_ARCH_NEO_BERT: case LLM_ARCH_ARCEE: return LLAMA_ROPE_TYPE_NORM; From 714684acc5e2227f350431910f890ed8b026c500 Mon Sep 17 00:00:00 2001 From: bandoti <141645996+bandoti@users.noreply.github.com> Date: Mon, 16 Jun 2025 10:32:13 -0300 Subject: [PATCH 077/192] cmake: clean up external project logic for vulkan-shaders-gen (#14179) * Remove install step for vulkan-shaders-gen * Add install step to normalize msvc with make * Regenerate modified shaders at build-time --- .github/workflows/build.yml | 2 +- ggml/src/ggml-vulkan/CMakeLists.txt | 49 ++++++++----------- .../ggml-vulkan/vulkan-shaders/CMakeLists.txt | 12 ----- 3 files changed, 22 insertions(+), 41 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5422dd81723f9..85c4f3512b0e7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -693,7 +693,7 @@ jobs: - build: 'openblas-x64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON 
-DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' - build: 'vulkan-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_VULKAN=ON' + defines: '-DCMAKE_BUILD_TYPE=Release -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_VULKAN=ON' - build: 'llvm-arm64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON' - build: 'llvm-arm64-opencl-adreno' diff --git a/ggml/src/ggml-vulkan/CMakeLists.txt b/ggml/src/ggml-vulkan/CMakeLists.txt index 4a88415f96eae..95e2ebe643732 100644 --- a/ggml/src/ggml-vulkan/CMakeLists.txt +++ b/ggml/src/ggml-vulkan/CMakeLists.txt @@ -49,15 +49,7 @@ if (Vulkan_FOUND) ../../include/ggml-vulkan.h ) - set(VULKAN_SHADER_GEN_CMAKE_ARGS - -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR} - -DCMAKE_RUNTIME_OUTPUT_DIRECTORY=${CMAKE_RUNTIME_OUTPUT_DIRECTORY} - ) - - set(VULKAN_SHADER_GEN_CMAKE_BUILD_ARGS "") - if (CMAKE_BUILD_TYPE AND CMAKE_BUILD_TYPE MATCHES "Debug|Release|MinSizeRel|RelWithDebInfo") - list(APPEND VULKAN_SHADER_GEN_CMAKE_BUILD_ARGS --config=${CMAKE_BUILD_TYPE}) - endif() + set(VULKAN_SHADER_GEN_CMAKE_ARGS "") # Test all shader extensions test_shader_extension_support( @@ -136,42 +128,39 @@ if (Vulkan_FOUND) set(HOST_CMAKE_TOOLCHAIN_FILE "") endif() - # Always use ExternalProject_Add approach include(ExternalProject) - # Add toolchain file if cross-compiling if (CMAKE_CROSSCOMPILING) list(APPEND VULKAN_SHADER_GEN_CMAKE_ARGS -DCMAKE_TOOLCHAIN_FILE=${HOST_CMAKE_TOOLCHAIN_FILE}) message(STATUS "vulkan-shaders-gen toolchain file: ${HOST_CMAKE_TOOLCHAIN_FILE}") endif() - # Native build through ExternalProject_Add ExternalProject_Add( vulkan-shaders-gen SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders - CMAKE_ARGS ${VULKAN_SHADER_GEN_CMAKE_ARGS} - BUILD_COMMAND ${CMAKE_COMMAND} --build . ${VULKAN_SHADER_GEN_CMAKE_BUILD_ARGS} - INSTALL_COMMAND ${CMAKE_COMMAND} --install . - INSTALL_DIR ${CMAKE_BINARY_DIR} + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR}/$ + -DCMAKE_INSTALL_BINDIR=. + -DCMAKE_BUILD_TYPE=$ + ${VULKAN_SHADER_GEN_CMAKE_ARGS} + + BUILD_COMMAND ${CMAKE_COMMAND} --build . --config $ + INSTALL_COMMAND ${CMAKE_COMMAND} --install . 
--config $ ) ExternalProject_Add_StepTargets(vulkan-shaders-gen build install) set (_ggml_vk_host_suffix $,.exe,>) - set (_ggml_vk_genshaders_cmd ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/vulkan-shaders-gen${_ggml_vk_host_suffix}) - set (_ggml_vk_header ${CMAKE_CURRENT_BINARY_DIR}/ggml-vulkan-shaders.hpp) - set (_ggml_vk_source ${CMAKE_CURRENT_BINARY_DIR}/ggml-vulkan-shaders.cpp) - set (_ggml_vk_input_dir ${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders) - set (_ggml_vk_output_dir ${CMAKE_CURRENT_BINARY_DIR}/vulkan-shaders.spv) - - file(GLOB _ggml_vk_shader_deps "${_ggml_vk_input_dir}/*.comp") - set (_ggml_vk_shader_deps ${_ggml_vk_shader_deps} vulkan-shaders-gen) + set (_ggml_vk_genshaders_dir "${CMAKE_BINARY_DIR}/$") + set (_ggml_vk_genshaders_cmd "${_ggml_vk_genshaders_dir}/vulkan-shaders-gen${_ggml_vk_host_suffix}") + set (_ggml_vk_header "${CMAKE_CURRENT_BINARY_DIR}/ggml-vulkan-shaders.hpp") + set (_ggml_vk_source "${CMAKE_CURRENT_BINARY_DIR}/ggml-vulkan-shaders.cpp") + set (_ggml_vk_input_dir "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders") + set (_ggml_vk_output_dir "${CMAKE_CURRENT_BINARY_DIR}/vulkan-shaders.spv") - # Add build and install dependencies for all builds - set(_ggml_vk_shader_deps ${_ggml_vk_shader_deps} vulkan-shaders-gen-build vulkan-shaders-gen-install) + file(GLOB _ggml_vk_shader_files CONFIGURE_DEPENDS "${_ggml_vk_input_dir}/*.comp") add_custom_command( OUTPUT ${_ggml_vk_header} - ${_ggml_vk_source} + ${_ggml_vk_source} COMMAND ${_ggml_vk_genshaders_cmd} --glslc ${Vulkan_GLSLC_EXECUTABLE} @@ -181,7 +170,11 @@ if (Vulkan_FOUND) --target-cpp ${_ggml_vk_source} --no-clean - DEPENDS ${_ggml_vk_shader_deps} + DEPENDS ${_ggml_vk_shader_files} + vulkan-shaders-gen + vulkan-shaders-gen-build + vulkan-shaders-gen-install + COMMENT "Generate vulkan shaders" ) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt b/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt index e60e9d1e5b5c5..14e9daaa01a25 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +++ b/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt @@ -25,15 +25,3 @@ add_executable(${TARGET} vulkan-shaders-gen.cpp) install(TARGETS ${TARGET} RUNTIME) target_compile_features(${TARGET} PRIVATE cxx_std_17) target_link_libraries(vulkan-shaders-gen PUBLIC Threads::Threads) - -# Configure output directories for MSVC builds -if(MSVC) - # Get the main project's runtime output directory if possible - if(DEFINED CMAKE_RUNTIME_OUTPUT_DIRECTORY) - foreach(CONFIG ${CMAKE_CONFIGURATION_TYPES}) - string(TOUPPER ${CONFIG} CONFIG) - set_target_properties(${TARGET} PROPERTIES - RUNTIME_OUTPUT_DIRECTORY_${CONFIG} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) - endforeach() - endif() -endif() From ca3c490796861f1ad9883007d879a0618bae9e36 Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Mon, 16 Jun 2025 08:11:43 -0700 Subject: [PATCH 078/192] llama : add thread safety test (#14035) * llama : add thread safety test * llamafile : remove global state * llama : better LLAMA_SPLIT_MODE_NONE logic when main_gpu < 0 GPU devices are not used --------- Co-authored-by: Georgi Gerganov --- .github/workflows/build.yml | 1 + ci/run.sh | 2 +- common/common.cpp | 16 ++- ggml/src/ggml-cpu/ggml-cpu-impl.h | 3 + ggml/src/ggml-cpu/ggml-cpu.c | 8 ++ ggml/src/ggml-cpu/llamafile/sgemm.cpp | 8 +- src/llama.cpp | 18 +-- tests/CMakeLists.txt | 2 + tests/test-thread-safety.cpp | 152 ++++++++++++++++++++++++++ 9 files changed, 192 insertions(+), 18 deletions(-) create mode 100644 tests/test-thread-safety.cpp diff --git a/.github/workflows/build.yml 
b/.github/workflows/build.yml index 85c4f3512b0e7..c4783a6df8882 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -778,6 +778,7 @@ jobs: cmake -S . -B build ${{ matrix.defines }} ` -DCURL_LIBRARY="$env:CURL_PATH/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:CURL_PATH/include" cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS} + cp $env:CURL_PATH/bin/libcurl-*.dll build/bin/Release - name: Add libopenblas.dll id: add_libopenblas_dll diff --git a/ci/run.sh b/ci/run.sh index 2968a7dd48d42..94005570511b6 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -39,7 +39,7 @@ sd=`dirname $0` cd $sd/../ SRC=`pwd` -CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=OFF" +CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON" if [ ! -z ${GG_BUILD_METAL} ]; then CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON -DGGML_METAL_USE_BF16=ON" diff --git a/common/common.cpp b/common/common.cpp index 5b465150f0533..eb80cee0894a6 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -767,6 +767,9 @@ bool fs_validate_filename(const std::string & filename) { return true; } +#include + + // returns true if successful, false otherwise bool fs_create_directory_with_parents(const std::string & path) { #ifdef _WIN32 @@ -784,9 +787,16 @@ bool fs_create_directory_with_parents(const std::string & path) { // process path from front to back, procedurally creating directories while ((pos_slash = path.find('\\', pos_slash)) != std::string::npos) { const std::wstring subpath = wpath.substr(0, pos_slash); - const wchar_t * test = subpath.c_str(); - const bool success = CreateDirectoryW(test, NULL); + pos_slash += 1; + + // skip the drive letter, in some systems it can return an access denied error + if (subpath.length() == 2 && subpath[1] == ':') { + continue; + } + + const bool success = CreateDirectoryW(subpath.c_str(), NULL); + if (!success) { const DWORD error = GetLastError(); @@ -800,8 +810,6 @@ bool fs_create_directory_with_parents(const std::string & path) { return false; } } - - pos_slash += 1; } return true; diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h index 9662e4d7b5a6a..ae68cd006336d 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-impl.h +++ b/ggml/src/ggml-cpu/ggml-cpu-impl.h @@ -503,6 +503,9 @@ static __m256 __lasx_xvreplfr2vr_s(const float val) { // TODO: move to ggml-threading void ggml_barrier(struct ggml_threadpool * tp); +void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value); +int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value); + #ifdef __cplusplus } #endif diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index ff28bf98bc7df..2c12e493bc9b0 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -559,6 +559,14 @@ void ggml_barrier(struct ggml_threadpool * tp) { #endif } +void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value) { + atomic_store_explicit(&tp->current_chunk, value, memory_order_relaxed); +} + +int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value) { + return atomic_fetch_add_explicit(&tp->current_chunk, value, memory_order_relaxed); +} + #if defined(__gnu_linux__) static cpu_set_t ggml_get_numa_affinity(void) { cpu_set_t cpuset; diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp index 1d46158f928c4..1c545f803327b 100644 --- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp +++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp @@ -53,7 +53,6 @@ #include "ggml-cpu-impl.h" #include "ggml-quants.h" 
-#include #include #include @@ -394,8 +393,6 @@ class tinyBLAS { template NOINLINE void gemm(int64_t m, int64_t n, int64_t BN) { - static std::atomic current_chunk; - GGML_ASSERT(m % (RM * BM) == 0); const int64_t ytiles = m / (RM * BM); const int64_t xtiles = (n + RN -1) / RN; @@ -410,7 +407,7 @@ class tinyBLAS { if (params->ith == 0) { GGML_ASSERT( jj_BN * SIZE_BN + (NB_BN - jj_BN) * (SIZE_BN - 1) == xtiles); // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. - std::atomic_store_explicit(¤t_chunk, (int64_t)params->nth, std::memory_order_relaxed); + ggml_threadpool_chunk_set(params->threadpool, params->nth); } ggml_barrier(params->threadpool); @@ -439,8 +436,7 @@ class tinyBLAS { GGML_ASSERT(jj == jj2); } - // next step. - job = std::atomic_fetch_add_explicit(¤t_chunk, (int64_t)1, std::memory_order_relaxed); + job = ggml_threadpool_chunk_add(params->threadpool, 1); } ggml_barrier(params->threadpool); diff --git a/src/llama.cpp b/src/llama.cpp index 2f06e0f8ce12d..34906cdb62844 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -198,14 +198,18 @@ static struct llama_model * llama_model_load_from_file_impl( // if using single GPU mode, remove all except the main GPU if (params.split_mode == LLAMA_SPLIT_MODE_NONE) { - if (params.main_gpu < 0 || params.main_gpu >= (int)model->devices.size()) { - LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %d)\n", __func__, params.main_gpu, (int)model->devices.size()); - llama_model_free(model); - return nullptr; + if (params.main_gpu < 0) { + model->devices.clear(); + } else { + if (params.main_gpu >= (int)model->devices.size()) { + LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %zu)\n", __func__, params.main_gpu, model->devices.size()); + llama_model_free(model); + return nullptr; + } + ggml_backend_dev_t main_gpu = model->devices[params.main_gpu]; + model->devices.clear(); + model->devices.push_back(main_gpu); } - ggml_backend_dev_t main_gpu = model->devices[params.main_gpu]; - model->devices.clear(); - model->devices.push_back(main_gpu); } for (auto * dev : model->devices) { diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index db4b2cf65cc43..fc1557a2d4065 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -185,6 +185,8 @@ llama_build_and_test(test-json-partial.cpp) llama_build_and_test(test-log.cpp) llama_build_and_test(test-regex-partial.cpp) +llama_build_and_test(test-thread-safety.cpp ARGS -hf ggml-org/models -hff tinyllamas/stories15M-q4_0.gguf -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4) + # this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135) if (NOT WIN32) llama_build_and_test(test-arg-parser.cpp) diff --git a/tests/test-thread-safety.cpp b/tests/test-thread-safety.cpp new file mode 100644 index 0000000000000..d525b7430f9d9 --- /dev/null +++ b/tests/test-thread-safety.cpp @@ -0,0 +1,152 @@ +// thread safety test +// - Loads a copy of the same model on each GPU, plus a copy on the CPU +// - Creates n_parallel (--parallel) contexts per model +// - Runs inference in parallel on each context + +#include +#include +#include +#include "llama.h" +#include "arg.h" +#include "common.h" +#include "log.h" +#include "sampling.h" + +int main(int argc, char ** argv) { + common_params params; + + if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) { + return 1; + } + + common_init(); + + llama_backend_init(); + llama_numa_init(params.numa); + + 
LOG_INF("%s\n", common_params_get_system_info(params).c_str()); + + //llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) { + // if (level == GGML_LOG_LEVEL_ERROR) { + // common_log_add(common_log_main(), level, "%s", text); + // } + //}, NULL); + + auto cparams = common_context_params_to_llama(params); + + int dev_count = ggml_backend_dev_count(); + int gpu_dev_count = 0; + for (int i = 0; i < dev_count; ++i) { + auto * dev = ggml_backend_dev_get(i); + if (dev && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) { + gpu_dev_count++; + } + } + const int num_models = gpu_dev_count + 1 + 1; // GPUs + 1 CPU model + 1 layer split + //const int num_models = std::max(1, gpu_dev_count); + const int num_contexts = std::max(1, params.n_parallel); + + std::vector models; + std::vector threads; + std::atomic failed = false; + + for (int m = 0; m < num_models; ++m) { + auto mparams = common_model_params_to_llama(params); + + if (m < gpu_dev_count) { + mparams.split_mode = LLAMA_SPLIT_MODE_NONE; + mparams.main_gpu = m; + } else if (m == gpu_dev_count) { + mparams.split_mode = LLAMA_SPLIT_MODE_NONE; + mparams.main_gpu = -1; // CPU model + } else { + mparams.split_mode = LLAMA_SPLIT_MODE_LAYER;; + } + + llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams); + if (model == NULL) { + LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str()); + return 1; + } + + models.emplace_back(model); + } + + for (int m = 0; m < num_models; ++m) { + auto * model = models[m].get(); + for (int c = 0; c < num_contexts; ++c) { + threads.emplace_back([&, m, c, model]() { + LOG_INF("Creating context %d/%d for model %d/%d\n", c + 1, num_contexts, m + 1, num_models); + + llama_context_ptr ctx { llama_init_from_model(model, cparams) }; + if (ctx == NULL) { + LOG_ERR("failed to create context\n"); + failed.store(true); + return; + } + + std::unique_ptr sampler { common_sampler_init(model, params.sampling), common_sampler_free }; + if (sampler == NULL) { + LOG_ERR("failed to create sampler\n"); + failed.store(true); + return; + } + + llama_batch batch = {}; + { + auto prompt = common_tokenize(ctx.get(), params.prompt, true); + if (prompt.empty()) { + LOG_ERR("failed to tokenize prompt\n"); + failed.store(true); + return; + } + batch = llama_batch_get_one(prompt.data(), prompt.size()); + if (llama_decode(ctx.get(), batch)) { + LOG_ERR("failed to decode prompt\n"); + failed.store(true); + return; + } + } + + const auto * vocab = llama_model_get_vocab(model); + std::string result = params.prompt; + + for (int i = 0; i < params.n_predict; i++) { + llama_token token; + if (batch.n_tokens > 0) { + token = common_sampler_sample(sampler.get(), ctx.get(), batch.n_tokens - 1); + } else { + token = llama_vocab_bos(vocab); + } + + result += common_token_to_piece(ctx.get(), token); + + if (llama_vocab_is_eog(vocab, token)) { + break; + } + + batch = llama_batch_get_one(&token, 1); + if (llama_decode(ctx.get(), batch)) { + LOG_ERR("Model %d/%d, Context %d/%d: failed to decode\n", m + 1, num_models, c + 1, num_contexts); + failed.store(true); + return; + } + } + + LOG_INF("Model %d/%d, Context %d/%d: %s\n\n", m + 1, num_models, c + 1, num_contexts, result.c_str()); + }); + } + } + + for (auto & thread : threads) { + thread.join(); + } + + if (failed) { + LOG_ERR("One or more threads failed.\n"); + return 1; + } + + LOG_INF("All threads finished without errors.\n"); + return 0; +} From 44e4d3b8091b8cd03d367be60ea502632fe9e032 Mon Sep 17 00:00:00 2001 From: 
Georgi Gerganov Date: Mon, 16 Jun 2025 22:33:27 +0300 Subject: [PATCH 079/192] server : fix incorrect usage of llama_get_embeddings() (#14225) * server : fix incorrect usage of llama_get_embeddings() ggml-ci * cont : fix the fix ggml-ci --- include/llama.h | 1 + tools/server/server.cpp | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/include/llama.h b/include/llama.h index b086b68e6d4ea..635508b10f2ff 100644 --- a/include/llama.h +++ b/include/llama.h @@ -965,6 +965,7 @@ extern "C" { LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx); // Set whether the context outputs embeddings or not + // TODO: rename to avoid confusion with llama_get_embeddings() LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings); // Set whether to use causal attention or not diff --git a/tools/server/server.cpp b/tools/server/server.cpp index c08e421255fce..721d09182845d 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -1358,6 +1358,14 @@ struct server_slot { return server_task_type_need_logits(task_type); } + // if the context does not have a memory module then all embeddings have to be computed within a single ubatch + // also we cannot split if the pooling would require any past tokens + bool can_split() const { + return + !need_embd() || + (llama_get_memory(ctx) && llama_pooling_type(ctx) == LLAMA_POOLING_TYPE_LAST); + } + bool can_batch_with(server_slot & other_slot) const { return task_type == other_slot.task_type && are_lora_equal(lora, other_slot.lora); } @@ -1929,14 +1937,6 @@ struct server_context { llama_batch_free(batch); } - // if the context does not have a memory module then all embeddings have to be computed within a single ubatch - // also we cannot split if the pooling would require any past tokens - bool can_split() const { - return - !llama_get_embeddings(ctx) || - (llama_get_memory(ctx) && llama_pooling_type(ctx) == LLAMA_POOLING_TYPE_LAST); - } - bool load_model(const common_params & params) { SRV_INF("loading model '%s'\n", params.model.path.c_str()); @@ -3130,7 +3130,7 @@ struct server_context { continue; } - if (!can_split()) { + if (!slot.can_split()) { if (slot.n_prompt_tokens > n_ubatch) { slot.release(); send_error(slot, "input is too large to process. 
increase the physical batch size", ERROR_TYPE_SERVER); @@ -3273,7 +3273,7 @@ struct server_context { slot.n_prompt_tokens_processed = 0; } - if (!can_split()) { + if (!slot.can_split()) { // cannot fit the prompt in the current batch - will try next iter if (batch.n_tokens + slot.n_prompt_tokens > n_batch) { continue; From 4323b94b75851e8e0d870384dbddfc8a56659eab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Mon, 16 Jun 2025 21:58:42 +0200 Subject: [PATCH 080/192] common : suggest --jinja when autodetection fails (#14222) --- common/chat.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/chat.cpp b/common/chat.cpp index 0dad14fba9ba5..7d9aaeb12a190 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -1838,7 +1838,7 @@ static common_chat_params common_chat_templates_apply_legacy( if (res < 0) { // if the custom "tmpl" is not supported, we throw an error // this is a bit redundant (for good), since we're not sure if user validated the custom template with llama_chat_verify_template() - throw std::runtime_error("this custom template is not supported"); + throw std::runtime_error("this custom template is not supported, try using --jinja"); } // if it turns out that our buffer is too small, we resize it From abc759f4e58cefdca482ff3dd98cff0ab83cfaf3 Mon Sep 17 00:00:00 2001 From: R0CKSTAR Date: Tue, 17 Jun 2025 17:48:08 +0800 Subject: [PATCH 081/192] musa: fix build warning (unused variable) (#14231) Signed-off-by: Xiaodong Ye --- ggml/src/ggml-cuda/ggml-cuda.cu | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 0bd2904e1c9d1..898b24341471d 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2664,7 +2664,9 @@ static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx ggml_backend_buft_is_cuda_split(node->src[j]->buffer->buft) || (integrated && ggml_backend_buft_is_cuda_host(node->src[j]->buffer->buft))); } } -#endif +#else + GGML_UNUSED(integrated); +#endif // NDEBUG bool ok = ggml_cuda_compute_forward(*cuda_ctx, node); if (!ok) { From 84c4938f216067e80471f8fe1b000a650ab63904 Mon Sep 17 00:00:00 2001 From: xctan Date: Tue, 17 Jun 2025 17:58:32 +0800 Subject: [PATCH 082/192] ggml-cpu : remove the weak alias trick (#14221) --- ggml/src/ggml-cpu/apple-fallback.h | 88 -------------- ggml/src/ggml-cpu/arch-fallback.h | 184 +++++++++++++++++++++++++++++ ggml/src/ggml-cpu/ggml-cpu-impl.h | 25 ---- ggml/src/ggml-cpu/quants.c | 28 +---- ggml/src/ggml-cpu/repack.cpp | 17 +-- ggml/src/ggml-cpu/repack.h | 5 - 6 files changed, 186 insertions(+), 161 deletions(-) delete mode 100644 ggml/src/ggml-cpu/apple-fallback.h create mode 100644 ggml/src/ggml-cpu/arch-fallback.h diff --git a/ggml/src/ggml-cpu/apple-fallback.h b/ggml/src/ggml-cpu/apple-fallback.h deleted file mode 100644 index f477505d787a7..0000000000000 --- a/ggml/src/ggml-cpu/apple-fallback.h +++ /dev/null @@ -1,88 +0,0 @@ -#pragma once - -// Solve alias issue for Apple targets (currently PowerPC, x86, and ARM64). -// Mach-O has a weak alias equivalent but no practical compiler support can -// be found, so we need to do it manually. -// ref: https://stackoverflow.com/questions/42757744 -// -// This file is a complement to native implementations in the `arch` folder. 
-// A kernel in quants.c or repack.cpp is either: -// - implemented in the `arch` folder, or -// - defined in this file to remove the `_generic` suffix - -#if defined(GGML_CPU_GENERIC) -// quants.c -#define quantize_row_q8_0_generic quantize_row_q8_0 -#define quantize_row_q8_1_generic quantize_row_q8_1 -#define quantize_row_q8_K_generic quantize_row_q8_K -#define ggml_vec_dot_q4_0_q8_0_generic ggml_vec_dot_q4_0_q8_0 -#define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 -#define ggml_vec_dot_q5_0_q8_0_generic ggml_vec_dot_q5_0_q8_0 -#define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 -#define ggml_vec_dot_q8_0_q8_0_generic ggml_vec_dot_q8_0_q8_0 -#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K -#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K -#define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K -#define ggml_vec_dot_q3_K_q8_K_generic ggml_vec_dot_q3_K_q8_K -#define ggml_vec_dot_q4_K_q8_K_generic ggml_vec_dot_q4_K_q8_K -#define ggml_vec_dot_q5_K_q8_K_generic ggml_vec_dot_q5_K_q8_K -#define ggml_vec_dot_q6_K_q8_K_generic ggml_vec_dot_q6_K_q8_K -#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K -#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K -#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K -#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K -#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K -#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K -#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K -#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 -#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K -// repack.cpp -#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 -#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 -#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 -#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 -#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 -#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 -#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K -#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 -#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 -#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 -#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 -#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K -#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 -#elif defined(__aarch64__) || defined(__arm__) -// repack.cpp -#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 -#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K -#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K -#elif defined(__x86_64__) || defined(__i386__) -// repack.cpp -#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 -#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 -#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 -#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 -#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 -#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 -#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 -#elif defined(__POWERPC__) -// ref: https://github.com/ggml-org/llama.cpp/pull/14146#issuecomment-2972561679 -// quants.c -#define 
quantize_row_q8_K_generic quantize_row_q8_K -#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K -#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K -#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K -// repack.cpp -#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 -#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 -#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 -#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 -#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 -#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 -#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K -#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 -#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 -#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 -#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 -#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K -#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 -#endif diff --git a/ggml/src/ggml-cpu/arch-fallback.h b/ggml/src/ggml-cpu/arch-fallback.h new file mode 100644 index 0000000000000..10e5342516a9c --- /dev/null +++ b/ggml/src/ggml-cpu/arch-fallback.h @@ -0,0 +1,184 @@ +#pragma once + +// Rename `_generic` functions if no native implementation is available. +// This effectively selects the generic implementation. + +#if defined(GGML_CPU_GENERIC) +// quants.c +#define quantize_row_q8_0_generic quantize_row_q8_0 +#define quantize_row_q8_1_generic quantize_row_q8_1 +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_q4_0_q8_0_generic ggml_vec_dot_q4_0_q8_0 +#define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 +#define ggml_vec_dot_q5_0_q8_0_generic ggml_vec_dot_q5_0_q8_0 +#define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 +#define ggml_vec_dot_q8_0_q8_0_generic ggml_vec_dot_q8_0_q8_0 +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K +#define ggml_vec_dot_q3_K_q8_K_generic ggml_vec_dot_q3_K_q8_K +#define ggml_vec_dot_q4_K_q8_K_generic ggml_vec_dot_q4_K_q8_K +#define ggml_vec_dot_q5_K_q8_K_generic ggml_vec_dot_q5_K_q8_K +#define ggml_vec_dot_q6_K_q8_K_generic ggml_vec_dot_q6_K_q8_K +#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K +#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K +#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K +#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K +#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K +#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 +#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define 
ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__aarch64__) || defined(__arm__) || defined(_M_ARM) || defined(_M_ARM64) +// repack.cpp +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#elif defined(__x86_64__) || defined(__i386__) || defined(_M_IX86) || defined(_M_X64) +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__POWERPC__) || defined(__powerpc__) +// ref: https://github.com/ggml-org/llama.cpp/pull/14146#issuecomment-2972561679 +// quants.c +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__loongarch64) +// quants.c +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define 
ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__riscv) +// quants.c +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K +#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K +#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K +#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K +#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K +#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 +#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__s390x__) +// quants.c +#define quantize_row_q8_K_generic quantize_row_q8_K +#define ggml_vec_dot_q5_0_q8_0_generic ggml_vec_dot_q5_0_q8_0 +#define ggml_vec_dot_q5_1_q8_1_generic ggml_vec_dot_q5_1_q8_1 +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_q2_K_q8_K_generic ggml_vec_dot_q2_K_q8_K +#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K +#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K +#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K +#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K +#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K +#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#elif defined(__wasm__) 
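+// WebAssembly: the kernels listed below have no native (SIMD) implementation
+// and are therefore mapped onto their _generic counterparts.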
+// quants.c +#define ggml_vec_dot_q4_1_q8_1_generic ggml_vec_dot_q4_1_q8_1 +#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K +#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K +#define ggml_vec_dot_iq2_xxs_q8_K_generic ggml_vec_dot_iq2_xxs_q8_K +#define ggml_vec_dot_iq2_xs_q8_K_generic ggml_vec_dot_iq2_xs_q8_K +#define ggml_vec_dot_iq2_s_q8_K_generic ggml_vec_dot_iq2_s_q8_K +#define ggml_vec_dot_iq3_xxs_q8_K_generic ggml_vec_dot_iq3_xxs_q8_K +#define ggml_vec_dot_iq3_s_q8_K_generic ggml_vec_dot_iq3_s_q8_K +#define ggml_vec_dot_iq1_s_q8_K_generic ggml_vec_dot_iq1_s_q8_K +#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K +#define ggml_vec_dot_iq4_nl_q8_0_generic ggml_vec_dot_iq4_nl_q8_0 +#define ggml_vec_dot_iq4_xs_q8_K_generic ggml_vec_dot_iq4_xs_q8_K +// repack.cpp +#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4 +#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8 +#define ggml_quantize_mat_q8_K_4x8_generic ggml_quantize_mat_q8_K_4x8 +#define ggml_gemv_q4_0_4x4_q8_0_generic ggml_gemv_q4_0_4x4_q8_0 +#define ggml_gemv_q4_0_4x8_q8_0_generic ggml_gemv_q4_0_4x8_q8_0 +#define ggml_gemv_q4_0_8x8_q8_0_generic ggml_gemv_q4_0_8x8_q8_0 +#define ggml_gemv_q4_K_8x8_q8_K_generic ggml_gemv_q4_K_8x8_q8_K +#define ggml_gemv_iq4_nl_4x4_q8_0_generic ggml_gemv_iq4_nl_4x4_q8_0 +#define ggml_gemm_q4_0_4x4_q8_0_generic ggml_gemm_q4_0_4x4_q8_0 +#define ggml_gemm_q4_0_4x8_q8_0_generic ggml_gemm_q4_0_4x8_q8_0 +#define ggml_gemm_q4_0_8x8_q8_0_generic ggml_gemm_q4_0_8x8_q8_0 +#define ggml_gemm_q4_K_8x8_q8_K_generic ggml_gemm_q4_K_8x8_q8_K +#define ggml_gemm_iq4_nl_4x4_q8_0_generic ggml_gemm_iq4_nl_4x4_q8_0 +#endif diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h index ae68cd006336d..bbd93c0ef66fe 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-impl.h +++ b/ggml/src/ggml-cpu/ggml-cpu-impl.h @@ -509,28 +509,3 @@ int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value); #ifdef __cplusplus } #endif - -#define GGML_DO_PRAGMA_(x) _Pragma (#x) -#define GGML_DO_PRAGMA(x) GGML_DO_PRAGMA_(x) -#if defined(GGML_CPU_GENERIC) || defined(__HIPCC__) || defined(__APPLE__) -// Note for Apple targets: -// - clang: aliases are not supported on darwin -// - all native kernels need to be implemented in both x86 and arm files -// - on iOS, tvOS, and visionOS, if cmake cannot determine the target architecture, all `_generic` names are replaced by defines -# define GGML_WEAK_ALIAS(name, alias) -#elif defined(__GNUC__) -// GCC/Clang on *nix -# define GGML_WEAK_ALIAS(name, alias) GGML_DO_PRAGMA(weak name = alias) // NOLINT -#elif defined(_MSC_VER) && defined(_WIN64) -// MSVC -// Note: C name mangling varies across different calling conventions -// see https://learn.microsoft.com/en-us/cpp/build/reference/decorated-names?view=msvc-170 -# define GGML_WEAK_ALIAS(name, alias) GGML_DO_PRAGMA(comment(linker, "/alternatename:" #name "=" #alias)) -#elif defined(_MSC_VER) && defined(WIN32) -// ref: https://github.com/ggml-org/whisper.cpp/pull/3239#issuecomment-2958224591 -# define GGML_WEAK_ALIAS(name, alias) GGML_DO_PRAGMA(comment(linker, "/alternatename:_" #name "=_" #alias)) -#else -# error "Unsupported compiler for GGML_WEAK_ALIAS" -#endif - -#define GGML_CPU_NATIVE_IMPL(name) GGML_WEAK_ALIAS(name, name ## _generic) diff --git a/ggml/src/ggml-cpu/quants.c b/ggml/src/ggml-cpu/quants.c index 516c5b2ced06d..d2e705f287af5 100644 --- a/ggml/src/ggml-cpu/quants.c +++ b/ggml/src/ggml-cpu/quants.c @@ -5,9 +5,7 @@ #include 
"ggml-quants.h" #include "quants.h" -#if defined(__APPLE__) -#include "apple-fallback.h" -#endif +#include "arch-fallback.h" #include #include @@ -42,12 +40,10 @@ void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, in void quantize_row_q8_0_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q8_0_ref(x, y, k); } -GGML_CPU_NATIVE_IMPL(quantize_row_q8_0) void quantize_row_q8_1_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q8_1_ref(x, y, k); } -GGML_CPU_NATIVE_IMPL(quantize_row_q8_1) // // 2-6 bit quantization in super-blocks @@ -108,7 +104,6 @@ void quantize_row_tq2_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, void quantize_row_q8_K_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { quantize_row_q8_K_ref(x, y, k); } -GGML_CPU_NATIVE_IMPL(quantize_row_q8_K) //===================================== Dot products ================================= @@ -147,7 +142,6 @@ void ggml_vec_dot_q4_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, c *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q4_0_q8_0) // TODO: add WASM SIMD void ggml_vec_dot_q4_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { @@ -185,7 +179,6 @@ void ggml_vec_dot_q4_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, c *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q4_1_q8_1) void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; @@ -229,7 +222,6 @@ void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, c *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q5_0_q8_0) void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_1; @@ -273,7 +265,6 @@ void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, c *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q5_1_q8_1) void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { const int qk = QK8_0; @@ -304,7 +295,6 @@ void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, c *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q8_0_q8_0) void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); @@ -357,7 +347,6 @@ void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_tq1_0_q8_K) void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); @@ -390,7 +379,6 @@ void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_tq2_0_q8_K) void ggml_vec_dot_q2_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); @@ -443,7 +431,6 @@ void ggml_vec_dot_q2_K_q8_K_generic(int n, float * 
GGML_RESTRICT s, size_t bs, c } *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q2_K_q8_K) void ggml_vec_dot_q3_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -523,7 +510,6 @@ void ggml_vec_dot_q3_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q3_K_q8_K) void ggml_vec_dot_q4_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -599,7 +585,6 @@ void ggml_vec_dot_q4_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q4_K_q8_K) void ggml_vec_dot_q5_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -680,7 +665,6 @@ void ggml_vec_dot_q5_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q5_K_q8_K) void ggml_vec_dot_q6_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -736,7 +720,6 @@ void ggml_vec_dot_q6_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_q6_K_q8_K) void ggml_vec_dot_iq2_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -779,7 +762,6 @@ void ggml_vec_dot_iq2_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs } *s = 0.125f * sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq2_xxs_q8_K) void ggml_vec_dot_iq2_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -830,7 +812,6 @@ void ggml_vec_dot_iq2_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, } *s = 0.125f * sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq2_xs_q8_K) void ggml_vec_dot_iq2_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -883,7 +864,6 @@ void ggml_vec_dot_iq2_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, *s = 0.125f * sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq2_s_q8_K) void ggml_vec_dot_iq3_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -928,7 +908,6 @@ void ggml_vec_dot_iq3_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs } *s = 0.25f * sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq3_xxs_q8_K) void ggml_vec_dot_iq3_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -985,7 +964,6 @@ void ggml_vec_dot_iq3_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, } *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq3_s_q8_K) void 
ggml_vec_dot_iq1_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -1029,7 +1007,6 @@ void ggml_vec_dot_iq1_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq1_s_q8_K) void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); @@ -1091,7 +1068,6 @@ void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq1_m_q8_K) void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); @@ -1121,7 +1097,6 @@ void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, } *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq4_nl_q8_0) void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(nrc == 1); @@ -1168,7 +1143,6 @@ void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, } *s = sumf; } -GGML_CPU_NATIVE_IMPL(ggml_vec_dot_iq4_xs_q8_K) // ============================ 4-bit non-linear quants diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp index 604ccee907843..5c6715d5c01ea 100644 --- a/ggml/src/ggml-cpu/repack.cpp +++ b/ggml/src/ggml-cpu/repack.cpp @@ -8,9 +8,7 @@ #include "ggml-cpu-impl.h" #include "traits.h" -#if defined(__APPLE__) -#include "apple-fallback.h" -#endif +#include "arch-fallback.h" #include #include @@ -87,7 +85,6 @@ void ggml_quantize_mat_q8_0_4x4_generic(const float * GGML_RESTRICT x, void * GG } } } -GGML_CPU_NATIVE_IMPL(ggml_quantize_mat_q8_0_4x4) void ggml_quantize_mat_q8_0_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); @@ -126,7 +123,6 @@ void ggml_quantize_mat_q8_0_4x8_generic(const float * GGML_RESTRICT x, void * GG } } } -GGML_CPU_NATIVE_IMPL(ggml_quantize_mat_q8_0_4x8) void ggml_quantize_mat_q8_K_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK_K == 256); @@ -178,7 +174,6 @@ void ggml_quantize_mat_q8_K_4x8_generic(const float * GGML_RESTRICT x, void * GG } } } -GGML_CPU_NATIVE_IMPL(ggml_quantize_mat_q8_K_4x8) } // extern "C" @@ -248,7 +243,6 @@ void ggml_gemv_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; } } -GGML_CPU_NATIVE_IMPL(ggml_gemv_q4_0_4x4_q8_0) void ggml_gemv_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -293,7 +287,6 @@ void ggml_gemv_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; } } -GGML_CPU_NATIVE_IMPL(ggml_gemv_q4_0_4x8_q8_0) void ggml_gemv_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -340,7 +333,6 @@ void ggml_gemv_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, } } } 
-GGML_CPU_NATIVE_IMPL(ggml_gemv_q4_0_8x8_q8_0) void ggml_gemv_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; @@ -419,7 +411,6 @@ void ggml_gemv_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, } } } -GGML_CPU_NATIVE_IMPL(ggml_gemv_q4_K_8x8_q8_K) void ggml_gemv_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -466,7 +457,6 @@ void ggml_gemv_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs } } } -GGML_CPU_NATIVE_IMPL(ggml_gemv_iq4_nl_4x4_q8_0) void ggml_gemm_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -523,7 +513,6 @@ void ggml_gemm_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, } } } -GGML_CPU_NATIVE_IMPL(ggml_gemm_q4_0_4x4_q8_0) void ggml_gemm_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -578,7 +567,6 @@ void ggml_gemm_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, } } } -GGML_CPU_NATIVE_IMPL(ggml_gemm_q4_0_4x8_q8_0) void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -633,7 +621,6 @@ void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, } } } -GGML_CPU_NATIVE_IMPL(ggml_gemm_q4_0_8x8_q8_0) void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK_K; @@ -723,7 +710,6 @@ void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, } } } -GGML_CPU_NATIVE_IMPL(ggml_gemm_q4_K_8x8_q8_K) void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; @@ -780,7 +766,6 @@ void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs } } } -GGML_CPU_NATIVE_IMPL(ggml_gemm_iq4_nl_4x4_q8_0) } // extern "C" diff --git a/ggml/src/ggml-cpu/repack.h b/ggml/src/ggml-cpu/repack.h index b13d2d0c73495..4421e5f8e7046 100644 --- a/ggml/src/ggml-cpu/repack.h +++ b/ggml/src/ggml-cpu/repack.h @@ -64,10 +64,6 @@ static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wro extern "C" { #endif -// Workaround for clang: -// clang++ complains: ``error: call to 'ggml_gemm_q4_0_4x4_q8_0' is ambiguous'' -// repro: https://godbolt.org/z/oKdeWKonM (ICE), https://godbolt.org/z/1szq6P36v (ambiguous call) -#if defined(GGML_CPU_CLANG_WORKAROUND) || defined(__APPLE__) || !(defined(__GNUC__) && defined(__clang__)) || defined(__HIPCC__) void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); void ggml_quantize_mat_q8_K_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); @@ -81,7 +77,6 @@ void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, 
const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); -#endif // !defined(__clang__) // Native implementations void ggml_quantize_mat_q8_0_4x4_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); From 0b4b2718dd0ac31259afdc1d79f69e23f315a630 Mon Sep 17 00:00:00 2001 From: bandoti <141645996+bandoti@users.noreply.github.com> Date: Tue, 17 Jun 2025 17:33:25 -0300 Subject: [PATCH 083/192] cmake: remove shader-gen step-targets from ggml-vulkan (#14226) * Remove step-targets from vulkan-shaders-gen * Unset DESTDIR when building vulkan-shaders-gen --- ggml/src/ggml-vulkan/CMakeLists.txt | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/ggml/src/ggml-vulkan/CMakeLists.txt b/ggml/src/ggml-vulkan/CMakeLists.txt index 95e2ebe643732..39f022f33d856 100644 --- a/ggml/src/ggml-vulkan/CMakeLists.txt +++ b/ggml/src/ggml-vulkan/CMakeLists.txt @@ -144,9 +144,15 @@ if (Vulkan_FOUND) ${VULKAN_SHADER_GEN_CMAKE_ARGS} BUILD_COMMAND ${CMAKE_COMMAND} --build . --config $ - INSTALL_COMMAND ${CMAKE_COMMAND} --install . --config $ + + # NOTE: When DESTDIR is set using Makefile generators and + # "make install" triggers the build step, vulkan-shaders-gen + # would be installed into the DESTDIR prefix, so it is unset + # to ensure that does not happen. + + INSTALL_COMMAND ${CMAKE_COMMAND} -E env --unset=DESTDIR + ${CMAKE_COMMAND} --install . --config $ ) - ExternalProject_Add_StepTargets(vulkan-shaders-gen build install) set (_ggml_vk_host_suffix $,.exe,>) set (_ggml_vk_genshaders_dir "${CMAKE_BINARY_DIR}/$") @@ -172,8 +178,6 @@ if (Vulkan_FOUND) DEPENDS ${_ggml_vk_shader_files} vulkan-shaders-gen - vulkan-shaders-gen-build - vulkan-shaders-gen-install COMMENT "Generate vulkan shaders" ) From d3a0914e84d0f2ed28317e1210a6abe9efd749f6 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Thu, 12 Jun 2025 12:27:09 +0200 Subject: [PATCH 084/192] examples : include examples in msvc disable warn (ggml/1270) This commit adds the examples in the "list" of targets to ignore MSVC warnings. The motivation for this is that currently the examples generate a number of warnings that are ignore/disabled for the core ggml project. This makes for a cleaner output when building. 
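For reference, the listed targets are passed to the pre-existing disable_msvc_warnings() helper defined earlier in ggml/CMakeLists.txt (it already covers the ggml core libraries, as the unchanged context lines show). A minimal sketch of what such a helper is assumed to look like — the real definition and the flag variable name live in that file and may differ:

```cmake
# Hypothetical sketch of the helper this patch relies on; the actual
# definition is in ggml/CMakeLists.txt and is not part of this change.
function(disable_msvc_warnings target_name)
    if (TARGET ${target_name})
        # MSVC_WARNING_FLAGS is assumed to hold the /wdNNNN flags shown in the hunk below
        target_compile_options(${target_name} PRIVATE ${MSVC_WARNING_FLAGS})
    endif()
endfunction()
```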
--- ggml/CMakeLists.txt | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index 7b398ae8e30ed..fe0acc81e6ace 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -368,6 +368,7 @@ if (MSVC) /wd4005 # Macro redefinition /wd4244 # Conversion from one type to another type, possible loss of data /wd4267 # Conversion from 'size_t' to a smaller type, possible loss of data + /wd4566 # Conversion from 'char' to 'wchar_t', possible loss of data /wd4996 # Disable POSIX deprecation warnings /wd4702 # Unreachable code warnings ) @@ -387,4 +388,30 @@ if (MSVC) disable_msvc_warnings(ggml-cpu-skylakex) disable_msvc_warnings(ggml-cpu-icelake) disable_msvc_warnings(ggml-cpu-alderlake) + + if (GGML_BUILD_EXAMPLES) + disable_msvc_warnings(common-ggml) + disable_msvc_warnings(common) + + disable_msvc_warnings(mnist-common) + disable_msvc_warnings(mnist-eval) + disable_msvc_warnings(mnist-train) + + disable_msvc_warnings(gpt-2-ctx) + disable_msvc_warnings(gpt-2-alloc) + disable_msvc_warnings(gpt-2-backend) + disable_msvc_warnings(gpt-2-sched) + disable_msvc_warnings(gpt-2-quantize) + disable_msvc_warnings(gpt-2-batched) + + disable_msvc_warnings(gpt-j) + disable_msvc_warnings(gpt-j-quantize) + + disable_msvc_warnings(magika) + disable_msvc_warnings(yolov3-tiny) + disable_msvc_warnings(sam) + + disable_msvc_warnings(simple-ctx) + disable_msvc_warnings(simple-backend) + endif() endif() From 17e1e356202fea90446645a7107805dc6b4985d6 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Fri, 13 Jun 2025 09:05:44 +0200 Subject: [PATCH 085/192] ggml : remove unused ggml_context_container (ggml/1272) This commit removes the unused `ggml_context_container` structure from the ggml library. It looks like the usage of this struct was removed in Commit 4757fe18d56ec11bf9c07feaca6e9d5b5357e7f4 ("ggml : alloc ggml_contexts on the heap (whisper/2525)"). The motivation for this changes is to improve code clarity/readability. --- ggml/src/ggml.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 196b7b8f3e2ae..a8edad3778aa9 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -888,12 +888,6 @@ struct ggml_context { struct ggml_object * objects_end; }; -struct ggml_context_container { - bool used; - - struct ggml_context context; -}; - // // data types // From 2d57d9943d4ada3168da97273ad5742241eb348f Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Fri, 13 Jun 2025 15:06:42 +0200 Subject: [PATCH 086/192] ggml : disable warnings for tests when using MSVC (ggml/1273) * ggml : disable warnings for tests when using MSVC This commit disables warnings for tests on windows when using MSVC. The motivation for this is that this brings the build output more inline with what Linux/MacOS systems produce. 
There is still one warning generated for the tests which is: ```console Building Custom Rule C:/ggml/tests/CMakeLists.txt cl : command line warning D9025: overriding '/DNDEBUG' with '/UNDEBUG' [C:\ggml\build\tests\test-arange.vcxproj] test-arange.cpp test-arange.vcxproj -> C:\ggml\build\bin\Release\test-arange.exe ``` * ggml : fix typo in tests disable list --- ggml/CMakeLists.txt | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index fe0acc81e6ace..4e7399f9e68f9 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -368,6 +368,7 @@ if (MSVC) /wd4005 # Macro redefinition /wd4244 # Conversion from one type to another type, possible loss of data /wd4267 # Conversion from 'size_t' to a smaller type, possible loss of data + /wd4305 # Conversion from 'type1' to 'type2', possible loss of data /wd4566 # Conversion from 'char' to 'wchar_t', possible loss of data /wd4996 # Disable POSIX deprecation warnings /wd4702 # Unreachable code warnings @@ -414,4 +415,20 @@ if (MSVC) disable_msvc_warnings(simple-ctx) disable_msvc_warnings(simple-backend) endif() + + if (GGML_BUILD_TESTS) + disable_msvc_warnings(test-mul-mat) + disable_msvc_warnings(test-arange) + disable_msvc_warnings(test-backend-ops) + disable_msvc_warnings(test-cont) + disable_msvc_warnings(test-conv-transpose) + disable_msvc_warnings(test-conv-transpose-1d) + disable_msvc_warnings(test-conv1d) + disable_msvc_warnings(test-conv2d) + disable_msvc_warnings(test-conv2d-dw) + disable_msvc_warnings(test-customop) + disable_msvc_warnings(test-dup) + disable_msvc_warnings(test-opt) + disable_msvc_warnings(test-pool) + endif () endif() From 8023efa63b897171d6aa476f1bf7bd26f60108fd Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 18 Jun 2025 09:58:23 +0300 Subject: [PATCH 087/192] sync : ggml ggml-ci --- scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index 914fe47ff6a34..bb5d56a0e0c92 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -6a7d170c04789f6ebcf320ed03c1b16973f93bd7 +8cda0a3c19f2c7dc493887353c42f6956bc268b1 From d311034c4d862df00eb6bd67eb7d9592ccea9679 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Wed, 18 Jun 2025 09:52:07 +0200 Subject: [PATCH 088/192] convert : fix null head_dim AutoConfig regression (#14248) --- convert_hf_to_gguf.py | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 58e455ae645ed..b754dd815a2dc 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -556,11 +556,8 @@ def set_gguf_parameters(self): logger.info(f"gguf: experts used count = {n_experts_used}") if (head_dim := self.hparams.get("head_dim")) is not None: - # Workaround for incorrect AutoConfig value for DeepSeekV3 (is set correctly in DeepSeekV2Model class) - # https://github.com/huggingface/transformers/blob/19224c3642705c5b6988c9f5f4251f83323d05ae/src/transformers/models/deepseek_v3/configuration_deepseek_v3.py#L210 - if self.hparams.get("model_type") != "deepseek_v3": - self.gguf_writer.add_key_length(head_dim) - self.gguf_writer.add_value_length(head_dim) + self.gguf_writer.add_key_length(head_dim) + self.gguf_writer.add_value_length(head_dim) self.gguf_writer.add_file_type(self.ftype) logger.info(f"gguf: file type = {self.ftype}") @@ -1901,9 +1898,7 @@ def set_gguf_parameters(self): hparams = self.hparams 
self.gguf_writer.add_vocab_size(hparams["vocab_size"]) - if "head_dim" in hparams: - rope_dim = hparams["head_dim"] - else: + if (rope_dim := hparams.get("head_dim")) is None: rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] self.gguf_writer.add_rope_dimension_count(rope_dim) @@ -1985,7 +1980,8 @@ def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: if rope_scaling := self.find_hparam(["rope_scaling"], optional=True): if rope_scaling.get("rope_type", '').lower() == "llama3": base = self.hparams.get("rope_theta", 10000.0) - dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) + if (dim := self.hparams.get("head_dim")) is None: + dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"] freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)) factor = rope_scaling.get("factor", 8.0) @@ -2321,9 +2317,7 @@ def set_gguf_parameters(self): hparams = self.hparams self.gguf_writer.add_vocab_size(hparams["vocab_size"]) - if "head_dim" in hparams: - rope_dim = hparams["head_dim"] - else: + if (rope_dim := hparams.get("head_dim")) is None: rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] self.gguf_writer.add_rope_dimension_count(rope_dim) @@ -2363,7 +2357,8 @@ def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: if rope_scaling := self.find_hparam(["rope_scaling"], optional=True): if rope_scaling.get("rope_type", '').lower() == "llama3": base = self.hparams.get("rope_theta", 10000.0) - dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) + if (dim := self.hparams.get("head_dim")) is None: + dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"] freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)) factor = rope_scaling.get("factor", 8.0) @@ -3681,9 +3676,7 @@ def set_gguf_parameters(self): hparams = self.hparams self.gguf_writer.add_vocab_size(hparams["vocab_size"]) - if "head_dim" in hparams: - rope_dim = hparams["head_dim"] - else: + if (rope_dim := hparams.get("head_dim")) is None: rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] self.gguf_writer.add_rope_dimension_count(rope_dim) @@ -5098,9 +5091,7 @@ def set_vocab(self): def set_gguf_parameters(self): super().set_gguf_parameters() hparams = self.hparams - if "head_dim" in hparams: - rope_dim = hparams["head_dim"] - else: + if (rope_dim := hparams.get("head_dim")) is None: rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] self.gguf_writer.add_rope_dimension_count(rope_dim) @@ -5990,7 +5981,8 @@ def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: if rope_scaling := self.find_hparam(["rope_scaling"], optional=True): if rope_scaling.get("rope_type", '').lower() == "llama3": base = self.hparams.get("rope_theta", 10000.0) - dim = self.hparams.get("head_dim", self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) + if (dim := self.hparams.get("head_dim")) is None: + dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"] freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)) factor = rope_scaling.get("factor", 8.0) @@ -6102,7 +6094,8 @@ def set_vocab(self): def set_gguf_parameters(self): super().set_gguf_parameters() hparams = self.hparams - rope_dim = hparams.get("head_dim") or hparams["hidden_size"] // hparams["num_attention_heads"] + if (rope_dim := hparams.get("head_dim")) is None: + rope_dim = 
hparams["hidden_size"] // hparams["num_attention_heads"] self.gguf_writer.add_rope_dimension_count(rope_dim) rope_scaling = self.hparams.get("rope_scaling") or {} @@ -6134,7 +6127,8 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter n_head = self.hparams["num_attention_heads"] n_kv_head = self.hparams.get("num_key_value_heads") n_embd = self.hparams["hidden_size"] - head_dim = self.hparams.get("head_dim") or n_embd // n_head + if (head_dim := self.hparams.get("head_dim")) is None: + head_dim = n_embd // n_head output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT) From c8f423b428bd072f4641eb1488a46c12398ca12e Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Wed, 18 Jun 2025 09:58:43 +0200 Subject: [PATCH 089/192] llama-chat : fix multiple system message for gemma, orion (#14246) --- src/llama-chat.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index bc4fa05a74ef4..0839cad3ee6db 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -333,7 +333,7 @@ int32_t llm_chat_apply_template( std::string role(message->role); if (role == "system") { // there is no system message for gemma, but we will merge it with user prompt, so nothing is broken - system_prompt = trim(message->content); + system_prompt += trim(message->content); continue; } // in gemma, "assistant" is "model" @@ -355,7 +355,7 @@ int32_t llm_chat_apply_template( std::string role(message->role); if (role == "system") { // there is no system message support, we will merge it with user prompt - system_prompt = message->content; + system_prompt += message->content; continue; } else if (role == "user") { ss << "Human: "; From f4cee554fe5ad6f20f4d23139b55e9b429c7e649 Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Wed, 18 Jun 2025 10:43:57 +0200 Subject: [PATCH 090/192] mtmd : refactor llava-uhd preprocessing logic (#14247) * mtmd : refactor llava-uhd preprocessing logic * fix editorconfig --- tools/mtmd/clip.cpp | 183 ++++++++++++++++++++++++++------------------ tools/mtmd/clip.h | 3 - tools/mtmd/mtmd.cpp | 6 +- 3 files changed, 111 insertions(+), 81 deletions(-) diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp index c25bacc17769b..30283d6f1f032 100644 --- a/tools/mtmd/clip.cpp +++ b/tools/mtmd/clip.cpp @@ -187,7 +187,7 @@ struct clip_hparams { float eps = 1e-6; float rope_theta = 0.0; - std::vector image_grid_pinpoints; + std::vector image_res_candidates; // for llava-uhd style models int32_t image_crop_resolution; std::unordered_set vision_feature_layer; int32_t attn_window_size = 0; @@ -2109,8 +2109,7 @@ struct clip_model_loader { if (is_vision) { get_u32(KEY_IMAGE_SIZE, hparams.image_size); get_u32(KEY_PATCH_SIZE, hparams.patch_size); - get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false); - get_arr_int(KEY_IMAGE_GRID_PINPOINTS, hparams.image_grid_pinpoints, false); + get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false); get_i32(KEY_MINICPMV_VERSION, hparams.minicpmv_version, false); // legacy } else if (is_audio) { @@ -2120,6 +2119,20 @@ struct clip_model_loader { GGML_ASSERT(false && "unknown modality"); } + // for pinpoints, we need to convert it into a list of resolution candidates + { + std::vector pinpoints; + get_arr_int(KEY_IMAGE_GRID_PINPOINTS, pinpoints, false); + if (!pinpoints.empty()) { + for (size_t i = 0; i < pinpoints.size(); i += 2) { + hparams.image_res_candidates.push_back({ + pinpoints[i], + pinpoints[i+1], + }); + } + } + } + // default warmup 
value hparams.warmup_image_size = hparams.image_size; @@ -2231,16 +2244,7 @@ struct clip_model_loader { { hparams.rope_theta = 10000.0f; get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor); - - // borrowed from llava-1.6 - const int isize = hparams.image_size; - hparams.image_grid_pinpoints = { - isize, isize*2, // 336, 672 - isize*2, isize, // 672, 336 - isize*2, isize*2, // 672, 672 - isize*3, isize, // 1008, 336 - isize, isize*3, // 336, 1008 - }; + set_llava_uhd_res_candidates(model, 3); } break; case PROJECTOR_TYPE_ULTRAVOX: case PROJECTOR_TYPE_QWEN2A: @@ -2674,6 +2678,21 @@ struct clip_model_loader { output[i] = values[i]; } } + + void set_llava_uhd_res_candidates(clip_model & model, const int max_patches_per_side) { + auto & hparams = model.hparams; + for (int x = 1; x <= max_patches_per_side; x++) { + for (int y = 1; y <= max_patches_per_side; y++) { + if (x == 1 && y == 1) { + continue; // skip the first point + } + hparams.image_res_candidates.push_back(clip_image_size{ + x*hparams.image_size, + y*hparams.image_size, + }); + } + } + } }; struct clip_init_result clip_init(const char * fname, struct clip_context_params ctx_params) { @@ -3028,36 +3047,41 @@ struct llava_uhd { bool padding_refined = false; // if true, refine image will be padded to the grid size (e.g. llava-1.6) }; - static int get_max_slices(struct clip_ctx * ctx) { - if (clip_is_minicpmv(ctx)) { - return 9; - } - return 0; - } - static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) { slice_instructions res; const int patch_size = clip_get_patch_size(ctx); const int slice_size = clip_get_image_size(ctx); - const int max_slice_nums = get_max_slices(ctx); const int original_width = original_size.width; const int original_height = original_size.height; - const float log_ratio = log((float)original_width / original_height); - const float ratio = (float)original_width * original_height / (slice_size * slice_size); - const int multiple = fmin(ceil(ratio), max_slice_nums); - const bool has_slices = (multiple > 1); - const bool has_pinpoints = !ctx->model.hparams.image_grid_pinpoints.empty(); + + const bool has_slices = original_size.width > slice_size || original_size.height > slice_size; + const bool has_pinpoints = !ctx->model.hparams.image_res_candidates.empty(); + + if (!has_slices) { + // skip slicing logic + res.overview_size = clip_image_size{slice_size, slice_size}; + res.refined_size = clip_image_size{0, 0}; + res.grid_size = clip_image_size{0, 0}; + + return res; + } if (has_pinpoints) { // has pinpoints, use them to calculate the grid size (e.g. 
llava-1.6) auto refine_size = llava_uhd::select_best_resolution( - ctx->model.hparams.image_grid_pinpoints, - original_size); + original_size, + ctx->model.hparams.image_res_candidates); res.overview_size = clip_image_size{slice_size, slice_size}; res.refined_size = refine_size; res.grid_size = clip_image_size{0, 0}; res.padding_refined = true; + LOG_DBG("%s: using pinpoints for slicing\n", __func__); + LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d\n", + __func__, original_width, original_height, + res.overview_size.width, res.overview_size.height, + res.refined_size.width, res.refined_size.height); + for (int y = 0; y < refine_size.height; y += slice_size) { for (int x = 0; x < refine_size.width; x += slice_size) { slice_coordinates slice; @@ -3066,13 +3090,16 @@ struct llava_uhd { slice.size.width = std::min(slice_size, refine_size.width - x); slice.size.height = std::min(slice_size, refine_size.height - y); res.slices.push_back(slice); - if (x == 0) { - res.grid_size.width++; - } + LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n", + __func__, (int)res.slices.size() - 1, + slice.x, slice.y, slice.size.width, slice.size.height); } - res.grid_size.height++; } + res.grid_size.height = refine_size.height / slice_size; + res.grid_size.width = refine_size.width / slice_size; + LOG_DBG("%s: grid size: %d x %d\n", __func__, res.grid_size.width, res.grid_size.height); + return res; } @@ -3081,17 +3108,23 @@ struct llava_uhd { auto best_size = get_best_resize(original_size, slice_size, patch_size, !has_slices); res.overview_size = best_size; - if (!has_slices) { - // skip slicing logic - res.refined_size = clip_image_size{0, 0}; - res.grid_size = clip_image_size{0, 0}; + { + const int max_slice_nums = 9; // TODO: this is only used by minicpmv, maybe remove it + const float log_ratio = log((float)original_width / original_height); + const float ratio = (float)original_width * original_height / (slice_size * slice_size); + const int multiple = fmin(ceil(ratio), max_slice_nums); - } else { auto best_grid = get_best_grid(max_slice_nums, multiple, log_ratio); auto refine_size = get_refine_size(original_size, best_grid, slice_size, patch_size, true); res.grid_size = best_grid; res.refined_size = refine_size; + LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d, grid size: %d x %d\n", + __func__, original_width, original_height, + res.overview_size.width, res.overview_size.height, + res.refined_size.width, res.refined_size.height, + res.grid_size.width, res.grid_size.height); + int width = refine_size.width; int height = refine_size.height; int grid_x = int(width / best_grid.width); @@ -3108,7 +3141,9 @@ struct llava_uhd { slice.size.width = grid_x; slice.size.height = grid_y; res.slices.push_back(slice); - // LOG_INF("slice %d: %d %d %d %d\n", ic, patches_i, patches_j, grid_x, grid_y); + LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n", + __func__, (int)res.slices.size() - 1, + slice.x, slice.y, slice.size.width, slice.size.height); } } } @@ -3166,48 +3201,55 @@ struct llava_uhd { return res; } + static clip_image_size resize_maintain_aspect_ratio(const clip_image_size & orig, const clip_image_size & target_max) { + float scale_width = static_cast(target_max.width) / orig.width; + float scale_height = static_cast(target_max.height) / orig.height; + float scale = std::min(scale_width, scale_height); + return clip_image_size{ + static_cast(orig.width * scale), + static_cast(orig.height * scale), + }; + } + /** * Selects the best 
resolution from a list of possible resolutions based on the original size. * + * For example, when given a list of resolutions: + * - 100x100 + * - 200x100 + * - 100x200 + * - 200x200 + * + * And an input image of size 111x200, then 100x200 is the best fit (least wasted resolution). + * * @param original_size The original size of the image * @param possible_resolutions A list of possible resolutions * @return The best fit resolution */ static clip_image_size select_best_resolution(const clip_image_size & original_size, const std::vector & possible_resolutions) { - int original_width = original_size.width; - int original_height = original_size.height; clip_image_size best_fit; + int min_wasted_area = std::numeric_limits::max(); int max_effective_resolution = 0; - int min_wasted_resolution = std::numeric_limits::max(); - - for (const auto & resolution : possible_resolutions) { - int width = resolution.width; - int height = resolution.height; - float scale = std::min(static_cast(width) / original_width, static_cast(height) / original_height); - int downscaled_width = static_cast(original_width * scale); - int downscaled_height = static_cast(original_height * scale); - int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height); - int wasted_resolution = (width * height) - effective_resolution; - // LOG_INF("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution); - if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) { + + for (const clip_image_size & candidate : possible_resolutions) { + auto target_size = resize_maintain_aspect_ratio(original_size, candidate); + int effective_resolution = std::min( + target_size.width * target_size.height, + original_size.width * original_size.height); + int wasted_area = (candidate.width * candidate.height) - effective_resolution; + + if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_area < min_wasted_area)) { max_effective_resolution = effective_resolution; - min_wasted_resolution = wasted_resolution; - best_fit = resolution; + min_wasted_area = wasted_area; + best_fit = candidate; } + + LOG_DBG("%s: candidate: %d x %d, target: %d x %d, wasted: %d, effective: %d\n", __func__, candidate.width, candidate.height, target_size.width, target_size.height, wasted_area, effective_resolution); } return best_fit; } - // used by llava 1.6 with custom list of pinpoints - static clip_image_size select_best_resolution(const std::vector & pinpoints, const clip_image_size & original_size) { - std::vector possible_resolutions; // TODO @ngxson : construct this inside hparams, not here - for (size_t i = 0; i < pinpoints.size(); i += 2) { - possible_resolutions.push_back(clip_image_size{pinpoints[i], pinpoints[i+1]}); - } - return select_best_resolution(original_size, possible_resolutions); - } - static int ensure_divide(int length, int patch_size) { return std::max(static_cast(std::round(static_cast(length) / patch_size) * patch_size), patch_size); } @@ -3331,7 +3373,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str return true; } else if (ctx->proj_type() == PROJECTOR_TYPE_LLAMA4) { - GGML_ASSERT(!params.image_grid_pinpoints.empty()); + GGML_ASSERT(!params.image_res_candidates.empty()); auto const inst = 
llava_uhd::get_slice_instructions(ctx, original_size); std::vector imgs = llava_uhd::slice_image(img, inst); @@ -3371,7 +3413,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str res_imgs->entries.push_back(std::move(res)); return true; - } else if (!params.image_grid_pinpoints.empty()) { + } else if (!params.image_res_candidates.empty()) { // "spatial_unpad" with "anyres" processing for llava-1.6 auto const inst = llava_uhd::get_slice_instructions(ctx, original_size); std::vector imgs = llava_uhd::slice_image(img, inst); @@ -3431,17 +3473,6 @@ const char * clip_patch_merge_type(const struct clip_ctx * ctx) { return ctx->model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat"; } -const int32_t * clip_image_grid(const struct clip_ctx * ctx) { - if (ctx->model.hparams.image_grid_pinpoints.size()) { - return &ctx->model.hparams.image_grid_pinpoints.front(); - } - return nullptr; -} - -size_t get_clip_image_grid_size(const struct clip_ctx * ctx) { - return ctx->model.hparams.image_grid_pinpoints.size(); -} - int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img) { const auto & params = ctx->model.hparams; const int n_total = clip_n_output_tokens(ctx, img); diff --git a/tools/mtmd/clip.h b/tools/mtmd/clip.h index cb2eb261fe2e8..08f3efb7b1daf 100644 --- a/tools/mtmd/clip.h +++ b/tools/mtmd/clip.h @@ -46,9 +46,6 @@ int32_t clip_get_hidden_size(const struct clip_ctx * ctx); // TODO: should be enum, not string const char * clip_patch_merge_type(const struct clip_ctx * ctx); -const int32_t * clip_image_grid(const struct clip_ctx * ctx); -size_t get_clip_image_grid_size(const struct clip_ctx * ctx); - int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img); // for M-RoPE, this will be the number of token positions in X and Y directions diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp index 8573f11437f1b..e3829738338c3 100644 --- a/tools/mtmd/mtmd.cpp +++ b/tools/mtmd/mtmd.cpp @@ -501,7 +501,10 @@ struct mtmd_tokenizer { || ctx->slice_tmpl == MTMD_SLICE_TMPL_MINICPMV_2_6 || ctx->slice_tmpl == MTMD_SLICE_TMPL_LLAMA4 ) { + const int n_col = batch_f32.grid_x; + const int n_row = batch_f32.grid_y; // split batch into chunks of single images + // NOTE: batch_f32 will be invalidated after this call auto chunks = split_batch_to_chunk(std::move(batch_f32), bitmap->id); GGML_ASSERT(chunks.size() > 0); @@ -521,8 +524,7 @@ struct mtmd_tokenizer { // add slices (or tiles) if (!chunks.empty()) { - const int n_col = batch_f32.grid_x; - const int n_row = batch_f32.grid_y; + GGML_ASSERT((int)chunks.size() == n_row * n_col); if (ctx->tok_slices_start != LLAMA_TOKEN_NULL) { add_text({ctx->tok_slices_start}); } From 51c17663c81524eccc5772a543c132cd494a3e7a Mon Sep 17 00:00:00 2001 From: Charles Xu Date: Wed, 18 Jun 2025 13:40:07 +0200 Subject: [PATCH 091/192] ggml: Add Apple support for GGML_CPU_ALL_VARIANTS (#14258) --- ggml/src/CMakeLists.txt | 4 ++++ ggml/src/ggml-cpu/CMakeLists.txt | 3 +++ 2 files changed, 7 insertions(+) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index 17c9366f4a3cf..0c453741b5d84 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -330,6 +330,10 @@ if (GGML_CPU_ALL_VARIANTS) ggml_add_cpu_backend_variant(android_armv8.2_1 DOTPROD) ggml_add_cpu_backend_variant(android_armv8.2_2 DOTPROD FP16_VECTOR_ARITHMETIC) ggml_add_cpu_backend_variant(android_armv8.6_1 DOTPROD FP16_VECTOR_ARITHMETIC MATMUL_INT8) + elseif (APPLE) + 
ggml_add_cpu_backend_variant(apple_m1 DOTPROD) + ggml_add_cpu_backend_variant(apple_m2_m3 DOTPROD MATMUL_INT8) + ggml_add_cpu_backend_variant(apple_m4 DOTPROD MATMUL_INT8 NOSVE SME) else() message(FATAL_ERROR "Unsupported ARM target OS: ${CMAKE_SYSTEM_NAME}") endif() diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index 3bd1b0507e2cb..df00340570baa 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -190,6 +190,9 @@ function(ggml_add_cpu_backend_variant_impl tag_name) set(ARCH_TAGS "${ARCH_TAGS}+sve2") list(APPEND ARCH_DEFINITIONS GGML_USE_SVE2) endif() + if (GGML_INTERNAL_NOSVE) + set(ARCH_TAGS "${ARCH_TAGS}+nosve") + endif() if (GGML_INTERNAL_SME) set(ARM_MCPU "armv9.2-a") set(ARCH_TAGS "${ARCH_TAGS}+sme") From 708fcea384537c8d39de2f9b55c7c352f51fd842 Mon Sep 17 00:00:00 2001 From: Aaron Teo Date: Thu, 19 Jun 2025 01:06:49 +0800 Subject: [PATCH 092/192] ggml-cpu: fix uncaught underscore terminators (#14023) Signed-off-by: Aaron Teo --- ggml/src/ggml-cpu/ggml-cpu-impl.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h index bbd93c0ef66fe..73a8f93987aa3 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-impl.h +++ b/ggml/src/ggml-cpu/ggml-cpu-impl.h @@ -371,7 +371,7 @@ inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) #define vec_xor(a, b) ((a) ^ (b)) // Vector XOR #endif -typedef signed char char8x16_t __attribute__((vector_size(16))); +typedef signed char char8x16_t __attribute__((vector_size(16))); typedef unsigned char uchar8x16_t __attribute__((vector_size(16))); typedef int8_t int8x16_t __attribute__((vector_size(16))); @@ -382,10 +382,10 @@ typedef uint8_t uint8x16_t __attribute__((vector_size(16))); typedef uint16_t uint16x8_t __attribute__((vector_size(16))); typedef uint32_t uint32x4_t __attribute__((vector_size(16))); -typedef float float32x4_t __attribute__((vector_size(16))); -typedef double double64x2_t __attribute((vector_size(16))); +typedef float float32x4_t __attribute__((vector_size(16))); +typedef double double64x2_t __attribute__((vector_size(16))); -typedef signed long long long64x2_t __attribute((vector_size(16))); +typedef signed long long long64x2_t __attribute__((vector_size(16))); typedef unsigned long long ulong64x2_t __attribute__((vector_size(16))); typedef struct ggml_uint8x16x2_t { From 13c556ebf1e2b1bb6d4f3cd7f72c6c05c3f87c98 Mon Sep 17 00:00:00 2001 From: Aaron Teo Date: Thu, 19 Jun 2025 01:10:08 +0800 Subject: [PATCH 093/192] ggml-cpu: reduce asm calls for hsum (#14037) Signed-off-by: Aaron Teo --- ggml/src/ggml-cpu/simd-mappings.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ggml/src/ggml-cpu/simd-mappings.h b/ggml/src/ggml-cpu/simd-mappings.h index 2e3669c0186c9..e42364c59aa10 100644 --- a/ggml/src/ggml-cpu/simd-mappings.h +++ b/ggml/src/ggml-cpu/simd-mappings.h @@ -944,10 +944,8 @@ static inline void __lsx_f16x4_store(ggml_fp16_t * x, __m128 y) { for (int i = 0; i < offset; ++i) { \ x[i] = vec_add(x[i], x[offset + i]); \ } \ - res = vec_extract(x[0], 0) + \ - vec_extract(x[0], 1) + \ - vec_extract(x[0], 2) + \ - vec_extract(x[0], 3); \ + float32x4_t tmp = x[0] + vec_reve(x[0]); \ + res = tmp[0] + tmp[1]; \ } #define GGML_F32_VEC GGML_F32x4 From 6e9c823ff490e9c1ac256e39f1f58d58b881bd85 Mon Sep 17 00:00:00 2001 From: Aaron Teo Date: Thu, 19 Jun 2025 01:10:26 +0800 Subject: [PATCH 094/192] docs: add s390x build documentation (#14264) * docs: add 
s390x-specific build docs Signed-off-by: Aaron Teo * docs: add s390x model conversion steps Signed-off-by: Aaron Teo * docs: s390x build indent Signed-off-by: Aaron Teo * docs: update hyperlinks for s390x docs Signed-off-by: Aaron Teo * docs: update llama.h docs Signed-off-by: Aaron Teo * docs: s390x add accelerator and perf optimizations Signed-off-by: Aaron Teo * docs: s390x indent blocks Signed-off-by: Aaron Teo * docs: revert block indentation Signed-off-by: Aaron Teo * docs: add support information for s390x Signed-off-by: Aaron Teo * docs: s390x reword Signed-off-by: Aaron Teo * docs: remove indentation for accelerator section s390x Signed-off-by: Aaron Teo * docs: remove redundant words s390x Signed-off-by: Aaron Teo * docs: reword for s390x Signed-off-by: Aaron Teo * docs: s390x reword simd Signed-off-by: Aaron Teo * docs: fix trailing whitespace for s390x Signed-off-by: Aaron Teo --------- Signed-off-by: Aaron Teo --- docs/build-s390x.md | 157 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 157 insertions(+) create mode 100644 docs/build-s390x.md diff --git a/docs/build-s390x.md b/docs/build-s390x.md new file mode 100644 index 0000000000000..f44038c586ddc --- /dev/null +++ b/docs/build-s390x.md @@ -0,0 +1,157 @@ +> [!IMPORTANT] +> This build documentation is specific only to IBM Z & LinuxONE mainframes (s390x). You can find the build documentation for other architectures: [build.md](build.md). + +# Build llama.cpp locally (for s390x) + +The main product of this project is the `llama` library. Its C-style interface can be found in [include/llama.h](../include/llama.h). + +The project also includes many example programs and tools using the `llama` library. The examples range from simple, minimal code snippets to sophisticated sub-projects such as an OpenAI-compatible HTTP server. + +**To get the code:** + +```bash +git clone https://github.com/ggml-org/llama.cpp +cd llama.cpp +``` + +## CPU Build with BLAS + +Building llama.cpp with BLAS support is highly recommended as it has shown to provide performance improvements. + +```bash +cmake -S . -B build \ + -DCMAKE_BUILD_TYPE=Release \ + -DGGML_BLAS=ON \ + -DGGML_BLAS_VENDOR=OpenBLAS + +cmake --build build --config Release -j $(nproc) +``` + +**Notes**: +- For faster repeated compilation, install [ccache](https://ccache.dev/) +- By default, VXE/VXE2 is enabled. To disable it (not recommended): + + ```bash + cmake -S . -B build \ + -DCMAKE_BUILD_TYPE=Release \ + -DGGML_BLAS=ON \ + -DGGML_BLAS_VENDOR=OpenBLAS \ + -DGGML_VXE=OFF + + cmake --build build --config Release -j $(nproc) + ``` + +- For debug builds: + + ```bash + cmake -S . -B build \ + -DCMAKE_BUILD_TYPE=Debug \ + -DGGML_BLAS=ON \ + -DGGML_BLAS_VENDOR=OpenBLAS + + cmake --build build --config Debug -j $(nproc) + ``` + +- For static builds, add `-DBUILD_SHARED_LIBS=OFF`: + + ```bash + cmake -S . -B build \ + -DCMAKE_BUILD_TYPE=Release \ + -DGGML_BLAS=ON \ + -DGGML_BLAS_VENDOR=OpenBLAS \ + -DBUILD_SHARED_LIBS=OFF + + cmake --build build --config Release -j $(nproc) + ``` + +## Getting GGUF Models + +All models need to be converted to Big-Endian. You can achieve this in three cases: + +1. **Use pre-converted models verified for use on IBM Z & LinuxONE (easiest)** + + You can find popular models pre-converted and verified at [s390x Ready Models](hf.co/collections/taronaeo/s390x-ready-models-672765393af438d0ccb72a08). + + These models and their respective tokenizers are verified to run correctly on IBM Z & LinuxONE. + +2. 
**Convert safetensors model to GGUF Big-Endian directly (recommended)** + + ```bash + python3 convert_hf_to_gguf.py \ + --outfile model-name-be.f16.gguf \ + --outtype f16 \ + --bigendian \ + model-directory/ + ``` + + For example, + + ```bash + python3 convert_hf_to_gguf.py \ + --outfile granite-3.3-2b-instruct-be.f16.gguf \ + --outtype f16 \ + --bigendian \ + granite-3.3-2b-instruct/ + ``` + +3. **Convert existing GGUF Little-Endian model to Big-Endian** + + ```bash + python3 gguf-py/gguf/scripts/gguf_convert_endian.py model-name.f16.gguf BIG + ``` + + For example, + ```bash + python3 gguf-py/gguf/scripts/gguf_convert_endian.py granite-3.3-2b-instruct-le.f16.gguf BIG + mv granite-3.3-2b-instruct-le.f16.gguf granite-3.3-2b-instruct-be.f16.gguf + ``` + + **Notes:** + - The GGUF endian conversion script may not support all data types at the moment and may fail for some models/quantizations. When that happens, please try manually converting the safetensors model to GGUF Big-Endian via Step 2. + +## IBM Accelerators + +### 1. SIMD Acceleration + +Only available in IBM z15 or later system with the `-DGGML_VXE=ON` (turned on by default) compile flag. No hardware acceleration is possible with llama.cpp with older systems, such as IBM z14 or EC13. In such systems, the APIs can still run but will use a scalar implementation. + +### 2. zDNN Accelerator + +*Only available in IBM z16 or later system. No direction at the moment.* + +### 3. Spyre Accelerator + +*No direction at the moment.* + +## Performance Tuning + +### 1. Virtualization Setup + +It is strongly recommended to use only LPAR (Type-1) virtualization to get the most performance. + +Note: Type-2 virtualization is not supported at the moment, while you can get it running, the performance will not be the best. + +### 2. IFL (Core) Count + +It is recommended to allocate a minimum of 8 shared IFLs assigned to the LPAR. Increasing the IFL count past 8 shared IFLs will only improve Prompt Processing performance but not Token Generation. + +Note: IFL count does not equate to vCPU count. + +### 3. SMT vs NOSMT (Simultaneous Multithreading) + +It is strongly recommended to disable SMT via the kernel boot parameters as it negatively affects performance. Please refer to your Linux distribution's guide on disabling SMT via kernel boot parameters. + +### 4. BLAS vs NOBLAS + +IBM VXE/VXE2 SIMD acceleration depends on the BLAS implementation. It is strongly recommended to use BLAS. + +## Getting Help on IBM Z & LinuxONE + +1. **Bugs, Feature Requests** + + Please file an issue in llama.cpp and ensure that the title contains "s390x". + +2. **Other Questions** + + Please reach out directly to [aionz@us.ibm.com](mailto:aionz@us.ibm.com). 
+ From 0db418da9cefb75b065eaed60134c2e126b1cf0c Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 19 Jun 2025 08:05:21 +0300 Subject: [PATCH 095/192] metal : add mean kernel (#14267) * metal : add mean kernel ggml-ci * cont : dedup implementation ggml-ci --- ggml/src/ggml-metal/ggml-metal.m | 33 ++++++++++++++++--- ggml/src/ggml-metal/ggml-metal.metal | 48 ++++++++++++++++++++++------ 2 files changed, 67 insertions(+), 14 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index bc93bc633a49b..4e7f373cb435a 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -498,6 +498,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_COS, GGML_METAL_KERNEL_TYPE_NEG, GGML_METAL_KERNEL_TYPE_SUM_ROWS, + GGML_METAL_KERNEL_TYPE_MEAN, GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32, GGML_METAL_KERNEL_TYPE_ARGMAX, @@ -1454,6 +1455,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_COS, cos, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NEG, neg, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MEAN, mean, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGMAX, argmax, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, pool_2d_avg_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32, pool_2d_max_f32, true); @@ -1653,6 +1655,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex case GGML_OP_LOG: return false; // TODO: implement case GGML_OP_SUM_ROWS: + case GGML_OP_MEAN: case GGML_OP_SOFT_MAX: case GGML_OP_GROUP_NORM: return has_simdgroup_reduction && ggml_is_contiguous(op->src[0]); @@ -2400,11 +2403,30 @@ static bool ggml_metal_encode_node( [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; case GGML_OP_SUM_ROWS: + case GGML_OP_MEAN: { GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); - id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline; + id pipeline = nil; + + switch (dst->op) { + case GGML_OP_SUM_ROWS: + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline; + break; + case GGML_OP_MEAN: + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MEAN].pipeline; + break; + default: + GGML_ABORT("fatal error"); + } + + int nth = 32; // SIMD width + + while (nth < ne00 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) { + nth *= 2; + } + nth = MIN(nth, ne00); ggml_metal_kargs_sum_rows args = { /*.ne00 =*/ ne00, @@ -2434,11 +2456,12 @@ static bool ggml_metal_encode_node( }; [encoder setComputePipelineState:pipeline]; - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - [encoder setBytes:&args length:sizeof(args) atIndex:2]; + [encoder setBytes:&args length:sizeof(args) atIndex:0]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; + [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0]; - [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; case GGML_OP_SOFT_MAX: { diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index 
5d7760217f826..3da19879b4b36 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -993,31 +993,61 @@ kernel void kernel_neg( dst[tpig] = -src0[tpig]; } +template kernel void kernel_sum_rows( + constant ggml_metal_kargs_sum_rows & args, device const float * src0, device float * dst, - constant ggml_metal_kargs_sum_rows & args, - uint3 tpig[[thread_position_in_grid]]) { - int64_t i3 = tpig.z; - int64_t i2 = tpig.y; - int64_t i1 = tpig.x; + threadgroup float * shmem_f32 [[threadgroup(0)]], + uint3 tgpig[[threadgroup_position_in_grid]], + ushort3 tpitg[[thread_position_in_threadgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort3 ntg[[threads_per_threadgroup]]) { + int64_t i3 = tgpig.z; + int64_t i2 = tgpig.y; + int64_t i1 = tgpig.x; if (i3 >= args.ne03 || i2 >= args.ne02 || i1 >= args.ne01) { return; } + if (sgitg == 0) { + shmem_f32[tiisg] = 0.0f; + } + device const float * src_row = (device const float *) ((device const char *) src0 + i1*args.nb01 + i2*args.nb02 + i3*args.nb03); device float * dst_row = (device float *) ((device char *) dst + i1*args.nb1 + i2*args.nb2 + i3*args.nb3); - float row_sum = 0; + float sumf = 0; - for (int64_t i0 = 0; i0 < args.ne00; i0++) { - row_sum += src_row[i0]; + for (int64_t i0 = tpitg.x; i0 < args.ne00; i0 += ntg.x) { + sumf += src_row[i0]; } - dst_row[0] = row_sum; + sumf = simd_sum(sumf); + + threadgroup_barrier(mem_flags::mem_threadgroup); + + if (tiisg == 0) { + shmem_f32[sgitg] = sumf; + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + + sumf = shmem_f32[tiisg]; + sumf = simd_sum(sumf); + + if (tpitg.x == 0) { + dst_row[0] = norm ? sumf / args.ne00 : sumf; + } } +typedef decltype(kernel_sum_rows) kernel_sum_rows_t; + +template [[host_name("kernel_sum_rows")]] kernel kernel_sum_rows_t kernel_sum_rows; +template [[host_name("kernel_mean")]] kernel kernel_sum_rows_t kernel_sum_rows; + template kernel void kernel_soft_max( device const char * src0, From 0fc0feb0bb942b7cc0c3ae2453b0e15b175c6e7e Mon Sep 17 00:00:00 2001 From: Gabe Goodhart Date: Thu, 19 Jun 2025 00:08:14 -0500 Subject: [PATCH 096/192] memory : Hybrid recurrent cache (#13979) * feat: Add llama_model_is_hybrid API call Also, split llama_model_is_recurrent into llm_arch_is_recurrent in llama-arch with llama_model_is_recurrent delegating to llm_arch_is_recurrent. The same split is done for hybird. This is needed because there are places where the llama_model has not yet been initialized but we need to check if the model is recurrent (specifically for the per-layer recurrent check array in hparams). Branch: GraniteFour Signed-off-by: Gabe Goodhart * feat: Add c++ side constants for attention layer indices hparam Branch: GraniteFour * feat: Add support for distinguishing recurrent vs non-recurrent layers in hparams Branch: GraniteFour Signed-off-by: Gabe Goodhart * feat: Auto-fill hparams.recurrent_layer_arr based on whether the model is recurrent Branch: GraniteFour Signed-off-by: Gabe Goodhart * refactor: rename *_is_hybrid -> *_is_hybrid_recurrent The implementation of the hybrid cache intentionally does not specify the types of the child caches, so there was a naming mismatch with these predicate functions that used "hybrid" to imply "hybrid recurrent." 
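A minimal sketch of the arch-level split described in the bullets above, following the llm_arch_is_recurrent hunk that appears later in this patch; the delegating wrapper and the hparams auto-fill are simplified stand-ins, not the exact final code.

```cpp
// Sketch only: the predicate lives at the arch level so it can be queried
// before the llama_model is fully constructed, e.g. when filling the
// per-layer recurrent flags in hparams.
bool llm_arch_is_recurrent(const llm_arch & arch) {
    switch (arch) {
        case LLM_ARCH_MAMBA:
        case LLM_ARCH_RWKV6:
        case LLM_ARCH_RWKV6QWEN2:
        case LLM_ARCH_RWKV7:
        case LLM_ARCH_ARWKV7:
            return true;
        default:
            return false;
    }
}

// the public model-level API then simply delegates to the arch-level check
bool llama_model_is_recurrent(const llama_model * model) {
    return llm_arch_is_recurrent(model->arch);
}

// (inside the hparams-loading code) the per-layer flag array can be
// auto-filled from the same predicate
std::fill(hparams.recurrent_layer_arr.begin(),
          hparams.recurrent_layer_arr.end(),
          llm_arch_is_recurrent(arch));
```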
Branch: HybridCache Signed-off-by: Gabe Goodhart * feat: Add layer filter to recurrent cache Branch: HybridCache Signed-off-by: Gabe Goodhart * fix: Use per-layer sizing everywhere in kv caches Branch: GraniteFour Signed-off-by: Gabe Goodhart * feat: First pass at llama_kv_cache_hybrid_recurrent This follows the pattern in iswa where the two child caches are held explicitly to support the case where a model requires a single attention cache and a single recurrent cache where each layer uses exactly one of the caches. This is a rewrite of the more generic approach in the original hybrid cache PR: https://github.com/ggml-org/llama.cpp/pull/13276 Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * feat: Construct hybrid recurrent cache for hybrid recurrent models This includes a refactor of the create_memory logic to avoid needing to use the arch enum explicitly unless a model needs explicit cache instantiation logic beyond the standard logic for recurrent, hybrid, unified, and iswa. Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * fix: Fix wrong bool condition for split equal in hybrid cache Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * fix: Fix shift logic to defer to unified cache Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * feat: Support hybrid recurrent in llama-graph NOTE: I intentionally did not add support for s_mask since it will be going away soon Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * fix: Fix logic for initializing inputs and attn layers for hybrid caches Branch: GraniteFour Signed-off-by: Gabe Goodhart * fix: Update recurrent cache for changes to remove intermediate kv_cache interface Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * fix: Fix status for init_update sig for recurrent cache state Branch: GraniteFour Signed-off-by: Gabe Goodhart * fix: Add missing padding to n_ctx for hybrid cache construction Branch: GraniteFour Signed-off-by: Gabe Goodhart * fix: Update clear signature for data argument after rebase Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * fix: Remove errant virtual destructor leftover from previous impl attempt Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * fix: Use per-layer n_embd_k/v_s calls for mamba (1) layers Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * refactor: Remove n_embd_k/v_s from unified cache No longer needed now that unified isn't also supporting recurrent https://github.com/ggml-org/llama.cpp/pull/13979#discussion_r2140761069 Branch: HybridRecurrentCache * refactor: Remove layer index from n_embd_k/v_s Now that it's not used at all in the unified cache, we don't need to use the layer index to zero it out for attention layers. Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * refactor: Remove n_embd_k/v_gqa from recurrent cache This is no longer needed now that there are separate implementations https://github.com/ggml-org/llama.cpp/pull/13979#discussion_r2140825128 Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * feat: Allow custom layer filters for hybrid recurrent This should help support architectures like Falcon H1 where there is overlap between layers that need attention and recurrent caches. 
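A rough sketch of the layer-routing idea described above; the callback type and the lambdas are illustrative assumptions, with only hparams.is_recurrent(il) taken from this patch.

```cpp
// Sketch: the hybrid memory owns one attention (KV) child cache and one
// recurrent child cache, and per-layer filters decide which child a layer uses.
using layer_filter_cb = std::function<bool(int32_t il)>;

layer_filter_cb filter_attn = [&](int32_t il) { return !hparams.is_recurrent(il); };
layer_filter_cb filter_recr = [&](int32_t il) { return  hparams.is_recurrent(il); };

// an architecture like Falcon H1, where the same layer needs both attention
// and a recurrent state, could pass overlapping filters instead
```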
https://github.com/ggml-org/llama.cpp/pull/13979#discussion_r2140748922 Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart
* fix: Remove logits_all after rebase Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart
* fix: Remove llama_model_is_hybrid_Recurrent public API https://github.com/ggml-org/llama.cpp/pull/13979#discussion_r2141728423 Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart
* refactor: Use llama_memory_state_ptr for child states in hybrid memory state Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart
* feat: Overhaul build_recurrent_state / build_inp_s_copy to match attention pattern https://github.com/ggml-org/llama.cpp/pull/13979/files#r2141701738 This is a big overhaul to bring consistency between how inputs and per-layer components are created for attention layers and recurrent layers. The main changes are: - Rename class llm_graph_input_s_copy -> llm_graph_input_rs - Add a corresponding llm_graph_input_rs_hybrid_recurrent - Rename build_inp_s_copy -> build_rs_inp_recurrent - Add a corresponding build_rs_inp_hybrid_recurrent - Rename build_recurrent_state -> build_rs to match build_attn w/ llm_graph_input_rs as the first input - Add a corresponding overload of build_rs w/ llm_graph_input_rs_hybrid_recurrent as the first input - Add a llm_graph_input_attn_kv_hybrid_recurrent analogous to llm_graph_input_attn_kv_unified - Add a build_attn override that takes llm_graph_input_attn_kv_hybrid_recurrent as the first input This makes the two paradigms fully consistent. The main drawback is the code duplication in the build_attn and build_rs implementations where the only difference between implementations is how they cast the memory state. Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart
* fix: Fix resize vs reserve and skip null tensors in size computation https://github.com/ggml-org/llama.cpp/pull/13979/files#r2149469788 Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart Co-Authored-By: @younesbelkada
* fix: Fix initialization of child states Since initially writing this PR, the logic in the child state types changed such that using the "init full" signature and keeping the ubatches on the parent struct no longer worked. Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart
* refactor: Use a common build_recurrent_state method that is cache-agnostic This reduces the code duplication between the different build_rs impls and also retains a similar signature to the previous build_recurrent_state method while standardizing on the input-dispatched build_rs implementation. Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart
* recurrent : rework graph inputs + add TODOs ggml-ci
* refactor: Make status and child states const in hybrid and iswa Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart
* refactor: Rename llama_kv_cache_[recurrent|hybrid_recurrent] to remove kv cache This removes the notion of "kv" from the interface names for these memory types. There are still many references to kv in the implementation of the recurrent memory which will need further adjustment. Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart
* refactor: _recurrent -> _recr for brevity It just _happens_ to have the same number of letters as _attn!
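A schematic usage sketch of the input-dispatched pattern described above, pieced together from the build_rs_inp and build_rs signatures added later in this patch; the memory accessors (get_r_l / get_s_l) and the surrounding loop are placeholders, not code from the patch.

```cpp
// Sketch: the typed recurrent-state input is built once per graph and passed
// to every state read, mirroring how build_attn consumes its own input object.
llm_graph_input_rs * rs_inp = build_rs_inp();

for (int il = 0; il < n_layer; ++il) {
    // e.g. a Mamba-style layer: load the rolling (conv) state and the SSM state
    // for this layer's sequences; sizes come from hparams.n_embd_r() / n_embd_s()
    ggml_tensor * conv_states = build_rs(rs_inp, gf, mem->get_r_l(il), hparams.n_embd_r(), n_seqs);
    ggml_tensor * ssm_states  = build_rs(rs_inp, gf, mem->get_s_l(il), hparams.n_embd_s(), n_seqs);

    // ... layer computation, then the updated states are copied back into the cache ...
}
```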
Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * style: Fix spacing for ref Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * refactor: recurrent_layer() -> is_recurrent() Branch: HybridRecurrentCache Signed-off-by: Gabe Goodhart * style: Fix spacing for size_s_bytes declaration Co-authored-by: Georgi Gerganov --------- Signed-off-by: Gabe Goodhart Co-authored-by: Georgi Gerganov --- src/CMakeLists.txt | 3 +- src/llama-arch.cpp | 23 ++ src/llama-arch.h | 4 + src/llama-graph.cpp | 265 ++++++++---- src/llama-graph.h | 101 ++++- src/llama-hparams.cpp | 8 +- src/llama-hparams.h | 10 +- src/llama-kv-cache-unified-iswa.cpp | 32 +- src/llama-kv-cache-unified-iswa.h | 8 +- src/llama-kv-cache-unified.cpp | 16 +- src/llama-memory-hybrid.cpp | 247 +++++++++++ src/llama-memory-hybrid.h | 143 +++++++ ...current.cpp => llama-memory-recurrent.cpp} | 389 +++++++++--------- ...e-recurrent.h => llama-memory-recurrent.h} | 70 ++-- src/llama-model.cpp | 228 +++++----- 15 files changed, 1085 insertions(+), 462 deletions(-) create mode 100644 src/llama-memory-hybrid.cpp create mode 100644 src/llama-memory-hybrid.h rename src/{llama-kv-cache-recurrent.cpp => llama-memory-recurrent.cpp} (67%) rename src/{llama-kv-cache-recurrent.h => llama-memory-recurrent.h} (72%) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 70be604e4b0d3..8f9cd652447ab 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -22,8 +22,9 @@ add_library(llama llama-io.cpp llama-kv-cache-unified.cpp llama-kv-cache-unified-iswa.cpp - llama-kv-cache-recurrent.cpp llama-memory.cpp + llama-memory-hybrid.cpp + llama-memory-recurrent.cpp llama-mmap.cpp llama-model-loader.cpp llama-model-saver.cpp diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index de8d289cf967e..0bc60565df12c 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -147,6 +147,7 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_ATTENTION_SCALE, "%s.attention.scale" }, { LLM_KV_ATTENTION_KEY_LENGTH_MLA, "%s.attention.key_length_mla" }, { LLM_KV_ATTENTION_VALUE_LENGTH_MLA, "%s.attention.value_length_mla" }, + { LLM_KV_ATTENTION_LAYER_INDICES, "%s.attention.layer_indices" }, { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" }, { LLM_KV_ROPE_DIMENSION_SECTIONS, "%s.rope.dimension_sections" }, @@ -1816,3 +1817,25 @@ llm_arch llm_arch_from_string(const std::string & name) { const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) { return LLM_TENSOR_INFOS.at(tensor); } + +bool llm_arch_is_recurrent(const llm_arch & arch) { + switch (arch) { + case LLM_ARCH_MAMBA: + case LLM_ARCH_RWKV6: + case LLM_ARCH_RWKV6QWEN2: + case LLM_ARCH_RWKV7: + case LLM_ARCH_ARWKV7: + return true; + default: + return false; + } +} + +bool llm_arch_is_hybrid(const llm_arch & arch) { + // TODO: There are currently no hybrid models! 
Once there are, this will be + // the place to identify them + switch (arch) { + default: + return false; + } +} diff --git a/src/llama-arch.h b/src/llama-arch.h index 3e8a61da3c13e..51b242c66b824 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -151,6 +151,7 @@ enum llm_kv { LLM_KV_ATTENTION_SCALE, LLM_KV_ATTENTION_KEY_LENGTH_MLA, LLM_KV_ATTENTION_VALUE_LENGTH_MLA, + LLM_KV_ATTENTION_LAYER_INDICES, LLM_KV_ROPE_DIMENSION_COUNT, LLM_KV_ROPE_DIMENSION_SECTIONS, @@ -439,3 +440,6 @@ const char * llm_arch_name(llm_arch arch); llm_arch llm_arch_from_string(const std::string & name); const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor); + +bool llm_arch_is_recurrent(const llm_arch & arch); +bool llm_arch_is_hybrid (const llm_arch & arch); diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 337fb5cb0df36..65d98cbbb3987 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -6,7 +6,8 @@ #include "llama-kv-cache-unified.h" #include "llama-kv-cache-unified-iswa.h" -#include "llama-kv-cache-recurrent.h" +#include "llama-memory-hybrid.h" +#include "llama-memory-recurrent.h" #include #include @@ -238,18 +239,18 @@ void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) { } } -void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) { +void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) { GGML_UNUSED(ubatch); - const int64_t n_kv = kv_state->get_n_kv(); + const int64_t n_rs = mem_state->get_n_rs(); if (s_copy) { GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer)); int32_t * data = (int32_t *) s_copy->data; // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n - for (uint32_t i = 0; i < n_kv; ++i) { - data[i] = kv_state->s_copy(i); + for (uint32_t i = 0; i < n_rs; ++i) { + data[i] = mem_state->s_copy(i); } } } @@ -403,6 +404,24 @@ void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) { } } +void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) { + if (self_kq_mask) { + mem_state->get_state_attn()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); + } + + const int64_t n_rs = mem_state->get_state_recr()->get_n_rs(); + + if (s_copy) { + GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer)); + int32_t * data = (int32_t *) s_copy->data; + + // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n + for (uint32_t i = 0; i < n_rs; ++i) { + data[i] = mem_state->get_state_recr()->s_copy(i); + } + } +} + // // llm_graph_context // @@ -961,23 +980,6 @@ ggml_tensor * llm_graph_context::build_inp_cls() const { return cur; } -ggml_tensor * llm_graph_context::build_inp_s_copy() const { - const auto * kv_state = static_cast(mstate); - - auto inp = std::make_unique(kv_state); - - const auto n_kv = kv_state->get_n_kv(); - - auto & cur = inp->s_copy; - - cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_kv); - ggml_set_input(cur); - - res->add_input(std::move(inp)); - - return cur; -} - ggml_tensor * llm_graph_context::build_inp_cross_embd() const { auto inp = std::make_unique(cross); @@ -1047,6 +1049,33 @@ ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_t return pos_bias; } +llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const { + const auto * mem_state = static_cast(mstate); + + auto inp = std::make_unique(hparams, cparams, mem_state); + + { + GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Hybrid recurrent is not supported with SWA attention layers"); + + const auto n_kv = 
inp->mem_state->get_state_attn()->get_n_kv(); + + inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); + //cb(inp->self_kq_mask, "KQ_mask", -1); + ggml_set_input(inp->self_kq_mask); + + inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; + } + + { + const auto n_rs = mem_state->get_state_recr()->get_n_rs(); + + inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs); + ggml_set_input(inp->s_copy); + } + + return (llm_graph_input_mem_hybrid *) res->add_input(std::move(inp)); +} + ggml_tensor * llm_graph_context::build_attn_mha( ggml_cgraph * gf, ggml_tensor * q, @@ -1291,36 +1320,6 @@ ggml_tensor * llm_graph_context::build_attn( return cur; } -llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const { - const auto * kv_state = static_cast(mstate); - - auto inp = std::make_unique(hparams, cparams, kv_state); - - { - const auto n_kv = kv_state->get_base()->get_n_kv(); - - inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); - //cb(inp->self_kq_mask, "KQ_mask", -1); - ggml_set_input(inp->self_kq_mask); - - inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; - } - - { - GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified for non-SWA"); - - const auto n_kv = kv_state->get_swa()->get_n_kv(); - - inp->self_kq_mask_swa = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); - //cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1); - ggml_set_input(inp->self_kq_mask_swa); - - inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa; - } - - return (llm_graph_input_attn_kv_unified_iswa *) res->add_input(std::move(inp)); -} - ggml_tensor * llm_graph_context::build_attn( llm_graph_input_attn_kv_unified_iswa * inp, ggml_cgraph * gf, @@ -1430,20 +1429,99 @@ ggml_tensor * llm_graph_context::build_attn( return cur; } -ggml_tensor * llm_graph_context::build_recurrent_state( - ggml_cgraph * gf, - ggml_tensor * s, - ggml_tensor * state_copy, - int32_t state_size, - int32_t n_seqs, - bool avoid_copies) const { - const auto * kv_state = static_cast(mstate); - - const auto n_kv = kv_state->get_n_kv(); - const auto kv_head = kv_state->get_head(); - const auto rs_zero = kv_state->get_rs_z(); +ggml_tensor * llm_graph_context::build_attn( + llm_graph_input_mem_hybrid * inp, + ggml_cgraph * gf, + ggml_tensor * wo, + ggml_tensor * wo_b, + ggml_tensor * q_cur, + ggml_tensor * k_cur, + ggml_tensor * v_cur, + ggml_tensor * kq_b, + ggml_tensor * v_mla, + float kq_scale, + int il) const { + // these nodes are added to the graph together so that they are not reordered + // by doing so, the number of splits in the graph is reduced + ggml_build_forward_expand(gf, q_cur); + ggml_build_forward_expand(gf, k_cur); + ggml_build_forward_expand(gf, v_cur); + + const auto * kv_state = static_cast(mstate)->get_state_attn(); + + // store to KV cache + { + ggml_build_forward_expand(gf, kv_state->cpy_k(ctx0, k_cur, il)); + ggml_build_forward_expand(gf, kv_state->cpy_v(ctx0, v_cur, il)); + } + + const auto & kq_mask = inp->get_kq_mask(); + + ggml_tensor * q = q_cur; + ggml_tensor * k = kv_state->get_k(ctx0, il); + ggml_tensor * v = kv_state->get_v(ctx0, il); + + ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, kq_scale); + 
cb(cur, "kqv_out", il); + + if (wo) { + cur = build_lora_mm(wo, cur); + if (arch == LLM_ARCH_GLM4) { + // GLM4 seems to have numerical issues with half-precision accumulators + ggml_mul_mat_set_prec(cur, GGML_PREC_F32); + } + } + + if (wo_b) { + cur = ggml_add(ctx0, cur, wo_b); + } + + return cur; +} + +llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const { + const auto * kv_state = static_cast(mstate); + + auto inp = std::make_unique(hparams, cparams, kv_state); - ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, kv_state->get_size()); + { + const auto n_kv = kv_state->get_base()->get_n_kv(); + + inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); + //cb(inp->self_kq_mask, "KQ_mask", -1); + ggml_set_input(inp->self_kq_mask); + + inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; + } + + { + GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified for non-SWA"); + + const auto n_kv = kv_state->get_swa()->get_n_kv(); + + inp->self_kq_mask_swa = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); + //cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1); + ggml_set_input(inp->self_kq_mask_swa); + + inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa; + } + + return (llm_graph_input_attn_kv_unified_iswa *) res->add_input(std::move(inp)); +} + +ggml_tensor * llm_graph_context::build_rs( + ggml_cgraph * gf, + ggml_tensor * s, + ggml_tensor * state_copy, + int32_t state_size, + int32_t n_seqs, + uint32_t n_kv, + uint32_t kv_head, + uint32_t kv_size, + int32_t rs_zero, + bool avoid_copies) const { + + ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, kv_size); // Clear a single state which will then be copied to the other cleared states. // Note that this is a no-op when the view is zero-sized. 
@@ -1474,22 +1552,59 @@ ggml_tensor * llm_graph_context::build_recurrent_state( return output_states; } +llm_graph_input_rs * llm_graph_context::build_rs_inp() const { + const auto * kv_state = static_cast(mstate); + + auto inp = std::make_unique(kv_state); + + const auto n_rs = kv_state->get_n_rs(); + + inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs); + ggml_set_input(inp->s_copy); + + return (llm_graph_input_rs *) res->add_input(std::move(inp)); +} + +ggml_tensor * llm_graph_context::build_rs( + llm_graph_input_rs * inp, + ggml_cgraph * gf, + ggml_tensor * s, + int32_t state_size, + int32_t n_seqs, + bool avoid_copies) const { + const auto * kv_state = static_cast(mstate); + + return build_rs(gf, s, inp->s_copy, state_size, n_seqs, kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(), avoid_copies); +} + +ggml_tensor * llm_graph_context::build_rs( + llm_graph_input_mem_hybrid * inp, + ggml_cgraph * gf, + ggml_tensor * s, + int32_t state_size, + int32_t n_seqs, + bool avoid_copies) const { + const auto * kv_state = static_cast(mstate)->get_state_recr(); + + return build_rs(gf, s, inp->s_copy, state_size, n_seqs, kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(), avoid_copies); +} + ggml_tensor * llm_graph_context::build_rwkv_token_shift_load( - ggml_cgraph * gf, - ggml_tensor * state_copy, - const llama_ubatch & ubatch, + llm_graph_input_rs * inp, + ggml_cgraph * gf, + const llama_ubatch & ubatch, int il) const { - const auto * kv_state = static_cast(mstate); + const auto * kv_state = static_cast(mstate); const auto token_shift_count = hparams.token_shift_count; const int64_t n_seqs = ubatch.n_seqs; - ggml_tensor * token_shift_all = kv_state->get_k_l(il); + ggml_tensor * token_shift_all = kv_state->get_r_l(il); - ggml_tensor * token_shift = build_recurrent_state( - gf, token_shift_all, state_copy, - hparams.n_embd_k_s(), n_seqs); + ggml_tensor * token_shift = build_rs( + inp, gf, token_shift_all, + hparams.n_embd_r(), n_seqs); token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs); @@ -1500,7 +1615,7 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_store( ggml_tensor * token_shift, const llama_ubatch & ubatch, int il) const { - const auto * kv_state = static_cast(mstate); + const auto * kv_state = static_cast(mstate); const auto token_shift_count = hparams.token_shift_count; const auto n_embd = hparams.n_embd; @@ -1512,7 +1627,7 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_store( return ggml_cpy( ctx0, ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0), - ggml_view_1d(ctx0, kv_state->get_k_l(il), hparams.n_embd_k_s()*n_seqs, hparams.n_embd_k_s()*kv_head*ggml_element_size(kv_state->get_k_l(il))) + ggml_view_1d(ctx0, kv_state->get_r_l(il), hparams.n_embd_r()*n_seqs, hparams.n_embd_r()*kv_head*ggml_element_size(kv_state->get_r_l(il))) ); } diff --git a/src/llama-graph.h b/src/llama-graph.h index 87813119b1a3c..58845e284abed 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -21,7 +21,8 @@ struct llama_memory_state_i; class llama_kv_cache_unified_state; class llama_kv_cache_unified_iswa_state; -class llama_kv_cache_recurrent_state; +class llama_memory_recurrent_state; +class llama_memory_hybrid_state; // certain models (typically multi-modal) can produce different types of graphs enum llm_graph_type { @@ -188,16 +189,16 @@ class llm_graph_input_cls : public llm_graph_input_i { const llama_cparams & cparams; }; -class 
llm_graph_input_s_copy : public llm_graph_input_i { +class llm_graph_input_rs : public llm_graph_input_i { public: - llm_graph_input_s_copy(const llama_kv_cache_recurrent_state * kv_state) : kv_state(kv_state) {} - virtual ~llm_graph_input_s_copy() = default; + llm_graph_input_rs(const llama_memory_recurrent_state * mem_state) : mem_state(mem_state) {} + virtual ~llm_graph_input_rs() = default; void set_input(const llama_ubatch * ubatch) override; ggml_tensor * s_copy; // I32 [kv_size] - const llama_kv_cache_recurrent_state * kv_state; + const llama_memory_recurrent_state * mem_state; }; class llm_graph_input_cross_embd : public llm_graph_input_i { @@ -300,6 +301,33 @@ class llm_graph_input_attn_cross : public llm_graph_input_i { const llama_cross * cross = nullptr; }; +class llm_graph_input_mem_hybrid : public llm_graph_input_i { +public: + llm_graph_input_mem_hybrid( + const llama_hparams & hparams, + const llama_cparams & cparams, + const llama_memory_hybrid_state * mem_state) : + hparams(hparams), + cparams(cparams), + mem_state(mem_state) { + } + virtual ~llm_graph_input_mem_hybrid() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * s_copy; // I32 [kv_size] + + ggml_tensor * get_kq_mask() const { return self_kq_mask_cnv; } + + ggml_tensor * self_kq_mask = nullptr; // F32 [n_kv, n_batch] + ggml_tensor * self_kq_mask_cnv = nullptr; // [n_kv, n_batch] + + const llama_hparams & hparams; + const llama_cparams & cparams; + + const llama_memory_hybrid_state * mem_state; +}; + // // llm_graph_result // @@ -508,13 +536,14 @@ struct llm_graph_context { ggml_tensor * build_inp_out_ids() const; ggml_tensor * build_inp_mean() const; ggml_tensor * build_inp_cls() const; - ggml_tensor * build_inp_s_copy() const; ggml_tensor * build_inp_cross_embd() const; ggml_tensor * build_inp_pos_bucket_enc() const; ggml_tensor * build_inp_pos_bucket_dec() const; ggml_tensor * build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const; + llm_graph_input_mem_hybrid * build_inp_mem_hybrid() const; + // // attention // @@ -589,22 +618,62 @@ struct llm_graph_context { float kq_scale, int il) const; + ggml_tensor * build_attn( + llm_graph_input_mem_hybrid * inp, + ggml_cgraph * gf, + ggml_tensor * wo, + ggml_tensor * wo_b, + ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens] + ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens] + ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens] + ggml_tensor * kq_b, + ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v] + float kq_scale, + int il) const; // // recurrent // - ggml_tensor * build_recurrent_state( - ggml_cgraph * gf, - ggml_tensor * s, - ggml_tensor * state_copy, - int32_t state_size, - int32_t n_seqs, - bool avoid_copies = false) const; + // TODO: avoid notion of "kv" + // TODO: move this implementation to llama_memory_recurrent. + // this is analogous to llama_kv_cache_unified::cpy_k / cpy_v + // when moving, avoid passing `ggml_cgraph` - only pass `ggml_context`. would likely need to split the + // implementation in 2 separate methods. 
the goal is to avoid calling `ggml_build_forward_expand` in + // `llama_memory_recurrent` + ggml_tensor * build_rs( + ggml_cgraph * gf, + ggml_tensor * s, + ggml_tensor * state_copy, + int32_t state_size, + int32_t n_seqs, + uint32_t n_kv, + uint32_t kv_head, + uint32_t kv_size, + int32_t rs_zero, + bool avoid_copies = false) const; + + llm_graph_input_rs * build_rs_inp() const; + + ggml_tensor * build_rs( + llm_graph_input_rs * inp, + ggml_cgraph * gf, + ggml_tensor * s, + int32_t state_size, + int32_t n_seqs, + bool avoid_copies = false) const; + + ggml_tensor * build_rs( + llm_graph_input_mem_hybrid * inp, + ggml_cgraph * gf, + ggml_tensor * s, + int32_t state_size, + int32_t n_seqs, + bool avoid_copies = false) const; ggml_tensor * build_rwkv_token_shift_load( - ggml_cgraph * gf, - ggml_tensor * state_copy, - const llama_ubatch & ubatch, + llm_graph_input_rs * inp, + ggml_cgraph * gf, + const llama_ubatch & ubatch, int il) const; ggml_tensor * build_rwkv_token_shift_store( diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp index 1499eb08a5dd9..b40566ced99ee 100644 --- a/src/llama-hparams.cpp +++ b/src/llama-hparams.cpp @@ -65,7 +65,7 @@ uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const { return n_embd_head_v * n_head_kv; } -uint32_t llama_hparams::n_embd_k_s() const { +uint32_t llama_hparams::n_embd_r() const { if (wkv_head_size != 0) { // for RWKV models return token_shift_count * n_embd; @@ -76,7 +76,7 @@ uint32_t llama_hparams::n_embd_k_s() const { return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner; } -uint32_t llama_hparams::n_embd_v_s() const { +uint32_t llama_hparams::n_embd_s() const { if (wkv_head_size != 0) { // corresponds to RWKV's wkv_states size return n_embd * wkv_head_size; @@ -86,6 +86,10 @@ uint32_t llama_hparams::n_embd_v_s() const { return ssm_d_state * ssm_d_inner; } +bool llama_hparams::is_recurrent(uint32_t il) const { + return recurrent_layer_arr[il]; +} + bool llama_hparams::is_swa(uint32_t il) const { if (il < n_layer) { return swa_layers[il]; diff --git a/src/llama-hparams.h b/src/llama-hparams.h index b2bcb8b01a18b..82bb5b6084946 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -115,6 +115,9 @@ struct llama_hparams { uint32_t ssm_d_state = 0; uint32_t ssm_dt_rank = 0; + // for hybrid state space models + std::array recurrent_layer_arr; + bool ssm_dt_b_c_rms = false; float f_clamp_kqv = 0.0f; @@ -181,10 +184,13 @@ struct llama_hparams { // dimension of the rolling state embeddings // corresponds to Mamba's conv_states size or RWKV's token_shift states size - uint32_t n_embd_k_s() const; + uint32_t n_embd_r() const; // dimension of the recurrent state embeddings - uint32_t n_embd_v_s() const; + uint32_t n_embd_s() const; + + // whether or not the given layer is recurrent (for hybrid models) + bool is_recurrent(uint32_t il) const; bool is_swa(uint32_t il) const; }; diff --git a/src/llama-kv-cache-unified-iswa.cpp b/src/llama-kv-cache-unified-iswa.cpp index a4a4c2b1b859d..a869b1de8c2a3 100644 --- a/src/llama-kv-cache-unified-iswa.cpp +++ b/src/llama-kv-cache-unified-iswa.cpp @@ -197,21 +197,19 @@ llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_swa() const { llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(llama_memory_status status) : status(status) {} llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( - llama_kv_cache_unified_iswa * kv) : status(LLAMA_MEMORY_STATUS_SUCCESS) { - state_base = kv->get_base()->init_full(); - state_swa = kv->get_swa ()->init_full(); - - status = 
llama_memory_status_combine(state_base->get_status(), state_swa->get_status()); + llama_kv_cache_unified_iswa * kv) : + state_base(kv->get_base()->init_full()), + state_swa (kv->get_swa ()->init_full()), + status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) { } llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( llama_kv_cache_unified_iswa * kv, llama_context * lctx, - bool optimize) : status(LLAMA_MEMORY_STATUS_SUCCESS) { - state_base = kv->get_base()->init_update(lctx, optimize); - state_swa = kv->get_swa ()->init_update(lctx, optimize); - - status = llama_memory_status_combine(state_base->get_status(), state_swa->get_status()); + bool optimize) : + state_base(kv->get_base()->init_update(lctx, optimize)), + state_swa (kv->get_swa ()->init_update(lctx, optimize)), + status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) { } llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( @@ -219,15 +217,13 @@ llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( llama_sbatch sbatch, std::vector heads_base, std::vector heads_swa, - std::vector ubatches) - : status(LLAMA_MEMORY_STATUS_SUCCESS), - sbatch(std::move(sbatch)), - ubatches(std::move(ubatches)) { + std::vector ubatches) : + sbatch(std::move(sbatch)), + ubatches(std::move(ubatches)), // note: here we copy the ubatches. not sure if this is ideal - state_base.reset(new llama_kv_cache_unified_state(kv->get_base(), {}, std::move(heads_base), this->ubatches)); - state_swa .reset(new llama_kv_cache_unified_state(kv->get_swa (), {}, std::move(heads_swa), this->ubatches)); - - status = llama_memory_status_combine(state_base->get_status(), state_swa->get_status()); + state_base(new llama_kv_cache_unified_state(kv->get_base(), {}, std::move(heads_base), this->ubatches)), + state_swa (new llama_kv_cache_unified_state(kv->get_swa (), {}, std::move(heads_swa), this->ubatches)), + status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) { } llama_kv_cache_unified_iswa_state:: ~llama_kv_cache_unified_iswa_state() = default; diff --git a/src/llama-kv-cache-unified-iswa.h b/src/llama-kv-cache-unified-iswa.h index 6e941e1a41b88..813eaf39b25b0 100644 --- a/src/llama-kv-cache-unified-iswa.h +++ b/src/llama-kv-cache-unified-iswa.h @@ -117,8 +117,6 @@ class llama_kv_cache_unified_iswa_state : public llama_memory_state_i { const llama_kv_cache_unified_state * get_swa() const; private: - llama_memory_status status; - //llama_kv_cache_unified_iswa * kv; llama_sbatch sbatch; @@ -128,6 +126,8 @@ class llama_kv_cache_unified_iswa_state : public llama_memory_state_i { std::vector ubatches; - llama_memory_state_ptr state_base; - llama_memory_state_ptr state_swa; + const llama_memory_state_ptr state_base; + const llama_memory_state_ptr state_swa; + + const llama_memory_status status; }; diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index 3b37679859d39..d4412288925c3 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -68,8 +68,8 @@ llama_kv_cache_unified::llama_kv_cache_unified( continue; } - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); const char * dev_name = "CPU"; @@ -1430,7 +1430,7 @@ void 
llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std:: for (const auto & layer : layers) { const uint32_t il = layer.il; - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); // Write key type const int32_t k_type_i = (int32_t)layer.k->type; @@ -1452,7 +1452,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std:: for (const auto & layer : layers) { const uint32_t il = layer.il; - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); // Write value type const int32_t v_type_i = (int32_t)layer.v->type; @@ -1476,7 +1476,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std:: for (const auto & layer : layers) { const uint32_t il = layer.il; - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); // Write value type const int32_t v_type_i = (int32_t)layer.v->type; @@ -1621,7 +1621,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell for (const auto & layer : layers) { const uint32_t il = layer.il; - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); // Read type of key int32_t k_type_i_ref; @@ -1651,7 +1651,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell for (const auto & layer : layers) { const uint32_t il = layer.il; - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); // Read type of value int32_t v_type_i_ref; @@ -1681,7 +1681,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell for (const auto & layer : layers) { const uint32_t il = layer.il; - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); // Read type of value int32_t v_type_i_ref; diff --git a/src/llama-memory-hybrid.cpp b/src/llama-memory-hybrid.cpp new file mode 100644 index 0000000000000..d4b260db4c8e7 --- /dev/null +++ b/src/llama-memory-hybrid.cpp @@ -0,0 +1,247 @@ +#include "llama-memory-hybrid.h" + +#include "llama-impl.h" +#include "llama-model.h" +#include "llama-context.h" + +// +// llama_memory_hybrid +// + +llama_memory_hybrid::llama_memory_hybrid( + const llama_model & model, + /* attn */ + ggml_type type_k, + ggml_type type_v, + bool v_trans, + uint32_t kv_size, + uint32_t n_pad, + uint32_t n_swa, + llama_swa_type swa_type, + /* recurrent */ + ggml_type type_r, + ggml_type type_s, + uint32_t rs_size, + /* common */ + uint32_t n_seq_max, + bool offload, + /* layer filters */ + layer_filter_cb && filter_attn, + layer_filter_cb && filter_recr) : + hparams(model.hparams), + mem_attn(new llama_kv_cache_unified( + model, + filter_attn == nullptr ? + [&](int32_t il) { return !model.hparams.is_recurrent(il); } + : filter_attn, + type_k, + type_v, + v_trans, + offload, + kv_size, + n_seq_max, + n_pad, + n_swa, + swa_type + )), + mem_recr(new llama_memory_recurrent( + model, + filter_recr == nullptr ? 
+ [&](int32_t il) { return model.hparams.is_recurrent(il); } + : filter_recr, + type_r, + type_s, + offload, + rs_size, + n_seq_max + )) {} + +llama_memory_state_ptr llama_memory_hybrid::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled) { + + // since this includes a recurrent cache, we cannot use split_simple + auto sbatch = llama_sbatch(batch, hparams.n_embd, false); + + // follow the recurrent pattern for creating the ubatch splits + std::vector ubatches; + while (sbatch.n_tokens > 0) { + llama_ubatch ubatch; + + if (embd_pooled) { + // Pooled embeddings cannot be split across ubatches (yet) + ubatch = sbatch.split_seq(n_ubatch); + } else { + ubatch = sbatch.split_equal(n_ubatch); + } + + ubatches.push_back(ubatch); + } + + // prepare the recurrent batches first + if (!mem_recr->prepare(ubatches)) { + // TODO: will the recurrent cache be in an undefined state at this point? + LLAMA_LOG_ERROR("%s: failed to prepare recurrent ubatches\n", __func__); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + } + + // prepare the attention cache + auto heads_attn = mem_attn->prepare(ubatches); + if (heads_attn.empty()) { + LLAMA_LOG_ERROR("%s: failed to prepare attention ubatches\n", __func__); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + } + + return std::make_unique( + this, std::move(sbatch), std::move(heads_attn), std::move(ubatches)); +} + +llama_memory_state_ptr llama_memory_hybrid::init_full() { + return std::make_unique(this); +} + +llama_memory_state_ptr llama_memory_hybrid::init_update(llama_context * lctx, bool optimize) { + return std::make_unique(this, lctx, optimize); +} + +bool llama_memory_hybrid::get_can_shift() const { + // Shifting is trivially supported for recurrent + return mem_attn->get_can_shift(); +} + +void llama_memory_hybrid::clear(bool data) { + mem_attn->clear(data); + mem_recr->clear(data); +} + +bool llama_memory_hybrid::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) { + // Try removing from the recurrent cache first since it may fail. If it does + // fail, the cache will not have been mutated. 
+ if (!mem_recr->seq_rm(seq_id, p0, p1)) { + return false; + } + return mem_attn->seq_rm(seq_id, p0, p1); +} + +void llama_memory_hybrid::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) { + mem_attn->seq_cp(seq_id_src, seq_id_dst, p0, p1); + mem_recr->seq_cp(seq_id_src, seq_id_dst, p0, p1); +} + +void llama_memory_hybrid::seq_keep(llama_seq_id seq_id) { + mem_attn->seq_keep(seq_id); + mem_recr->seq_keep(seq_id); +} + +void llama_memory_hybrid::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) { + mem_attn->seq_add(seq_id, p0, p1, shift); + mem_recr->seq_add(seq_id, p0, p1, shift); +} + +void llama_memory_hybrid::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) { + mem_attn->seq_div(seq_id, p0, p1, d); + mem_recr->seq_div(seq_id, p0, p1, d); +} + +llama_pos llama_memory_hybrid::seq_pos_min(llama_seq_id seq_id) const { + // the min of the total cache is the max of the two caches' min values + return std::max(mem_attn->seq_pos_min(seq_id), mem_recr->seq_pos_min(seq_id)); +} + +llama_pos llama_memory_hybrid::seq_pos_max(llama_seq_id seq_id) const { + // the max of the total cache is the min of the two caches' max values + return std::min(mem_attn->seq_pos_max(seq_id), mem_recr->seq_pos_max(seq_id)); +} + +void llama_memory_hybrid::state_write(llama_io_write_i & io, llama_seq_id seq_id) const { + mem_attn->state_write(io, seq_id); + mem_recr->state_write(io, seq_id); +} + +void llama_memory_hybrid::state_read(llama_io_read_i & io, llama_seq_id seq_id) { + mem_attn->state_read(io, seq_id); + mem_recr->state_read(io, seq_id); +} + +llama_kv_cache_unified * llama_memory_hybrid::get_mem_attn() const { + return mem_attn.get(); +} + +llama_memory_recurrent * llama_memory_hybrid::get_mem_recr() const { + return mem_recr.get(); +} + +llama_memory_hybrid_state::llama_memory_hybrid_state(llama_memory_status status) : status(status) {} + +llama_memory_hybrid_state::llama_memory_hybrid_state(llama_memory_hybrid * mem) : + state_attn(mem->get_mem_attn()->init_full()), + state_recr(mem->get_mem_recr()->init_full()), + status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) { +} + +llama_memory_hybrid_state::llama_memory_hybrid_state( + llama_memory_hybrid * mem, + llama_context * lctx, + bool optimize) : + state_attn(mem->get_mem_attn()->init_update(lctx, optimize)), + state_recr(mem->get_mem_recr()->init_update(lctx, optimize)), + status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) { +} + +llama_memory_hybrid_state::llama_memory_hybrid_state( + llama_memory_hybrid * mem, + llama_sbatch sbatch, + std::vector heads_attn, + std::vector ubatches) : + sbatch(std::move(sbatch)), + ubatches(std::move(ubatches)), + // note: here we copy the ubatches. 
not sure if this is ideal + state_attn(new llama_kv_cache_unified_state(mem->get_mem_attn(), {}, std::move(heads_attn), this->ubatches)), + state_recr(new llama_memory_recurrent_state(mem->get_mem_recr(), {}, this->ubatches)), + status(LLAMA_MEMORY_STATUS_SUCCESS) { +} + +bool llama_memory_hybrid_state::next() { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + state_attn->next(); + state_recr->next(); + + if (++i_next >= ubatches.size()) { + return false; + } + + return true; +} + +bool llama_memory_hybrid_state::apply() { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + bool res = true; + + res = res & state_attn->apply(); + res = res & state_recr->apply(); + + return res; +} + +std::vector & llama_memory_hybrid_state::out_ids() { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + return sbatch.out_ids; +} + +llama_memory_status llama_memory_hybrid_state::get_status() const { + return status; +} + +const llama_ubatch & llama_memory_hybrid_state::get_ubatch() const { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + return ubatches[i_next]; +} + +const llama_kv_cache_unified_state * llama_memory_hybrid_state::get_state_attn() const { + return static_cast(state_attn.get()); +} + +const llama_memory_recurrent_state * llama_memory_hybrid_state::get_state_recr() const { + return static_cast(state_recr.get()); +} diff --git a/src/llama-memory-hybrid.h b/src/llama-memory-hybrid.h new file mode 100644 index 0000000000000..b5700c5225f18 --- /dev/null +++ b/src/llama-memory-hybrid.h @@ -0,0 +1,143 @@ +#pragma once + +#include "llama-batch.h" +#include "llama-graph.h" +#include "llama-kv-cache-unified.h" +#include "llama-memory.h" +#include "llama-memory-recurrent.h" + +#include +#include + +// +// llama_memory_hybrid +// + +// utilizes instances of llama_memory_recurrent and llama_kv_cache_unified to +// support models where each layer may be either attention-based or recurrent + +class llama_memory_hybrid : public llama_memory_i { +public: + + // this callback is used to filter out layers that should not be included in the cache + using layer_filter_cb = std::function; + + llama_memory_hybrid( + const llama_model & model, + /* attn */ + ggml_type type_k, + ggml_type type_v, + bool v_trans, + uint32_t kv_size, + uint32_t n_pad, + uint32_t n_swa, + llama_swa_type swa_type, + /* recurrent */ + ggml_type type_r, + ggml_type type_s, + uint32_t rs_size, + /* common */ + uint32_t n_seq_max, + bool offload, + /* layer filters */ + layer_filter_cb && filter_attn = nullptr, + layer_filter_cb && filter_recr = nullptr); + + ~llama_memory_hybrid() = default; + + // + // llama_memory_i + // + + llama_memory_state_ptr init_batch( + const llama_batch & batch, + uint32_t n_ubatch, + bool embd_pooled) override; + + llama_memory_state_ptr init_full() override; + + llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) override; + + bool get_can_shift() const override; + + void clear(bool data) override; + + bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override; + void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override; + void seq_keep(llama_seq_id seq_id) override; + void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override; + void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override; + + llama_pos seq_pos_min(llama_seq_id seq_id) const override; + llama_pos seq_pos_max(llama_seq_id seq_id) const override; + + // state write/load + + void state_write(llama_io_write_i & io, 
llama_seq_id seq_id = -1) const override; + void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override; + + // + // llama_memory_hybrid specific API + // + + llama_kv_cache_unified * get_mem_attn() const; + llama_memory_recurrent * get_mem_recr() const; + +private: + const llama_hparams & hparams; + + const std::unique_ptr mem_attn; + const std::unique_ptr mem_recr; +}; + +class llama_memory_hybrid_state : public llama_memory_state_i { +public: + // init failure + explicit llama_memory_hybrid_state(llama_memory_status status); + + // init full + explicit llama_memory_hybrid_state(llama_memory_hybrid * mem); + + // init update + explicit llama_memory_hybrid_state( + llama_memory_hybrid * mem, + llama_context * lctx, + bool optimize); + + // init success + llama_memory_hybrid_state( + llama_memory_hybrid * mem, + llama_sbatch sbatch, + std::vector heads_attn, + std::vector ubatches); + + ~llama_memory_hybrid_state() = default; + + bool next() override; + bool apply() override; + + std::vector & out_ids() override; + + llama_memory_status get_status() const override; + const llama_ubatch & get_ubatch() const override; + + // + // llama_memory_hybrid_state + // + + const llama_kv_cache_unified_state * get_state_attn() const; + const llama_memory_recurrent_state * get_state_recr() const; + +private: + llama_sbatch sbatch; + + // the index of the next ubatch to process + size_t i_next = 0; + + std::vector ubatches; + + const llama_memory_state_ptr state_attn; + const llama_memory_state_ptr state_recr; + + const llama_memory_status status; +}; diff --git a/src/llama-kv-cache-recurrent.cpp b/src/llama-memory-recurrent.cpp similarity index 67% rename from src/llama-kv-cache-recurrent.cpp rename to src/llama-memory-recurrent.cpp index 8f6f120f682b7..c4f9a6f1ddc98 100644 --- a/src/llama-kv-cache-recurrent.cpp +++ b/src/llama-memory-recurrent.cpp @@ -1,4 +1,4 @@ -#include "llama-kv-cache-recurrent.h" +#include "llama-memory-recurrent.h" #include "llama-impl.h" #include "llama-io.h" @@ -12,27 +12,28 @@ #include // -// llama_kv_cache_recurrent +// llama_memory_recurrent // -llama_kv_cache_recurrent::llama_kv_cache_recurrent( - const llama_model & model, - ggml_type type_k, - ggml_type type_v, - bool offload, - uint32_t kv_size, - uint32_t n_seq_max) : hparams(model.hparams), n_seq_max(n_seq_max) { +llama_memory_recurrent::llama_memory_recurrent( + const llama_model & model, + layer_filter_cb && filter, + ggml_type type_r, + ggml_type type_s, + bool offload, + uint32_t mem_size, + uint32_t n_seq_max) : hparams(model.hparams), n_seq_max(n_seq_max) { const int32_t n_layer = hparams.n_layer; - LLAMA_LOG_INFO("%s: kv_size = %u, n_seq_max = %u, type_k = '%s', type_v = '%s', n_layer = %d\n", - __func__, kv_size, n_seq_max, ggml_type_name(type_k), ggml_type_name(type_v), n_layer); + LLAMA_LOG_INFO("%s: mem_size = %u, n_seq_max = %u, type_r = '%s', type_s = '%s', n_layer = %d\n", + __func__, mem_size, n_seq_max, ggml_type_name(type_r), ggml_type_name(type_s), n_layer); head = 0; - size = kv_size; + size = mem_size; used = 0; cells.clear(); - cells.resize(kv_size); + cells.resize(mem_size); // create a context for each buffer type std::map ctx_map; @@ -59,12 +60,14 @@ llama_kv_cache_recurrent::llama_kv_cache_recurrent( return it->second; }; - k_l.reserve(n_layer); - v_l.reserve(n_layer); + r_l.resize(n_layer); + s_l.resize(n_layer); for (int i = 0; i < n_layer; i++) { - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s(); - const uint32_t n_embd_v_gqa = 
hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s(); + if (filter && !filter(i)) { + LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, i); + continue; + } const char * dev_name = "CPU"; @@ -84,12 +87,12 @@ llama_kv_cache_recurrent::llama_kv_cache_recurrent( throw std::runtime_error("failed to create ggml context for kv cache"); } - ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size); - ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size); - ggml_format_name(k, "cache_k_l%d", i); - ggml_format_name(v, "cache_v_l%d", i); - k_l.push_back(k); - v_l.push_back(v); + ggml_tensor * r = ggml_new_tensor_1d(ctx, type_r, hparams.n_embd_r()*mem_size); + ggml_tensor * s = ggml_new_tensor_1d(ctx, type_s, hparams.n_embd_s()*mem_size); + ggml_format_name(r, "cache_r_l%d", i); + ggml_format_name(s, "cache_s_l%d", i); + r_l[i] = r; + s_l[i] = s; } // allocate tensors and initialize the buffers to avoid NaNs in the padding @@ -107,17 +110,17 @@ llama_kv_cache_recurrent::llama_kv_cache_recurrent( } { - const size_t memory_size_k = size_k_bytes(); - const size_t memory_size_v = size_v_bytes(); + const size_t memory_size_r = size_r_bytes(); + const size_t memory_size_s = size_s_bytes(); - LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__, - (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f), - ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f), - ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f)); + LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, R (%s): %7.2f MiB, S (%s): %7.2f MiB\n", __func__, + (float)(memory_size_r + memory_size_s) / (1024.0f * 1024.0f), + ggml_type_name(type_r), (float)memory_size_r / (1024.0f * 1024.0f), + ggml_type_name(type_s), (float)memory_size_s / (1024.0f * 1024.0f)); } } -void llama_kv_cache_recurrent::clear(bool data) { +void llama_memory_recurrent::clear(bool data) { for (int32_t i = 0; i < (int32_t) size; ++i) { cells[i].pos = -1; cells[i].seq_id.clear(); @@ -135,7 +138,7 @@ void llama_kv_cache_recurrent::clear(bool data) { } } -bool llama_kv_cache_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) { +bool llama_memory_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) { uint32_t new_head = size; if (p0 < 0) { @@ -154,7 +157,7 @@ bool llama_kv_cache_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_p if (0 <= seq_id) { int32_t & tail_id = cells[seq_id].tail; if (tail_id >= 0) { - const kv_cell & cell = cells[tail_id]; + const auto & cell = cells[tail_id]; // partial intersection is invalid if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) { return false; @@ -202,7 +205,7 @@ bool llama_kv_cache_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_p return true; } -void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) { +void llama_memory_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) { if (seq_id_src == seq_id_dst) { return; } @@ -216,11 +219,11 @@ void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_ } if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) { - kv_cell & tail_src = cells[seq_id_src]; - kv_cell & tail_dst = cells[seq_id_dst]; + auto & tail_src = cells[seq_id_src]; + auto & tail_dst = cells[seq_id_dst]; if (tail_dst.tail >= 0) { // clear destination seq_id if it wasn't empty - kv_cell & cell_dst = cells[tail_dst.tail]; + auto & cell_dst = 
cells[tail_dst.tail]; cell_dst.seq_id.erase(seq_id_dst); tail_dst.tail = -1; @@ -231,7 +234,7 @@ void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_ } } if (tail_src.tail >= 0) { - kv_cell & cell_src = cells[tail_src.tail]; + auto & cell_src = cells[tail_src.tail]; cell_src.seq_id.insert(seq_id_dst); tail_dst.tail = tail_src.tail; @@ -239,7 +242,7 @@ void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_ } } -void llama_kv_cache_recurrent::seq_keep(llama_seq_id seq_id) { +void llama_memory_recurrent::seq_keep(llama_seq_id seq_id) { uint32_t new_head = size; for (uint32_t i = 0; i < size; ++i) { @@ -271,7 +274,7 @@ void llama_kv_cache_recurrent::seq_keep(llama_seq_id seq_id) { } } -void llama_kv_cache_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) { +void llama_memory_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) { if (shift == 0) { return; } @@ -293,7 +296,7 @@ void llama_kv_cache_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_ if (0 <= seq_id && seq_id < (int64_t) size) { const int32_t tail_id = cells[seq_id].tail; if (tail_id >= 0) { - kv_cell & cell = cells[tail_id]; + auto & cell = cells[tail_id]; if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { cell.pos += shift; } @@ -301,7 +304,7 @@ void llama_kv_cache_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_ } } -void llama_kv_cache_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) { +void llama_memory_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) { if (d == 1) { return; } @@ -323,7 +326,7 @@ void llama_kv_cache_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_ if (0 <= seq_id && seq_id < (int64_t) size) { const int32_t tail_id = cells[seq_id].tail; if (tail_id >= 0) { - kv_cell & cell = cells[tail_id]; + auto & cell = cells[tail_id]; if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { cell.pos /= d; } @@ -331,7 +334,7 @@ void llama_kv_cache_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_ } } -llama_pos llama_kv_cache_recurrent::seq_pos_min(llama_seq_id seq_id) const { +llama_pos llama_memory_recurrent::seq_pos_min(llama_seq_id seq_id) const { llama_pos result = std::numeric_limits::max(); for (uint32_t i = 0; i < size; ++i) { @@ -347,7 +350,7 @@ llama_pos llama_kv_cache_recurrent::seq_pos_min(llama_seq_id seq_id) const { return result; } -llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const { +llama_pos llama_memory_recurrent::seq_pos_max(llama_seq_id seq_id) const { llama_pos result = -1; for (uint32_t i = 0; i < size; ++i) { @@ -359,7 +362,7 @@ llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const { return result; } -llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) { +llama_memory_state_ptr llama_memory_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) { auto sbatch = llama_sbatch(batch, hparams.n_embd, false); std::vector ubatches; @@ -378,24 +381,24 @@ llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & } if (!prepare(ubatches)) { - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); } - return std::make_unique(LLAMA_MEMORY_STATUS_SUCCESS, this, std::move(sbatch), std::move(ubatches)); + return std::make_unique(this, 
std::move(sbatch), std::move(ubatches)); } -llama_memory_state_ptr llama_kv_cache_recurrent::init_full() { - return std::make_unique(LLAMA_MEMORY_STATUS_SUCCESS, this); +llama_memory_state_ptr llama_memory_recurrent::init_full() { + return std::make_unique(this); } -llama_memory_state_ptr llama_kv_cache_recurrent::init_update(llama_context * lctx, bool optimize) { +llama_memory_state_ptr llama_memory_recurrent::init_update(llama_context * lctx, bool optimize) { GGML_UNUSED(lctx); GGML_UNUSED(optimize); - return std::make_unique(LLAMA_MEMORY_STATUS_NO_UPDATE); + return std::make_unique(LLAMA_MEMORY_STATUS_NO_UPDATE); } -bool llama_kv_cache_recurrent::prepare(const std::vector & ubatches) { +bool llama_memory_recurrent::prepare(const std::vector & ubatches) { // simply remember the full state because it is very small for this type of cache // TODO: optimize auto org_cells = cells; @@ -419,7 +422,7 @@ bool llama_kv_cache_recurrent::prepare(const std::vector & ubatche return success; } -bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { +bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) { const uint32_t n_seqs = ubatch.n_seqs; const uint32_t n_seq_tokens = ubatch.n_seq_tokens; @@ -453,9 +456,9 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { return false; } if (j > 0) { - kv_cell & seq = cells[seq_id]; + auto & seq = cells[seq_id]; if (seq.tail >= 0) { - kv_cell & cell = cells[seq.tail]; + auto & cell = cells[seq.tail]; // clear cells from seq_ids that become shared // (should not normally happen, but let's handle it anyway) cell.seq_id.erase(seq_id); @@ -475,7 +478,7 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { std::vector tails_verif; tails_verif.assign(size, -1); for (uint32_t i = 0; i < size; ++i) { - kv_cell & cell = cells[i]; + auto & cell = cells[i]; for (llama_seq_id seq_id : cell.seq_id) { if (tails_verif[seq_id] != -1) { LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]); @@ -496,7 +499,7 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { for (uint32_t i = 0; i < size; ++i) { if (next_empty_cell >= size) { next_empty_cell -= size; } - kv_cell & cell = cells[next_empty_cell]; + auto & cell = cells[next_empty_cell]; if (cell.is_empty()) { break; } next_empty_cell += 1; } @@ -504,20 +507,20 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { // find usable cell range for (uint32_t s = 0; s < n_seqs; ++s) { const llama_seq_id seq_id = ubatch.seq_id[s][0]; - kv_cell & seq_meta = cells[seq_id]; + auto & seq_meta = cells[seq_id]; bool has_cell = false; if (seq_meta.tail >= 0) { - kv_cell & cell = cells[seq_meta.tail]; + auto & cell = cells[seq_meta.tail]; GGML_ASSERT(cell.has_seq_id(seq_id)); // does this seq_id "own" the cell? 
if (cell.seq_id.size() == 1) { has_cell = true; } } if (!has_cell) { - kv_cell & empty_cell = cells[next_empty_cell]; + auto & empty_cell = cells[next_empty_cell]; GGML_ASSERT(empty_cell.is_empty()); // copy old tail into the empty cell if (seq_meta.tail >= 0) { - kv_cell & orig_cell = cells[seq_meta.tail]; + auto & orig_cell = cells[seq_meta.tail]; empty_cell.pos = orig_cell.pos; empty_cell.src = orig_cell.src; orig_cell.seq_id.erase(seq_id); @@ -530,7 +533,7 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { for (uint32_t i = 0; i < size; ++i) { next_empty_cell += 1; if (next_empty_cell >= size) { next_empty_cell -= size; } - kv_cell & cell = cells[next_empty_cell]; + auto & cell = cells[next_empty_cell]; if (cell.is_empty()) { break; } } } @@ -544,8 +547,8 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { const int32_t dst_id = s + min; const int32_t src_id = cells[ubatch.seq_id[s][0]].tail; if (dst_id != src_id) { - kv_cell & dst_cell = cells[dst_id]; - kv_cell & src_cell = cells[src_id]; + auto & dst_cell = cells[dst_id]; + auto & src_cell = cells[src_id]; std::swap(dst_cell.pos, src_cell.pos); std::swap(dst_cell.src, src_cell.src); @@ -567,7 +570,7 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { for (uint32_t s = 0; s < n_seqs; ++s) { const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1]; const int32_t cell_id = s + min; - kv_cell & cell = cells[cell_id]; + auto & cell = cells[cell_id]; if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) { // What should happen when the pos backtracks or skips a value? @@ -620,18 +623,18 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { head = min; n = max - min + 1; used = std::count_if(cells.begin(), cells.end(), - [](const kv_cell & cell){ return !cell.is_empty(); }); + [](const mem_cell & cell){ return !cell.is_empty(); }); // sanity check return n >= n_seqs; } -bool llama_kv_cache_recurrent::get_can_shift() const { +bool llama_memory_recurrent::get_can_shift() const { // shifting the pos is trivial for recurrent models return true; } -size_t llama_kv_cache_recurrent::total_size() const { +size_t llama_memory_recurrent::total_size() const { size_t size = 0; for (const auto & buf : bufs) { size += ggml_backend_buffer_get_size(buf.get()); @@ -640,27 +643,31 @@ size_t llama_kv_cache_recurrent::total_size() const { return size; } -size_t llama_kv_cache_recurrent::size_k_bytes() const { - size_t size_k_bytes = 0; +size_t llama_memory_recurrent::size_r_bytes() const { + size_t size_r_bytes = 0; - for (const auto & k : k_l) { - size_k_bytes += ggml_nbytes(k); + for (const auto & r : r_l) { + if (r != nullptr) { + size_r_bytes += ggml_nbytes(r); + } } - return size_k_bytes; + return size_r_bytes; } -size_t llama_kv_cache_recurrent::size_v_bytes() const { - size_t size_v_bytes = 0; +size_t llama_memory_recurrent::size_s_bytes() const { + size_t size_s_bytes = 0; - for (const auto & v : v_l) { - size_v_bytes += ggml_nbytes(v); + for (const auto & s : s_l) { + if (s != nullptr) { + size_s_bytes += ggml_nbytes(s); + } } - return size_v_bytes; + return size_s_bytes; } -void llama_kv_cache_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id) const { +void llama_memory_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id) const { std::vector> cell_ranges; // ranges, from inclusive, to exclusive uint32_t cell_count = 0; @@ -698,7 +705,7 @@ void llama_kv_cache_recurrent::state_write(llama_io_write_i 
& io, llama_seq_id s state_write_data(io, cell_ranges); } -void llama_kv_cache_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id) { +void llama_memory_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id) { uint32_t cell_count; io.read_to(&cell_count, sizeof(cell_count)); @@ -717,7 +724,7 @@ void llama_kv_cache_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq } } -void llama_kv_cache_recurrent::state_write_meta(llama_io_write_i & io, const std::vector> & cell_ranges, llama_seq_id seq_id) const { +void llama_memory_recurrent::state_write_meta(llama_io_write_i & io, const std::vector> & cell_ranges, llama_seq_id seq_id) const { for (const auto & range : cell_ranges) { for (uint32_t i = range.first; i < range.second; ++i) { const auto & cell = cells[i]; @@ -736,87 +743,85 @@ void llama_kv_cache_recurrent::state_write_meta(llama_io_write_i & io, const std } } -void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std::vector> & cell_ranges) const { - const uint32_t v_trans = 0; +void llama_memory_recurrent::state_write_data(llama_io_write_i & io, const std::vector> & cell_ranges) const { + const uint32_t s_trans = 0; const uint32_t n_layer = hparams.n_layer; - io.write(&v_trans, sizeof(v_trans)); - io.write(&n_layer, sizeof(n_layer)); + io.write(&s_trans, sizeof(s_trans)); + io.write(&n_layer, sizeof(n_layer)); std::vector tmp_buf; // Iterate and write all the keys first, each row is a cell // Get whole range at a time for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); // Write key type - const int32_t k_type_i = (int32_t)k_l[il]->type; - io.write(&k_type_i, sizeof(k_type_i)); + const int32_t r_type_i = (int32_t)r_l[il]->type; + io.write(&r_type_i, sizeof(r_type_i)); // Write row size of key - const uint64_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa); - io.write(&k_size_row, sizeof(k_size_row)); + const uint64_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r()); + io.write(&r_size_row, sizeof(r_size_row)); // Read each range of cells of k_size length each into tmp_buf and write out for (const auto & range : cell_ranges) { const size_t range_size = range.second - range.first; - const size_t buf_size = range_size * k_size_row; - io.write_tensor(k_l[il], range.first * k_size_row, buf_size); + const size_t buf_size = range_size * r_size_row; + io.write_tensor(r_l[il], range.first * r_size_row, buf_size); } } - if (!v_trans) { + if (!s_trans) { for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); // Write value type - const int32_t v_type_i = (int32_t)v_l[il]->type; - io.write(&v_type_i, sizeof(v_type_i)); + const int32_t s_type_i = (int32_t)s_l[il]->type; + io.write(&s_type_i, sizeof(s_type_i)); // Write row size of value - const uint64_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa); - io.write(&v_size_row, sizeof(v_size_row)); + const uint64_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s()); + io.write(&s_size_row, sizeof(s_size_row)); - // Read each range of cells of v_size length each into tmp_buf and write out + // Read each range of cells of s_size length each into tmp_buf and write out for (const auto & range : cell_ranges) { const size_t range_size = range.second - range.first; - const size_t buf_size = range_size * v_size_row; - io.write_tensor(v_l[il], range.first * v_size_row, buf_size); + const size_t buf_size = range_size * 
s_size_row; + io.write_tensor(s_l[il], range.first * s_size_row, buf_size); } } } else { // When v is transposed, we also need the element size and get the element ranges from each row - const uint32_t kv_size = size; + const uint32_t mem_size = size; for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + const uint32_t n_embd_s = hparams.n_embd_s(); // Write value type - const int32_t v_type_i = (int32_t)v_l[il]->type; - io.write(&v_type_i, sizeof(v_type_i)); + const int32_t s_type_i = (int32_t)s_l[il]->type; + io.write(&s_type_i, sizeof(s_type_i)); // Write element size - const uint32_t v_size_el = ggml_type_size(v_l[il]->type); - io.write(&v_size_el, sizeof(v_size_el)); + const uint32_t s_size_el = ggml_type_size(s_l[il]->type); + io.write(&s_size_el, sizeof(s_size_el)); // Write GQA embedding size - io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa)); + io.write(&n_embd_s, sizeof(n_embd_s)); // For each row, we get the element values of each cell - for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + for (uint32_t j = 0; j < n_embd_s; ++j) { // Read each range of cells of v_size_el length each into tmp_buf and write out for (const auto & range : cell_ranges) { const size_t range_size = range.second - range.first; - const size_t src_offset = (range.first + j * kv_size) * v_size_el; - const size_t buf_size = range_size * v_size_el; - io.write_tensor(v_l[il], src_offset, buf_size); + const size_t src_offset = (range.first + j * mem_size) * s_size_el; + const size_t buf_size = range_size * s_size_el; + io.write_tensor(s_l[il], src_offset, buf_size); } } } } } -bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) { +bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) { if (dest_seq_id != -1) { // single sequence @@ -869,7 +874,7 @@ bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t ce clear(true); for (uint32_t i = 0; i < cell_count; ++i) { - kv_cell & cell = cells[i]; + auto & cell = cells[i]; llama_pos pos; uint32_t n_seq_id; @@ -883,7 +888,7 @@ bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t ce llama_seq_id seq_id; io.read_to(&seq_id, sizeof(seq_id)); - // TODO: llama_kv_cache_recurrent should have a notion of max sequences + // TODO: llama_memory_recurrent should have a notion of max sequences //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) { if (seq_id < 0) { //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx)); @@ -915,10 +920,10 @@ bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t ce return true; } -bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) { - uint32_t v_trans; +bool llama_memory_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) { + uint32_t s_trans; uint32_t n_layer; - io.read_to(&v_trans, sizeof(v_trans)); + io.read_to(&s_trans, sizeof(s_trans)); io.read_to(&n_layer, sizeof(n_layer)); if (n_layer != hparams.n_layer) { @@ -929,102 +934,100 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size); return false; } - if (false != (bool) v_trans) { - LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__); + if (false != (bool) s_trans) { + 
LLAMA_LOG_ERROR("%s: incompatible s transposition\n", __func__); return false; } // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); // Read type of key - int32_t k_type_i_ref; - io.read_to(&k_type_i_ref, sizeof(k_type_i_ref)); - const int32_t k_type_i = (int32_t) k_l[il]->type; - if (k_type_i != k_type_i_ref) { - LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il); + int32_t r_type_i_ref; + io.read_to(&r_type_i_ref, sizeof(r_type_i_ref)); + const int32_t r_type_i = (int32_t) r_l[il]->type; + if (r_type_i != r_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched r type (%d != %d, layer %d)\n", __func__, r_type_i, r_type_i_ref, il); return false; } // Read row size of key - uint64_t k_size_row_ref; - io.read_to(&k_size_row_ref, sizeof(k_size_row_ref)); - const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa); - if (k_size_row != k_size_row_ref) { - LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il); + uint64_t r_size_row_ref; + io.read_to(&r_size_row_ref, sizeof(r_size_row_ref)); + const size_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r()); + if (r_size_row != r_size_row_ref) { + LLAMA_LOG_ERROR("%s: mismatched r row size (%zu != %zu, layer %d)\n", __func__, r_size_row, (size_t) r_size_row_ref, il); return false; } if (cell_count) { // Read and set the keys for the whole cell range - ggml_backend_tensor_set(k_l[il], io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row); + ggml_backend_tensor_set(r_l[il], io.read(cell_count * r_size_row), head * r_size_row, cell_count * r_size_row); } } - if (!v_trans) { + if (!s_trans) { for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); // Read type of value - int32_t v_type_i_ref; - io.read_to(&v_type_i_ref, sizeof(v_type_i_ref)); - const int32_t v_type_i = (int32_t)v_l[il]->type; - if (v_type_i != v_type_i_ref) { - LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); + int32_t s_type_i_ref; + io.read_to(&s_type_i_ref, sizeof(s_type_i_ref)); + const int32_t s_type_i = (int32_t)s_l[il]->type; + if (s_type_i != s_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il); return false; } // Read row size of value - uint64_t v_size_row_ref; - io.read_to(&v_size_row_ref, sizeof(v_size_row_ref)); - const size_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa); - if (v_size_row != v_size_row_ref) { - LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il); + uint64_t s_size_row_ref; + io.read_to(&s_size_row_ref, sizeof(s_size_row_ref)); + const size_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s()); + if (s_size_row != s_size_row_ref) { + LLAMA_LOG_ERROR("%s: mismatched s row size (%zu != %zu, layer %d)\n", __func__, s_size_row, (size_t) s_size_row_ref, il); return false; } if (cell_count) { // Read and set the values for the whole cell range - ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row); + ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_row), head * s_size_row, cell_count 
* s_size_row); } } } else { // For each layer, read the values for each cell (transposed) for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + const uint32_t n_embd_s = hparams.n_embd_s(); // Read type of value - int32_t v_type_i_ref; - io.read_to(&v_type_i_ref, sizeof(v_type_i_ref)); - const int32_t v_type_i = (int32_t)v_l[il]->type; - if (v_type_i != v_type_i_ref) { - LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); + int32_t s_type_i_ref; + io.read_to(&s_type_i_ref, sizeof(s_type_i_ref)); + const int32_t s_type_i = (int32_t)s_l[il]->type; + if (s_type_i != s_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il); return false; } // Read element size of value - uint32_t v_size_el_ref; - io.read_to(&v_size_el_ref, sizeof(v_size_el_ref)); - const size_t v_size_el = ggml_type_size(v_l[il]->type); - if (v_size_el != v_size_el_ref) { - LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il); + uint32_t s_size_el_ref; + io.read_to(&s_size_el_ref, sizeof(s_size_el_ref)); + const size_t s_size_el = ggml_type_size(s_l[il]->type); + if (s_size_el != s_size_el_ref) { + LLAMA_LOG_ERROR("%s: mismatched s element size (%zu != %zu, layer %d)\n", __func__, s_size_el, (size_t) s_size_el_ref, il); return false; } - // Read GQA embedding size - uint32_t n_embd_v_gqa_ref; - io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref)); - if (n_embd_v_gqa != n_embd_v_gqa_ref) { - LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il); + // Read state embedding size + uint32_t n_embd_s_ref; + io.read_to(&n_embd_s_ref, sizeof(n_embd_s_ref)); + if (n_embd_s != n_embd_s_ref) { + LLAMA_LOG_ERROR("%s: mismatched s embedding size (%u != %u, layer %d)\n", __func__, n_embd_s, n_embd_s_ref, il); return false; } if (cell_count) { // For each row in the transposed matrix, read the values for the whole cell range - for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { - const size_t dst_offset = (head + j * size) * v_size_el; - ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el); + for (uint32_t j = 0; j < n_embd_s; ++j) { + const size_t dst_offset = (head + j * size) * s_size_el; + ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_el), dst_offset, cell_count * s_size_el); } } } @@ -1034,25 +1037,23 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce } // -// llama_kv_cache_recurrent_state +// llama_memory_recurrent_state // -llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state(llama_memory_status status) : status(status) {} +llama_memory_recurrent_state::llama_memory_recurrent_state(llama_memory_status status) : status(status) {} -llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state( - llama_memory_status status, - llama_kv_cache_recurrent * kv) : status(status), kv(kv), is_full(true) { +llama_memory_recurrent_state::llama_memory_recurrent_state( + llama_memory_recurrent * mem) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), is_full(true) { } -llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state( - llama_memory_status status, - llama_kv_cache_recurrent * kv, +llama_memory_recurrent_state::llama_memory_recurrent_state( + llama_memory_recurrent * mem, llama_sbatch sbatch, - std::vector 
ubatches) : status(status), kv(kv), sbatch(std::move(sbatch)), ubatches(std::move(ubatches)) {} + std::vector ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), sbatch(std::move(sbatch)), ubatches(std::move(ubatches)) {} -llama_kv_cache_recurrent_state::~llama_kv_cache_recurrent_state() = default; +llama_memory_recurrent_state::~llama_memory_recurrent_state() = default; -bool llama_kv_cache_recurrent_state::next() { +bool llama_memory_recurrent_state::next() { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); if (++i_next >= ubatches.size()) { @@ -1062,54 +1063,54 @@ bool llama_kv_cache_recurrent_state::next() { return true; } -bool llama_kv_cache_recurrent_state::apply() { +bool llama_memory_recurrent_state::apply() { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); - kv->find_slot(ubatches[i_next]); + mem->find_slot(ubatches[i_next]); return true; } -std::vector & llama_kv_cache_recurrent_state::out_ids() { +std::vector & llama_memory_recurrent_state::out_ids() { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); return sbatch.out_ids; } -llama_memory_status llama_kv_cache_recurrent_state::get_status() const { +llama_memory_status llama_memory_recurrent_state::get_status() const { return status; } -const llama_ubatch & llama_kv_cache_recurrent_state::get_ubatch() const { +const llama_ubatch & llama_memory_recurrent_state::get_ubatch() const { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); return ubatches[i_next]; } -uint32_t llama_kv_cache_recurrent_state::get_n_kv() const { - return is_full ? kv->size : kv->n; +uint32_t llama_memory_recurrent_state::get_n_rs() const { + return is_full ? mem->size : mem->n; } -uint32_t llama_kv_cache_recurrent_state::get_head() const { - return is_full ? 0 : kv->head; +uint32_t llama_memory_recurrent_state::get_head() const { + return is_full ? 0 : mem->head; } -int32_t llama_kv_cache_recurrent_state::get_rs_z() const { - return is_full ? 0 : kv->rs_z; +int32_t llama_memory_recurrent_state::get_rs_z() const { + return is_full ? 
0 : mem->rs_z; } -uint32_t llama_kv_cache_recurrent_state::get_size() const { - return kv->size; +uint32_t llama_memory_recurrent_state::get_size() const { + return mem->size; } -ggml_tensor * llama_kv_cache_recurrent_state::get_k_l(int32_t il) const { - return kv->k_l[il]; +ggml_tensor * llama_memory_recurrent_state::get_r_l(int32_t il) const { + return mem->r_l[il]; } -ggml_tensor * llama_kv_cache_recurrent_state::get_v_l(int32_t il) const { - return kv->v_l[il]; +ggml_tensor * llama_memory_recurrent_state::get_s_l(int32_t il) const { + return mem->s_l[il]; } -int32_t llama_kv_cache_recurrent_state::s_copy(int i) const { - return kv->cells[i + kv->head].src0; +int32_t llama_memory_recurrent_state::s_copy(int i) const { + return mem->cells[i + mem->head].src0; } diff --git a/src/llama-kv-cache-recurrent.h b/src/llama-memory-recurrent.h similarity index 72% rename from src/llama-kv-cache-recurrent.h rename to src/llama-memory-recurrent.h index f9b01a6513393..290cc84ab3fbc 100644 --- a/src/llama-kv-cache-recurrent.h +++ b/src/llama-memory-recurrent.h @@ -8,22 +8,27 @@ #include // -// llama_kv_cache_recurrent +// llama_memory_recurrent // -// TODO: extract the KV cache state used for graph computation into llama_kv_cache_recurrent_state_i +// TODO: extract the cache state used for graph computation into llama_memory_recurrent_state_i // see the implementation of llama_kv_cache_unified_state_i for an example how to do it -class llama_kv_cache_recurrent : public llama_memory_i { +class llama_memory_recurrent : public llama_memory_i { public: - llama_kv_cache_recurrent( - const llama_model & model, - ggml_type type_k, - ggml_type type_v, - bool offload, - uint32_t kv_size, - uint32_t n_seq_max); - ~llama_kv_cache_recurrent() = default; + // this callback is used to filter out layers that should not be included in the cache + using layer_filter_cb = std::function; + + llama_memory_recurrent( + const llama_model & model, + layer_filter_cb && filter, + ggml_type type_r, + ggml_type type_s, + bool offload, + uint32_t mem_size, + uint32_t n_seq_max); + + ~llama_memory_recurrent() = default; // // llama_memory_i @@ -51,7 +56,7 @@ class llama_kv_cache_recurrent : public llama_memory_i { bool prepare(const std::vector & ubatches); - // find a contiguous slot of kv cells and emplace the ubatch there + // find a contiguous slot of memory cells and emplace the ubatch there bool find_slot(const llama_ubatch & ubatch); bool get_can_shift() const override; @@ -72,7 +77,7 @@ class llama_kv_cache_recurrent : public llama_memory_i { int32_t rs_z = -1; // TODO: optimize for recurrent state needs - struct kv_cell { + struct mem_cell { llama_pos pos = -1; int32_t src = -1; // used to know where states should be copied from int32_t src0 = -1; // like src, but only used when setting the inputs (allowing to copy once) @@ -88,15 +93,16 @@ class llama_kv_cache_recurrent : public llama_memory_i { return seq_id.empty(); } - bool is_same_seq(const kv_cell & other) const { + bool is_same_seq(const mem_cell & other) const { return seq_id == other.seq_id; } }; - std::vector cells; + std::vector cells; - std::vector k_l; // per layer - std::vector v_l; + // per layer + std::vector r_l; + std::vector s_l; private: //const llama_model & model; @@ -109,8 +115,8 @@ class llama_kv_cache_recurrent : public llama_memory_i { size_t total_size() const; - size_t size_k_bytes() const; - size_t size_v_bytes() const; + size_t size_r_bytes() const; + size_t size_s_bytes() const; void state_write_meta(llama_io_write_i & io, const 
std::vector> & cell_ranges, llama_seq_id seq_id = -1) const; void state_write_data(llama_io_write_i & io, const std::vector> & cell_ranges) const; @@ -119,24 +125,22 @@ class llama_kv_cache_recurrent : public llama_memory_i { bool state_read_data(llama_io_read_i & io, uint32_t cell_count); }; -class llama_kv_cache_recurrent_state : public llama_memory_state_i { +class llama_memory_recurrent_state : public llama_memory_state_i { public: // used for errors - llama_kv_cache_recurrent_state(llama_memory_status status); + llama_memory_recurrent_state(llama_memory_status status); // used to create a full-cache state - llama_kv_cache_recurrent_state( - llama_memory_status status, - llama_kv_cache_recurrent * kv); + llama_memory_recurrent_state( + llama_memory_recurrent * mem); // used to create a state from a batch - llama_kv_cache_recurrent_state( - llama_memory_status status, - llama_kv_cache_recurrent * kv, + llama_memory_recurrent_state( + llama_memory_recurrent * mem, llama_sbatch sbatch, std::vector ubatches); - virtual ~llama_kv_cache_recurrent_state(); + virtual ~llama_memory_recurrent_state(); // // llama_memory_state_i @@ -151,23 +155,23 @@ class llama_kv_cache_recurrent_state : public llama_memory_state_i { const llama_ubatch & get_ubatch() const override; // - // llama_kv_cache_recurrent_state specific API + // llama_memory_recurrent_state specific API // - uint32_t get_n_kv() const; + uint32_t get_n_rs() const; uint32_t get_head() const; int32_t get_rs_z() const; uint32_t get_size() const; - ggml_tensor * get_k_l(int32_t il) const; - ggml_tensor * get_v_l(int32_t il) const; + ggml_tensor * get_r_l(int32_t il) const; + ggml_tensor * get_s_l(int32_t il) const; int32_t s_copy(int i) const; private: const llama_memory_status status; - llama_kv_cache_recurrent * kv; + llama_memory_recurrent * mem; llama_sbatch sbatch; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index a5eb122f998d8..a5853f8b12dc0 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -8,7 +8,8 @@ #include "llama-kv-cache-unified.h" #include "llama-kv-cache-unified-iswa.h" -#include "llama-kv-cache-recurrent.h" +#include "llama-memory-hybrid.h" +#include "llama-memory-recurrent.h" #include "ggml-cpp.h" @@ -470,6 +471,10 @@ void llama_model::load_hparams(llama_model_loader & ml) { std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0); std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0); std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0); + std::fill( + hparams.recurrent_layer_arr.begin(), + hparams.recurrent_layer_arr.end(), + llm_arch_is_recurrent(ml.get_arch())); std::fill(hparams.rope_sections.begin(), hparams.rope_sections.end(), 0); @@ -9111,7 +9116,7 @@ struct llm_build_mamba : public llm_graph_context { // {n_embd, n_tokens} inpL = build_inp_embd(model.tok_embd); - ggml_tensor * state_copy = build_inp_s_copy(); + auto * rs_inp = build_rs_inp(); for (int il = 0; il < n_layer; ++il) { // norm @@ -9120,7 +9125,7 @@ struct llm_build_mamba : public llm_graph_context { LLM_NORM_RMS, il); cb(cur, "attn_norm", il); - cur = build_mamba_layer(gf, cur, state_copy, ubatch, il); + cur = build_mamba_layer(rs_inp, gf, cur, ubatch, il); if (il == n_layer - 1) { // skip computing output for unused tokens @@ -9158,12 +9163,12 @@ struct llm_build_mamba : public llm_graph_context { // TODO: split ggml_tensor * build_mamba_layer( - ggml_cgraph * gf, - ggml_tensor * cur, - ggml_tensor * state_copy, - const llama_ubatch & ubatch, - int il) const { - const auto * kv_state = 
static_cast(mstate); + llm_graph_input_rs * inp, + ggml_cgraph * gf, + ggml_tensor * cur, + const llama_ubatch & ubatch, + int il) const { + const auto * kv_state = static_cast(mstate); const auto kv_head = kv_state->get_head(); @@ -9183,17 +9188,17 @@ struct llm_build_mamba : public llm_graph_context { GGML_ASSERT(ubatch.equal_seqs); GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs); - ggml_tensor * conv_states_all = kv_state->get_k_l(il); - ggml_tensor * ssm_states_all = kv_state->get_v_l(il); + ggml_tensor * conv_states_all = kv_state->get_r_l(il); + ggml_tensor * ssm_states_all = kv_state->get_s_l(il); // (ab)using the KV cache to store the states - ggml_tensor * conv = build_recurrent_state( - gf, conv_states_all, state_copy, - hparams.n_embd_k_s(), n_seqs); + ggml_tensor * conv = build_rs( + inp, gf, conv_states_all, + hparams.n_embd_r(), n_seqs); conv = ggml_reshape_3d(ctx0, conv, d_conv - 1, d_inner, n_seqs); - ggml_tensor * ssm = build_recurrent_state( - gf, ssm_states_all, state_copy, - hparams.n_embd_v_s(), n_seqs); + ggml_tensor * ssm = build_rs( + inp, gf, ssm_states_all, + hparams.n_embd_s(), n_seqs); ssm = ggml_reshape_3d(ctx0, ssm, d_state, d_inner, n_seqs); // {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs} @@ -11904,13 +11909,13 @@ struct llm_build_rwkv6_base : public llm_graph_context { } ggml_tensor * build_rwkv6_time_mix( + llm_graph_input_rs * inp, ggml_cgraph * gf, ggml_tensor * cur, ggml_tensor * x_prev, - ggml_tensor * state_copy, const llama_ubatch & ubatch, int il) const { - const auto * kv_state = static_cast(mstate); + const auto * kv_state = static_cast(mstate); const auto n_tokens = ubatch.n_tokens; const auto n_seqs = ubatch.n_seqs; @@ -12031,9 +12036,9 @@ struct llm_build_rwkv6_base : public llm_graph_context { k = ggml_sub(ctx0, k, ggml_mul(ctx0, k, w)); } - ggml_tensor * wkv_state = build_recurrent_state( - gf, kv_state->get_v_l(il), state_copy, - hparams.n_embd_v_s(), n_seqs); + ggml_tensor * wkv_state = build_rs( + inp, gf, kv_state->get_s_l(il), + hparams.n_embd_s(), n_seqs); ggml_tensor * wkv_output; if (is_qrwkv) { @@ -12051,9 +12056,9 @@ struct llm_build_rwkv6_base : public llm_graph_context { wkv_state, ggml_view_1d( ctx0, - kv_state->get_v_l(il), - hparams.n_embd_v_s() * n_seqs, - hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_state->get_v_l(il)) + kv_state->get_s_l(il), + hparams.n_embd_s() * n_seqs, + hparams.n_embd_s() * kv_head * ggml_element_size(kv_state->get_s_l(il)) ) ) ); @@ -12087,7 +12092,7 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { inpL = build_inp_embd(model.tok_embd); inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1); - ggml_tensor * state_copy = build_inp_s_copy(); + auto * rs_inp = build_rs_inp(); const auto n_embd = hparams.n_embd; const auto n_seq_tokens = ubatch.n_seq_tokens; @@ -12097,9 +12102,7 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { const llama_layer * layer = &model.layers[il]; inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); - ggml_tensor * token_shift = build_rwkv_token_shift_load( - gf, state_copy, ubatch, il - ); + ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, gf, ubatch, il); ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0); ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift)); @@ -12114,7 +12117,7 @@ struct llm_build_rwkv6 : public 
llm_build_rwkv6_base { 1 ); - cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, ubatch, il); + cur = build_rwkv6_time_mix(rs_inp, gf, att_norm, x_prev, ubatch, il); ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); cb(ffn_inp, "ffn_inp", il); @@ -12177,14 +12180,14 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { // ref: https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1/blob/main/modeling_rwkv6qwen2.py struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { llm_build_rwkv6qwen2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv6_base(model, params) { - GGML_ASSERT(n_embd == hparams.n_embd_k_s()); + GGML_ASSERT(n_embd == hparams.n_embd_r()); ggml_tensor * cur; ggml_tensor * inpL; inpL = build_inp_embd(model.tok_embd); - ggml_tensor * state_copy = build_inp_s_copy(); + auto * rs_inp = build_rs_inp(); const auto n_embd = hparams.n_embd; const auto n_seq_tokens = ubatch.n_seq_tokens; @@ -12194,9 +12197,7 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { const llama_layer * layer = &model.layers[il]; inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); - ggml_tensor * token_shift = build_rwkv_token_shift_load( - gf, state_copy, ubatch, il - ); + ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, gf, ubatch, il); ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il); cb(att_norm, "attn_norm", il); @@ -12208,7 +12209,7 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { 1 ); - cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, ubatch, il); + cur = build_rwkv6_time_mix(rs_inp, gf, att_norm, x_prev, ubatch, il); token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)); ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); @@ -12296,14 +12297,14 @@ struct llm_build_rwkv7_base : public llm_graph_context { } ggml_tensor * build_rwkv7_time_mix( + llm_graph_input_rs * inp, ggml_cgraph * gf, ggml_tensor * cur, ggml_tensor * x_prev, - ggml_tensor * state_copy, ggml_tensor *& first_layer_value, const llama_ubatch & ubatch, int il) const { - const auto * kv_state = static_cast(mstate); + const auto * kv_state = static_cast(mstate); const auto n_tokens = ubatch.n_tokens; const auto n_seqs = ubatch.n_seqs; @@ -12382,9 +12383,9 @@ struct llm_build_rwkv7_base : public llm_graph_context { v = ggml_reshape_3d(ctx0, v, head_size, head_count, n_tokens); a = ggml_reshape_3d(ctx0, a, head_size, head_count, n_tokens); - ggml_tensor * wkv_state = build_recurrent_state( - gf, kv_state->get_v_l(il), state_copy, - hparams.n_embd_v_s(), n_seqs); + ggml_tensor * wkv_state = build_rs( + inp, gf, kv_state->get_s_l(il), + hparams.n_embd_s(), n_seqs); ggml_tensor * wkv_output = ggml_rwkv_wkv7(ctx0, r, w, k, v, ggml_neg(ctx0, kk), ggml_mul(ctx0, kk, a), wkv_state); cur = ggml_view_1d(ctx0, wkv_output, n_embd * n_tokens, 0); @@ -12397,9 +12398,9 @@ struct llm_build_rwkv7_base : public llm_graph_context { wkv_state, ggml_view_1d( ctx0, - kv_state->get_v_l(il), - hparams.n_embd_v_s() * n_seqs, - hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_state->get_v_l(il)) + kv_state->get_s_l(il), + hparams.n_embd_s() * n_seqs, + hparams.n_embd_s() * kv_head * ggml_element_size(kv_state->get_s_l(il)) ) ) ); @@ -12440,7 +12441,7 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base { inpL = build_inp_embd(model.tok_embd); inpL = 
build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1); - ggml_tensor * state_copy = build_inp_s_copy(); + auto * rs_inp = build_rs_inp(); const auto n_embd = hparams.n_embd; const auto n_seq_tokens = ubatch.n_seq_tokens; @@ -12450,9 +12451,7 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base { const llama_layer * layer = &model.layers[il]; inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); - ggml_tensor * token_shift = build_rwkv_token_shift_load( - gf, state_copy, ubatch, il - ); + ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, gf, ubatch, il); ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0); ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift)); @@ -12467,7 +12466,7 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base { 1 ); - cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, v_first, ubatch, il); + cur = build_rwkv7_time_mix(rs_inp, gf, att_norm, x_prev, v_first, ubatch, il); ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); cb(ffn_inp, "ffn_inp", il); @@ -12525,7 +12524,7 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base { struct llm_build_arwkv7 : public llm_build_rwkv7_base { llm_build_arwkv7(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv7_base(model, params) { - GGML_ASSERT(n_embd == hparams.n_embd_k_s()); + GGML_ASSERT(n_embd == hparams.n_embd_r()); ggml_tensor * cur; ggml_tensor * inpL; @@ -12533,7 +12532,7 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { inpL = build_inp_embd(model.tok_embd); - ggml_tensor * state_copy = build_inp_s_copy(); + auto * rs_inp = build_rs_inp(); const auto n_embd = hparams.n_embd; const auto n_seq_tokens = ubatch.n_seq_tokens; @@ -12543,9 +12542,7 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { const llama_layer * layer = &model.layers[il]; inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); - ggml_tensor * token_shift = build_rwkv_token_shift_load( - gf, state_copy, ubatch, il - ); + ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, gf, ubatch, il); ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il); cb(att_norm, "attn_norm", il); @@ -12557,7 +12554,7 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { 1 ); - cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, v_first, ubatch, il); + cur = build_rwkv7_time_mix(rs_inp, gf, att_norm, x_prev, v_first, ubatch, il); token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)); ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); @@ -13738,6 +13735,8 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params, llama_memory_i * res; switch (arch) { + // Models that need specific instantiation should be handled in the + // switch statement case LLM_ARCH_BERT: case LLM_ARCH_JINA_BERT_V2: case LLM_ARCH_NOMIC_BERT: @@ -13747,57 +13746,75 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params, { res = nullptr; } break; - case LLM_ARCH_MAMBA: - case LLM_ARCH_RWKV6: - case LLM_ARCH_RWKV6QWEN2: - case LLM_ARCH_RWKV7: - case LLM_ARCH_ARWKV7: - { - res = new llama_kv_cache_recurrent( - *this, - GGML_TYPE_F32, - GGML_TYPE_F32, - cparams.offload_kqv, - 
std::max((uint32_t) 1, cparams.n_seq_max), - cparams.n_seq_max); - } break; + // Models that need standard caching should rely on recurrent/hybrid + // checks default: { - const auto padding = llama_kv_cache_unified::get_padding(cparams); - - cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding); - - LLAMA_LOG_DEBUG("%s: n_ctx = %u (padded)\n", __func__, cparams.n_ctx); - - if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) { - GGML_ASSERT(hparams.is_swa_any()); - - res = new llama_kv_cache_unified_iswa( - *this, - params.type_k, - params.type_v, - !cparams.flash_attn, - cparams.offload_kqv, - params.swa_full, - cparams.n_ctx, - cparams.n_seq_max, - cparams.n_ubatch, - padding); - } else { - GGML_ASSERT(!hparams.is_swa_any()); - - res = new llama_kv_cache_unified( + if (llm_arch_is_recurrent(arch)) { + res = new llama_memory_recurrent( *this, nullptr, - params.type_k, - params.type_v, - !cparams.flash_attn, + GGML_TYPE_F32, + GGML_TYPE_F32, cparams.offload_kqv, - cparams.n_ctx, - cparams.n_seq_max, - padding, - hparams.n_swa, - hparams.swa_type); + std::max((uint32_t) 1, cparams.n_seq_max), + cparams.n_seq_max); + } else if (llm_arch_is_hybrid(arch)) { + const auto padding = llama_kv_cache_unified::get_padding(cparams); + + cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding); + + res = new llama_memory_hybrid( + /* model */ *this, + /* attn_type_k */ params.type_k, + /* attn_type_v */ params.type_v, + /* attn_v_trans */ !cparams.flash_attn, + /* attn_kv_size */ cparams.n_ctx, + /* attn_n_pad */ padding, + /* attn_n_swa */ hparams.n_swa, + /* attn_swa_type */ hparams.swa_type, + /* recurrent_type_k */ GGML_TYPE_F32, + /* recurrent_type_v */ GGML_TYPE_F32, + /* recurrent_kv_size */ std::max((uint32_t) 1, cparams.n_seq_max), + /* n_seq_max */ cparams.n_seq_max, + /* offload */ cparams.offload_kqv); + } else { + const auto padding = llama_kv_cache_unified::get_padding(cparams); + + cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding); + + LLAMA_LOG_DEBUG("%s: n_ctx = %u (padded)\n", __func__, cparams.n_ctx); + + if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) { + GGML_ASSERT(hparams.is_swa_any()); + + res = new llama_kv_cache_unified_iswa( + *this, + params.type_k, + params.type_v, + !cparams.flash_attn, + cparams.offload_kqv, + params.swa_full, + cparams.n_ctx, + cparams.n_seq_max, + cparams.n_ubatch, + padding); + } else { + GGML_ASSERT(!hparams.is_swa_any()); + + res = new llama_kv_cache_unified( + *this, + nullptr, + params.type_k, + params.type_v, + !cparams.flash_attn, + cparams.offload_kqv, + cparams.n_ctx, + cparams.n_seq_max, + padding, + hparams.n_swa, + hparams.swa_type); + } } } } @@ -14377,14 +14394,7 @@ llama_token llama_model_decoder_start_token(const llama_model * model) { } bool llama_model_is_recurrent(const llama_model * model) { - switch (model->arch) { - case LLM_ARCH_MAMBA: return true; - case LLM_ARCH_RWKV6: return true; - case LLM_ARCH_RWKV6QWEN2: return true; - case LLM_ARCH_RWKV7: return true; - case LLM_ARCH_ARWKV7: return true; - default: return false; - } + return llm_arch_is_recurrent(model->arch); } const std::vector> & llama_internal_get_tensor_map(const llama_model * model) { From 35fc17b0ec5c7ffa3f1f6bff869b71ab514101e8 Mon Sep 17 00:00:00 2001 From: 0cc4m Date: Thu, 19 Jun 2025 09:15:42 +0200 Subject: [PATCH 097/192] Vulkan: Set device max size for host memory to avoid OOM warning and fallback to CPU buffer (#14249) --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp 
b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 8d62303aabd7f..1375bfeb9dc50 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -9495,6 +9495,12 @@ static size_t ggml_backend_vk_host_buffer_type_get_alignment(ggml_backend_buffer UNUSED(buft); } +static size_t ggml_backend_vk_host_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) { + return vk_instance.devices[0]->suballocation_block_size; + + UNUSED(buft); +} + // Should be changed to return device-specific host buffer type // but that probably requires changes in llama.cpp ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type() { @@ -9503,7 +9509,7 @@ ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type() { /* .get_name = */ ggml_backend_vk_host_buffer_type_name, /* .alloc_buffer = */ ggml_backend_vk_host_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_vk_host_buffer_type_get_alignment, - /* .get_max_size = */ NULL, // defaults to SIZE_MAX + /* .get_max_size = */ ggml_backend_vk_host_buffer_type_get_max_size, /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size, /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host, }, From 2fd26670a33380765a6c6eba9f8c875bf9ed207d Mon Sep 17 00:00:00 2001 From: Aaron Teo Date: Thu, 19 Jun 2025 17:48:54 +0800 Subject: [PATCH 098/192] llamafile : support s390x SIMD instruction set (#14273) --- ggml/src/ggml-cpu/llamafile/sgemm.cpp | 55 ++++++++++++++++++++++++++- ggml/src/ggml-cpu/llamafile/sgemm.h | 5 +++ 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp index 1c545f803327b..7ed3874afb87a 100644 --- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp +++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp @@ -62,7 +62,7 @@ #define NOINLINE __attribute__((__noinline__)) #endif -#if defined(__ARM_NEON) || defined(__AVX512F__) +#if defined(__ARM_NEON) || defined(__AVX512F__) || defined(__VXE__) || defined(__VXE2__) #define VECTOR_REGISTERS 32 #else #define VECTOR_REGISTERS 16 @@ -109,6 +109,12 @@ inline float16x8_t sub(float16x8_t x, float16x8_t y) { return vsubq_f16(x, y); } inline float16x8_t mul(float16x8_t x, float16x8_t y) { return vmulq_f16(x, y); } #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#if defined(__VXE__) || defined(__VXE2__) +inline float32x4_t add(float32x4_t x, float32x4_t y) { return vec_add(x, y); } +inline float32x4_t sub(float32x4_t x, float32x4_t y) { return vec_sub(x, y); } +inline float32x4_t mul(float32x4_t x, float32x4_t y) { return vec_mul(x, y); } +#endif + #if defined(__MMA__) typedef vector unsigned char vec_t; typedef __vector_quad acc_t; @@ -162,6 +168,13 @@ inline float16x8_t madd(float16x8_t a, float16x8_t b, float16x8_t c) { #endif #endif +#if defined(__VXE__) || defined(__VXE2__) +template <> +inline float32x4_t madd(float32x4_t a, float32x4_t b, float32x4_t c) { + return vec_madd(a, b, c); +} +#endif + //////////////////////////////////////////////////////////////////////////////////////////////////// // VECTORIZED HORIZONTAL SUM @@ -178,6 +191,13 @@ inline float hsum(float16x8_t x) { } #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#if defined(__VXE__) || defined(__VXE2__) +inline float hsum(float32x4_t x) { + float32x4_t tmp = x + vec_reve(x); + return tmp[0] + tmp[1]; +} +#endif + #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) inline float hsum(__m128 x) { #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) @@ -227,6 +247,21 @@ template <> 
inline float32x4_t load(const ggml_fp16_t *p) { #endif // _MSC_VER #endif // __ARM_NEON +#if defined(__VXE__) || defined(__VXE2__) +template <> inline float32x4_t load(const ggml_fp16_t * p) { + float tmp[4]; + + for (int i = 0; i < 4; i++) { + tmp[i] = GGML_FP16_TO_FP32(p[i]); + } + + return vec_xl(0, (const float *)(tmp)); +} +template <> inline float32x4_t load(const float * p) { + return vec_xl(0, p); +} +#endif + #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) template <> inline __m128 load(const float *p) { return _mm_loadu_ps(p); @@ -3319,6 +3354,14 @@ bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64 (const float *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); +#elif defined(__VXE__) || defined(__VXE2__) + if (n < 4) + return false; + tinyBLAS<4, float32x4_t, float32x4_t, float, float, float> tb{ params, + k, (const float *)A, lda, + (const float *)B, ldb, + (float *)C, ldc}; + return tb.matmul(m, n); #elif defined(__MMA__) if (k % 8) return false; @@ -3410,6 +3453,16 @@ bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64 (float *)C, ldc}; return tb.matmul(m, n); } +#elif defined(__VXE__) || defined(__VXE2__) + if (n < 4) + return false; + if (Btype == GGML_TYPE_F16) { + tinyBLAS<4, float32x4_t, float32x4_t, ggml_fp16_t, ggml_fp16_t, float> tb{ params, + k, (const ggml_fp16_t *)A, lda, + (const ggml_fp16_t *)B, ldb, + (float *)C, ldc}; + return tb.matmul(m, n); + } #endif return false; } diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.h b/ggml/src/ggml-cpu/llamafile/sgemm.h index 3d2909515242a..729e8853d516c 100644 --- a/ggml/src/ggml-cpu/llamafile/sgemm.h +++ b/ggml/src/ggml-cpu/llamafile/sgemm.h @@ -1,6 +1,11 @@ #pragma once #include #include + +#if defined(__VXE__) || defined(__VXE2__) +#include +#endif + #ifdef __cplusplus extern "C" { #endif From 7577185638820d03dcfde2fab95c8a2ed77e6b5a Mon Sep 17 00:00:00 2001 From: pqnet <119850+pqnet@users.noreply.github.com> Date: Thu, 19 Jun 2025 12:21:40 +0200 Subject: [PATCH 099/192] convert : fix remote option in Windows (#14100) --- convert_hf_to_gguf.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index b754dd815a2dc..2e08db3457b60 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -6389,8 +6389,8 @@ def parse_args() -> argparse.Namespace: help="model is executed on big endian machine", ) parser.add_argument( - "model", type=Path, - help="directory containing model file", + "model", type=str, + help="directory containing model file or huggingface repository ID (if --remote)", nargs="?", ) parser.add_argument( @@ -6493,18 +6493,20 @@ def main() -> None: else: logging.basicConfig(level=logging.INFO) - dir_model = args.model - if args.remote: + hf_repo_id = args.model from huggingface_hub import snapshot_download local_dir = snapshot_download( - repo_id=str(dir_model), + repo_id=hf_repo_id, allow_patterns=["LICENSE", "*.json", "*.md", "*.txt", "tokenizer.model"]) dir_model = Path(local_dir) logger.info(f"Downloaded config and tokenizer to {local_dir}") + else: + hf_repo_id = None + dir_model = Path(args.model) if not dir_model.is_dir(): - logger.error(f'Error: {args.model} is not a directory') + logger.error(f'Error: {dir_model} is not a directory') sys.exit(1) ftype_map: dict[str, gguf.LlamaFileType] = { @@ -6524,9 +6526,9 @@ def main() -> None: if args.outfile is not None: fname_out = args.outfile - elif args.remote: + elif 
hf_repo_id: # if remote, use the model ID as the output file name - fname_out = Path("./" + str(args.model).replace("/", "-") + "-{ftype}.gguf") + fname_out = Path("./" + hf_repo_id.replace("/", "-") + "-{ftype}.gguf") else: fname_out = dir_model @@ -6555,7 +6557,7 @@ def main() -> None: split_max_tensors=args.split_max_tensors, split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run, small_first_shard=args.no_tensor_first_split, - remote_hf_model_id=str(args.model) if args.remote else None) + remote_hf_model_id=hf_repo_id) if args.vocab_only: logger.info("Exporting model vocab...") From e106cfe4efb09e4d63d641e9a7f35cad764ea977 Mon Sep 17 00:00:00 2001 From: bashayer hijji Date: Thu, 19 Jun 2025 13:24:12 +0300 Subject: [PATCH 100/192] llama-bench : add --no-warmup flag (#14224) (#14270) Add no_warmup parameter to cmd_params struct and command-line parsing to allow users to skip warmup runs before benchmarking. - Add no_warmup boolean field to cmd_params struct - Add --no-warmup command-line argument parsing - Add help text documentation for the new flag - Wrap existing warmup logic in conditional check - Maintain full backward compatibility (warmup enabled by default) Addresses #14224 --- tools/llama-bench/llama-bench.cpp | 44 ++++++++++++++++++------------- 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/tools/llama-bench/llama-bench.cpp b/tools/llama-bench/llama-bench.cpp index e59d61f195675..b80e984d0245b 100644 --- a/tools/llama-bench/llama-bench.cpp +++ b/tools/llama-bench/llama-bench.cpp @@ -267,6 +267,7 @@ struct cmd_params { int delay; bool verbose; bool progress; + bool no_warmup; output_formats output_format; output_formats output_format_stderr; }; @@ -303,6 +304,7 @@ static const cmd_params cmd_params_defaults = { /* delay */ 0, /* verbose */ false, /* progress */ false, + /* no_warmup */ false, /* output_format */ MARKDOWN, /* output_format_stderr */ NONE, }; @@ -325,6 +327,7 @@ static void print_usage(int /* argc */, char ** argv) { output_format_str(cmd_params_defaults.output_format_stderr)); printf(" -v, --verbose verbose output\n"); printf(" --progress print test progress indicators\n"); + printf(" --no-warmup skip warmup runs before benchmarking\n"); printf("\n"); printf("test parameters:\n"); printf(" -m, --model (default: %s)\n", join(cmd_params_defaults.model, ",").c_str()); @@ -425,6 +428,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { params.prio = cmd_params_defaults.prio; params.delay = cmd_params_defaults.delay; params.progress = cmd_params_defaults.progress; + params.no_warmup = cmd_params_defaults.no_warmup; for (int i = 1; i < argc; i++) { arg = argv[i]; @@ -798,6 +802,8 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { params.verbose = true; } else if (arg == "--progress") { params.progress = true; + } else if (arg == "--no-warmup") { + params.no_warmup = true; } else { invalid_param = true; break; @@ -1925,25 +1931,27 @@ int main(int argc, char ** argv) { llama_attach_threadpool(ctx, threadpool, NULL); // warmup run - if (t.n_prompt > 0) { - if (params.progress) { - fprintf(stderr, "llama-bench: benchmark %d/%zu: warmup prompt run\n", params_idx, params_count); - } - //test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads); - bool res = test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads); - if (!res) { - fprintf(stderr, "%s: error: failed to run prompt warmup\n", __func__); - exit(1); - } - } - if (t.n_gen > 0) { - if (params.progress) { - fprintf(stderr, 
"llama-bench: benchmark %d/%zu: warmup generation run\n", params_idx, params_count); + if (!params.no_warmup) { + if (t.n_prompt > 0) { + if (params.progress) { + fprintf(stderr, "llama-bench: benchmark %d/%zu: warmup prompt run\n", params_idx, params_count); + } + //test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads); + bool res = test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads); + if (!res) { + fprintf(stderr, "%s: error: failed to run prompt warmup\n", __func__); + exit(1); + } } - bool res = test_gen(ctx, 1, t.n_threads); - if (!res) { - fprintf(stderr, "%s: error: failed to run gen warmup\n", __func__); - exit(1); + if (t.n_gen > 0) { + if (params.progress) { + fprintf(stderr, "llama-bench: benchmark %d/%zu: warmup generation run\n", params_idx, params_count); + } + bool res = test_gen(ctx, 1, t.n_threads); + if (!res) { + fprintf(stderr, "%s: error: failed to run gen warmup\n", __func__); + exit(1); + } } } From ace9d1984e070d1faf0d6511e7f8f4b659976f34 Mon Sep 17 00:00:00 2001 From: Anton Mitkov Date: Thu, 19 Jun 2025 11:40:21 +0100 Subject: [PATCH 101/192] sycl: Cleanup codepaths in Get Rows in sycl backend (#14215) Addresses unused reorder path --- ggml/src/ggml-sycl/getrows.cpp | 98 +--------------------------------- 1 file changed, 2 insertions(+), 96 deletions(-) diff --git a/ggml/src/ggml-sycl/getrows.cpp b/ggml/src/ggml-sycl/getrows.cpp index 4a7712781364e..03f8dd907485e 100644 --- a/ggml/src/ggml-sycl/getrows.cpp +++ b/ggml/src/ggml-sycl/getrows.cpp @@ -60,54 +60,6 @@ static void k_get_rows( dst_row[iybs + iqs + y_offset] = v.y(); } -template -static void k_get_rows_reorder( - const void * src0, const void *src0_dq, const int32_t * src1, dst_t * dst, - int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/ - /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/ - /*size_t s0,*/ size_t s1, size_t s2, size_t s3, - /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03, - size_t s10, size_t s11, size_t s12, - const sycl::nd_item<3> &item_ct1/*, size_t s13*/) { - - const int i00 = (item_ct1.get_group(2) * item_ct1.get_local_range(2) + - item_ct1.get_local_id(2)) * - 2; - const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) + - item_ct1.get_local_id(1); - const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) + - item_ct1.get_local_id(0)) / - ne12; - const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) + - item_ct1.get_local_id(0)) % - ne12; - - if (i00 >= ne00) { - return; - } - auto ncols = ne00; - const int i01 = src1[i10*s10 + i11*s11 + i12*s12]; - - dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3; - - const int src0_off = i01 * ncols + i00; - const int ib = src0_off / QK4_0; // block index - const int iqs = (i00%qk)/qr; // x quant index - const int iybs = i00 - i00%qk; // dst block start index - const int y_offset = qr == 1 ? 
1 : qk/2; - - // dequantize - dfloat2 v; - dequantize_kernel_recorder((const void *)src0_dq, ib, (const void *)src0, src0_off/2, v); - - dst_row[iybs + iqs + 0] = v.x(); - dst_row[iybs + iqs + y_offset] = v.y(); - - GGML_UNUSED(nb01); - GGML_UNUSED(nb02); - GGML_UNUSED(nb03); -} - template static void k_get_rows_float( const src0_t * src0, const int32_t * src1, dst_t * dst, @@ -177,47 +129,6 @@ static void get_rows_sycl(ggml_backend_sycl_context & ctx, const ggml_tensor *sr GGML_UNUSED(ctx); } -template -static void get_rows_sycl_reorder(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, - ggml_tensor *dst, const void *src0_dd, - const int32_t *src1_dd, float *dst_dd, - queue_ptr stream) { - - GGML_TENSOR_BINARY_OP_LOCALS - - const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE); - const int block_num_x = (ne00 + 2*SYCL_GET_ROWS_BLOCK_SIZE - 1) / (2*SYCL_GET_ROWS_BLOCK_SIZE); - const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x); - - // strides in elements - //const size_t s0 = nb0 / ggml_element_size(dst); - const size_t s1 = nb1 / ggml_element_size(dst); - const size_t s2 = nb2 / ggml_element_size(dst); - const size_t s3 = nb3 / ggml_element_size(dst); - - const size_t s10 = nb10 / ggml_element_size(src1); - const size_t s11 = nb11 / ggml_element_size(src1); - const size_t s12 = nb12 / ggml_element_size(src1); - //const size_t s13 = nb13 / ggml_element_size(src1); - - GGML_ASSERT(ne00 % 2 == 0); - - const uint8_t* src0_q = (const uint8_t*)src0_dd; - const size_t ncols = ne00; - const size_t nrows = ne01; - const sycl::half* src0_dq = (const sycl::half*)(src0_q + nrows * ncols / 2); - stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]]{ - k_get_rows_reorder( - src0_dd, src0_dq, src1_dd, dst_dd, ne00, ne12, s1, s2, - s3, nb01, nb02, nb03, s10, s11, s12, item_ct1); - }); - - GGML_UNUSED(dst); - GGML_UNUSED(ctx); -} - - template static void get_rows_sycl_float(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst, @@ -277,13 +188,8 @@ void ggml_sycl_op_get_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { src1_i32, (float *)dst->data, ctx.stream()); break; case GGML_TYPE_Q4_0: - if (ctx.opt_feature.reorder && dst->op == GGML_OP_MUL_MAT) { - get_rows_sycl_reorder(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data, - src1_i32, (float *)dst->data, ctx.stream()); - } else { - get_rows_sycl(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data, - src1_i32, (float *)dst->data, ctx.stream()); - } + get_rows_sycl(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data, + src1_i32, (float *)dst->data, ctx.stream()); break; case GGML_TYPE_Q4_1: get_rows_sycl(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data, From bdd16d2769777765854fee944cffc4126a88d4a9 Mon Sep 17 00:00:00 2001 From: fanyang Date: Thu, 19 Jun 2025 20:49:48 +0800 Subject: [PATCH 102/192] build : suppress gcc15 compile warnings (#14261) * Change _contains_any() substrs to std::string_view and fix the find comparison logic. 
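A minimal standalone sketch of the warning-suppression pattern this change extends to GCC (an illustrative helper, not the actual llama.cpp sources): std::wstring_convert and std::codecvt_utf8 have been deprecated since C++17, so each use is bracketed by matching diagnostic push/pop pragmas for both Clang and GCC.

#include <codecvt>
#include <locale>
#include <string>

// Hypothetical helper; common.cpp, ggml-backend-reg.cpp and unicode.cpp apply
// the same pragma pattern around their std::codecvt_utf8 usages.
static std::wstring utf8_to_wstring(const std::string & s) {
#if defined(__clang__)
#    pragma clang diagnostic push
#    pragma clang diagnostic ignored "-Wdeprecated-declarations"
#elif defined(__GNUC__)
#    pragma GCC diagnostic push
#    pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
    std::wstring_convert<std::codecvt_utf8<wchar_t>> conv; // deprecated since C++17
#if defined(__clang__)
#    pragma clang diagnostic pop
#elif defined(__GNUC__)
#    pragma GCC diagnostic pop
#endif
    return conv.from_bytes(s);
}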
--- common/common.cpp | 6 ++++++ ggml/src/ggml-backend-reg.cpp | 5 +++++ src/llama-vocab.cpp | 4 ++-- src/unicode.cpp | 5 +++++ 4 files changed, 18 insertions(+), 2 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index eb80cee0894a6..c2c94e7ae6c08 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -706,11 +706,17 @@ bool fs_validate_filename(const std::string & filename) { // disable C++17 deprecation warning for std::codecvt_utf8 # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wdeprecated-declarations" +#elif defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif + std::wstring_convert, char32_t> converter; #if defined(__clang__) # pragma clang diagnostic pop +#elif defined(__GNUC__) +# pragma GCC diagnostic pop #endif filename_utf32 = converter.from_bytes(filename); diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp index 405d8e31514b5..2d93771fd1cc0 100644 --- a/ggml/src/ggml-backend-reg.cpp +++ b/ggml/src/ggml-backend-reg.cpp @@ -69,6 +69,9 @@ #if defined(__clang__) # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wdeprecated-declarations" +#elif defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif namespace fs = std::filesystem; @@ -91,6 +94,8 @@ static std::string path_str(const fs::path & path) { #if defined(__clang__) # pragma clang diagnostic pop +#elif defined(__GNUC__) +# pragma GCC diagnostic pop #endif #ifdef _WIN32 diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index dd2251ef3cbef..d90f1d6b1ea63 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -2060,9 +2060,9 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { //NOTE: Per token attributes are missing from the GGUF file. //TODO: Extract attributes from GGUF file. 
{ - auto _contains_any = [] (const std::string & str, const std::vector & substrs) -> bool { + auto _contains_any = [] (const std::string & str, const std::vector & substrs) -> bool { for (const auto & substr : substrs) { - if (str.find(substr) < std::string::npos) { + if (str.find(substr) != std::string::npos) { return true; } } diff --git a/src/unicode.cpp b/src/unicode.cpp index e63bb4ab085d6..43a4581b961fe 100644 --- a/src/unicode.cpp +++ b/src/unicode.cpp @@ -204,12 +204,17 @@ static inline std::wstring unicode_wstring_from_utf8(const std::string & s) { // disable C++17 deprecation warning for std::codecvt_utf8 # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wdeprecated-declarations" +#elif defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif std::wstring_convert> conv; #if defined(__clang__) # pragma clang diagnostic pop +#elif defined(__GNUC__) +# pragma GCC diagnostic pop #endif return conv.from_bytes(s); From 28076dcf696eb26dfdcde67d86d83225c63a667c Mon Sep 17 00:00:00 2001 From: aa956 Date: Thu, 19 Jun 2025 16:01:03 +0300 Subject: [PATCH 103/192] server : add server parameters for draft model cache type (#13782) Co-authored-by: aa956 <27946957+aa956@users.noreply.github.com> --- common/arg.cpp | 26 ++++++++++++++++++++++++++ common/common.h | 3 +++ tools/server/README.md | 2 ++ tools/server/server.cpp | 6 ++---- 4 files changed, 33 insertions(+), 4 deletions(-) diff --git a/common/arg.cpp b/common/arg.cpp index 231de227a9122..3dfaa71eff188 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -3210,6 +3210,32 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.speculative.model.path = value; } ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODEL_DRAFT")); + add_opt(common_arg( + {"-ctkd", "--cache-type-k-draft"}, "TYPE", + string_format( + "KV cache data type for K for the draft model\n" + "allowed values: %s\n" + "(default: %s)", + get_all_kv_cache_types().c_str(), + ggml_type_name(params.speculative.cache_type_k) + ), + [](common_params & params, const std::string & value) { + params.speculative.cache_type_k = kv_cache_type_from_str(value); + } + ).set_env("LLAMA_ARG_CACHE_TYPE_K_DRAFT")); + add_opt(common_arg( + {"-ctvd", "--cache-type-v-draft"}, "TYPE", + string_format( + "KV cache data type for V for the draft model\n" + "allowed values: %s\n" + "(default: %s)", + get_all_kv_cache_types().c_str(), + ggml_type_name(params.speculative.cache_type_v) + ), + [](common_params & params, const std::string & value) { + params.speculative.cache_type_v = kv_cache_type_from_str(value); + } + ).set_env("LLAMA_ARG_CACHE_TYPE_V_DRAFT")); add_opt(common_arg( {"-mv", "--model-vocoder"}, "FNAME", diff --git a/common/common.h b/common/common.h index 00b6ca03a20b4..5710c4e9735fd 100644 --- a/common/common.h +++ b/common/common.h @@ -199,6 +199,9 @@ struct common_params_speculative { float p_split = 0.1f; // speculative decoding split probability float p_min = 0.75f; // minimum speculative decoding probability (greedy) + ggml_type cache_type_k = GGML_TYPE_F16; // KV cache data type for the K + ggml_type cache_type_v = GGML_TYPE_F16; // KV cache data type for the V + struct cpu_params cpuparams; struct cpu_params cpuparams_batch; diff --git a/tools/server/README.md b/tools/server/README.md index 06533c172e530..43aa65d50ce3f 100644 --- a/tools/server/README.md +++ b/tools/server/README.md @@ -187,6 +187,8 @@ The project is under active 
development, and we are [looking for feedback and co
 | `-devd, --device-draft ` | comma-separated list of devices to use for offloading the draft model (none = don't offload)<br/>use --list-devices to see a list of available devices |
 | `-ngld, --gpu-layers-draft, --n-gpu-layers-draft N` | number of layers to store in VRAM for the draft model<br/>(env: LLAMA_ARG_N_GPU_LAYERS_DRAFT) |
 | `-md, --model-draft FNAME` | draft model for speculative decoding (default: unused)<br/>(env: LLAMA_ARG_MODEL_DRAFT) |
+| `-ctkd, --cache-type-k-draft TYPE` | KV cache data type for K for speculative decoding model<br/>allowed values: f32, f16, bf16, q8_0, q4_0, q4_1, iq4_nl, q5_0, q5_1<br/>(default: f16)<br/>(env: LLAMA_ARG_CACHE_TYPE_K_DRAFT) |
+| `-ctvd, --cache-type-v-draft TYPE` | KV cache data type for V for speculative decoding model<br/>allowed values: f32, f16, bf16, q8_0, q4_0, q4_1, iq4_nl, q5_0, q5_1<br/>(default: f16)<br/>
(env: LLAMA_ARG_CACHE_TYPE_V_DRAFT) | | `-mv, --model-vocoder FNAME` | vocoder model for audio generation (default: unused) | | `--tts-use-guide-tokens` | Use guide tokens to improve TTS word recall | | `--embd-bge-small-en-default` | use default bge-small-en-v1.5 model (note: can download weights from the internet) | diff --git a/tools/server/server.cpp b/tools/server/server.cpp index 721d09182845d..9d55b3338bcfe 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -1969,10 +1969,8 @@ struct server_context { params_dft.n_ctx = params_base.speculative.n_ctx == 0 ? params_base.n_ctx / params_base.n_parallel : params_base.speculative.n_ctx; params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers; params_dft.n_parallel = 1; - - // force F16 KV cache for the draft model for extra performance - params_dft.cache_type_k = GGML_TYPE_F16; - params_dft.cache_type_v = GGML_TYPE_F16; + params_dft.cache_type_k = params_base.speculative.cache_type_k; + params_dft.cache_type_v = params_base.speculative.cache_type_v; llama_init_dft = common_init_from_params(params_dft); From d54b24f32635af7775735b2dae73340040085ed7 Mon Sep 17 00:00:00 2001 From: Alex Trotta <44127594+Ahajha@users.noreply.github.com> Date: Thu, 19 Jun 2025 09:56:12 -0400 Subject: [PATCH 104/192] gguf-py : make sentencepiece optional (#14200) * Make sentencepiece optional * Bump to 0.18.0 * Bump patch instead of minor Co-authored-by: compilade --------- Co-authored-by: compilade --- gguf-py/gguf/vocab.py | 8 +++++++- gguf-py/pyproject.toml | 4 ++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py index cca0979862a71..44d066ee75a7e 100644 --- a/gguf-py/gguf/vocab.py +++ b/gguf-py/gguf/vocab.py @@ -7,7 +7,10 @@ from pathlib import Path from typing import Any, Callable, Sequence, Mapping, Iterable, Protocol, ClassVar, runtime_checkable -from sentencepiece import SentencePieceProcessor +try: + from sentencepiece import SentencePieceProcessor +except ImportError: + SentencePieceProcessor = None import gguf @@ -302,6 +305,9 @@ class SentencePieceVocab(Vocab): name = "spm" def __init__(self, base_path: Path): + if SentencePieceProcessor is None: + raise RuntimeError("sentencepiece is not installed") + added_tokens: dict[str, int] = {} if (fname_tokenizer := base_path / 'tokenizer.model').exists(): # normal location diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index f11351cba1767..0f3a1eeee8304 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gguf" -version = "0.17.0" +version = "0.17.1" description = "Read and write ML models in GGUF for GGML" authors = ["GGML "] packages = [ @@ -22,7 +22,7 @@ python = ">=3.8" numpy = ">=1.17" tqdm = ">=4.27" pyyaml = ">=5.1" -sentencepiece = ">=0.1.98,<=0.2.0" +sentencepiece = { version = ">=0.1.98,<=0.2.0", optional = true } PySide6 = { version = "^6.9", python = ">=3.9,<3.14", optional = true } [tool.poetry.dev-dependencies] From ab2de47a4b4ab34efb07a2d02adb0b6957c987a8 Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Thu, 19 Jun 2025 12:24:14 -0700 Subject: [PATCH 105/192] ggml-cpu : remove unnecesary arm feature detection (#14281) Support for Arm runtime feature detection has now been added to GGML_CPU_ALL_VARIANTS. This removes the old and not very functional code. 
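A hedged sketch of the resulting pattern (illustrative code, not the real kernels in ggml/src/ggml-cpu/arch/arm/repack.cpp): with GGML_CPU_ALL_VARIANTS each CPU backend variant is compiled with its Arm features fixed at build time, so the compile-time guard alone selects the optimized path and the runtime ggml_cpu_has_neon()/ggml_cpu_has_dotprod() re-checks inside the kernels become redundant.

#include <cstdio>

// Illustrative stand-in for a repack GEMV kernel.
static void gemv_example() {
#if defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
    // Before this patch the body re-checked the CPU at runtime as well:
    //     if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { ...; return; }
    // Each GGML_CPU_ALL_VARIANTS build already guarantees these features, so
    // the preprocessor guard is sufficient here.
    std::puts("NEON + dotprod kernel");
    return;
#endif
    std::puts("generic fallback kernel");
}

int main() {
    gemv_example();
    return 0;
}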
--- ggml/src/ggml-cpu/arch/arm/repack.cpp | 2040 ++++++++++++------------- ggml/src/ggml-cpu/ggml-cpu.c | 95 +- 2 files changed, 1023 insertions(+), 1112 deletions(-) diff --git a/ggml/src/ggml-cpu/arch/arm/repack.cpp b/ggml/src/ggml-cpu/arch/arm/repack.cpp index 9337e01b62390..39a0dd301db08 100644 --- a/ggml/src/ggml-cpu/arch/arm/repack.cpp +++ b/ggml/src/ggml-cpu/arch/arm/repack.cpp @@ -256,45 +256,43 @@ void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; - - for (int c = 0; c < nc; c += ncols_interleaved) { - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - float32x4_t acc = vdupq_n_f32(0); - for (int b = 0; b < nb; b++) { - int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); - int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); - int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); - int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); - float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); - - int8x16_t a0 = vld1q_s8(a_ptr->qs); - int8x16_t a1 = vld1q_s8(a_ptr->qs + qk/2); - float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); - - int32x4_t ret = vdupq_n_s32(0); - - ret = vdotq_laneq_s32(ret, b0 << 4, a0, 0); - ret = vdotq_laneq_s32(ret, b1 << 4, a0, 1); - ret = vdotq_laneq_s32(ret, b2 << 4, a0, 2); - ret = vdotq_laneq_s32(ret, b3 << 4, a0, 3); - - ret = vdotq_laneq_s32(ret, b0 & 0xf0U, a1, 0); - ret = vdotq_laneq_s32(ret, b1 & 0xf0U, a1, 1); - ret = vdotq_laneq_s32(ret, b2 & 0xf0U, a1, 2); - ret = vdotq_laneq_s32(ret, b3 & 0xf0U, a1, 3); - - acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), - vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); - a_ptr++; - b_ptr++; - } - vst1q_f32(s, acc); - s += ncols_interleaved; + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; + + for (int c = 0; c < nc; c += ncols_interleaved) { + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + float32x4_t acc = vdupq_n_f32(0); + for (int b = 0; b < nb; b++) { + int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); + int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); + int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); + int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); + float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); + + int8x16_t a0 = vld1q_s8(a_ptr->qs); + int8x16_t a1 = vld1q_s8(a_ptr->qs + qk/2); + float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); + + int32x4_t ret = vdupq_n_s32(0); + + ret = vdotq_laneq_s32(ret, b0 << 4, a0, 0); + ret = vdotq_laneq_s32(ret, b1 << 4, a0, 1); + ret = vdotq_laneq_s32(ret, b2 << 4, a0, 2); + ret = vdotq_laneq_s32(ret, b3 << 4, a0, 3); + + ret = vdotq_laneq_s32(ret, b0 & 0xf0U, a1, 0); + ret = vdotq_laneq_s32(ret, b1 & 0xf0U, a1, 1); + ret = vdotq_laneq_s32(ret, b2 & 0xf0U, a1, 2); + ret = vdotq_laneq_s32(ret, b3 & 0xf0U, a1, 3); + + acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), + vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); + a_ptr++; + b_ptr++; } - return; + vst1q_f32(s, acc); + s += ncols_interleaved; } + return; #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) float sumf[4]; int sumi; @@ -341,50 +339,48 @@ void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo UNUSED(blocklen); #if ! 
((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; - - for (int c = 0; c < nc; c += ncols_interleaved) { - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - float32x4_t acc = vdupq_n_f32(0); - for (int b = 0; b < nb; b++) { - int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); - int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); - int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); - int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); - float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); - - int8x16_t a0 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs); - int8x16_t a1 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 1); - int8x16_t a2 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 2); - int8x16_t a3 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 3); - float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); - - int32x4_t ret0 = vdupq_n_s32(0); - int32x4_t ret1 = vdupq_n_s32(0); - - ret0 = vdotq_s32(ret0, b0 << 4, a0); - ret1 = vdotq_s32(ret1, b1 << 4, a0); - ret0 = vdotq_s32(ret0, b2 << 4, a1); - ret1 = vdotq_s32(ret1, b3 << 4, a1); - - ret0 = vdotq_s32(ret0, b0 & 0xf0U, a2); - ret1 = vdotq_s32(ret1, b1 & 0xf0U, a2); - ret0 = vdotq_s32(ret0, b2 & 0xf0U, a3); - ret1 = vdotq_s32(ret1, b3 & 0xf0U, a3); - - int32x4_t ret = vpaddq_s32(ret0, ret1); - - acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), - vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); - a_ptr++; - b_ptr++; - } - vst1q_f32(s, acc); - s += ncols_interleaved; + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; + + for (int c = 0; c < nc; c += ncols_interleaved) { + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + float32x4_t acc = vdupq_n_f32(0); + for (int b = 0; b < nb; b++) { + int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); + int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); + int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); + int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); + float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); + + int8x16_t a0 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs); + int8x16_t a1 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 1); + int8x16_t a2 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 2); + int8x16_t a3 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 3); + float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); + + int32x4_t ret0 = vdupq_n_s32(0); + int32x4_t ret1 = vdupq_n_s32(0); + + ret0 = vdotq_s32(ret0, b0 << 4, a0); + ret1 = vdotq_s32(ret1, b1 << 4, a0); + ret0 = vdotq_s32(ret0, b2 << 4, a1); + ret1 = vdotq_s32(ret1, b3 << 4, a1); + + ret0 = vdotq_s32(ret0, b0 & 0xf0U, a2); + ret1 = vdotq_s32(ret1, b1 & 0xf0U, a2); + ret0 = vdotq_s32(ret0, b2 & 0xf0U, a3); + ret1 = vdotq_s32(ret1, b3 & 0xf0U, a3); + + int32x4_t ret = vpaddq_s32(ret0, ret1); + + acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), + vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); + a_ptr++; + b_ptr++; } - return; + vst1q_f32(s, acc); + s += ncols_interleaved; } + return; #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) float sumf[4]; int sumi; @@ -432,7 +428,7 @@ void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) #if defined(__ARM_FEATURE_SVE) - if (ggml_cpu_has_sve() && ggml_cpu_get_sve_cnt() == QK8_0) { + if (ggml_cpu_get_sve_cnt() == QK8_0) { const void * b_ptr = vx; const void * a_ptr = vy; float * res_ptr = s; @@ -547,54 +543,52 @@ void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - float * res_ptr = s; - - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); - - float32x4_t sumf = vdupq_n_f32(0); - for (int l = 0; l < nb; l++) { - uint8x16_t b_0 = vld1q_u8(b_ptr[l].qs + 0); - uint8x16_t b_1 = vld1q_u8(b_ptr[l].qs + 16); - uint8x16_t b_2 = vld1q_u8(b_ptr[l].qs + 32); - uint8x16_t b_3 = vld1q_u8(b_ptr[l].qs + 48); - - int8x16_t b_0_hi = vqtbl1q_s8(kvalues, b_0 >> 4); - int8x16_t b_0_lo = vqtbl1q_s8(kvalues, b_0 & 0x0F); - int8x16_t b_1_hi = vqtbl1q_s8(kvalues, b_1 >> 4); - int8x16_t b_1_lo = vqtbl1q_s8(kvalues, b_1 & 0x0F); - int8x16_t b_2_hi = vqtbl1q_s8(kvalues, b_2 >> 4); - int8x16_t b_2_lo = vqtbl1q_s8(kvalues, b_2 & 0x0F); - int8x16_t b_3_hi = vqtbl1q_s8(kvalues, b_3 >> 4); - int8x16_t b_3_lo = vqtbl1q_s8(kvalues, b_3 & 0x0F); - - int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 0); - int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16); - - int32x4_t sumi = vdupq_n_s32(0); - sumi = vdotq_laneq_s32(sumi, b_0_lo, a_0, 0); - sumi = vdotq_laneq_s32(sumi, b_0_hi, a_1, 0); - sumi = vdotq_laneq_s32(sumi, b_1_lo, a_0, 1); - sumi = vdotq_laneq_s32(sumi, b_1_hi, a_1, 1); - sumi = vdotq_laneq_s32(sumi, b_2_lo, a_0, 2); - sumi = vdotq_laneq_s32(sumi, b_2_hi, a_1, 2); - sumi = vdotq_laneq_s32(sumi, b_3_lo, a_0, 3); - sumi = vdotq_laneq_s32(sumi, b_3_hi, a_1, 3); - - float32x4_t a_d = vcvt_f32_f16(vld1_dup_f16((const float16_t *)&a_ptr[l].d)); - float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); - float32x4_t d = a_d * b_d; + const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + float * res_ptr = s; - sumf = vmlaq_f32(sumf, d, vcvtq_f32_s32(sumi)); - } + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); - vst1q_f32(res_ptr + x * 4, sumf); + float32x4_t sumf = vdupq_n_f32(0); + for (int l = 0; l < nb; l++) { + uint8x16_t b_0 = vld1q_u8(b_ptr[l].qs + 0); + uint8x16_t b_1 = vld1q_u8(b_ptr[l].qs + 16); + uint8x16_t b_2 = vld1q_u8(b_ptr[l].qs + 32); + uint8x16_t b_3 = vld1q_u8(b_ptr[l].qs + 48); + + int8x16_t b_0_hi = vqtbl1q_s8(kvalues, b_0 >> 4); + int8x16_t b_0_lo = vqtbl1q_s8(kvalues, b_0 & 0x0F); + int8x16_t b_1_hi = vqtbl1q_s8(kvalues, b_1 >> 4); + int8x16_t b_1_lo = vqtbl1q_s8(kvalues, b_1 & 0x0F); + int8x16_t b_2_hi = vqtbl1q_s8(kvalues, b_2 >> 4); + int8x16_t b_2_lo = vqtbl1q_s8(kvalues, b_2 & 0x0F); + int8x16_t b_3_hi = vqtbl1q_s8(kvalues, b_3 >> 4); + int8x16_t b_3_lo = vqtbl1q_s8(kvalues, b_3 & 0x0F); + + int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 0); + int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16); + + int32x4_t sumi = vdupq_n_s32(0); + sumi = vdotq_laneq_s32(sumi, b_0_lo, a_0, 0); + sumi = vdotq_laneq_s32(sumi, b_0_hi, a_1, 0); + sumi = vdotq_laneq_s32(sumi, b_1_lo, a_0, 1); + sumi = vdotq_laneq_s32(sumi, b_1_hi, a_1, 1); + 
sumi = vdotq_laneq_s32(sumi, b_2_lo, a_0, 2); + sumi = vdotq_laneq_s32(sumi, b_2_hi, a_1, 2); + sumi = vdotq_laneq_s32(sumi, b_3_lo, a_0, 3); + sumi = vdotq_laneq_s32(sumi, b_3_hi, a_1, 3); + + float32x4_t a_d = vcvt_f32_f16(vld1_dup_f16((const float16_t *)&a_ptr[l].d)); + float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); + float32x4_t d = a_d * b_d; + + sumf = vmlaq_f32(sumf, d, vcvtq_f32_s32(sumi)); } - return; + + vst1q_f32(res_ptr + x * 4, sumf); } + return; #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) { float sumf[4]; @@ -643,465 +637,463 @@ void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo UNUSED(ncols_interleaved); UNUSED(blocklen); -#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const void * b_ptr = vx; - const void * a_ptr = vy; - float * res_ptr = s; - size_t res_stride = bs * sizeof(float); - - __asm__ __volatile__( - "mov x10, %x[nr]\n" - "mov x9, #0x88\n" - "cmp x10, #0x10\n" - "mul x9, %x[nb], x9\n" - "blt 4f\n" - "1:" // Row loop - "add x28, %x[b_ptr], #0x8\n" - "mov x27, %x[nc]\n" - "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" - "2:" // Column loop - "add x25, %x[a_ptr], #0x8\n" - "movi v15.16b, #0x0\n" - "movi v19.16b, #0x0\n" - "mov x24, %x[nb]\n" - "add x23, x25, x9\n" - "movi v18.16b, #0x0\n" - "movi v14.16b, #0x0\n" - "add x22, x23, x9\n" - "movi v11.16b, #0x0\n" - "movi v13.16b, #0x0\n" - "add x21, x22, x9\n" - "movi v23.16b, #0x0\n" - "movi v16.16b, #0x0\n" - "movi v25.16b, #0x0\n" - "movi v7.16b, #0x0\n" - "movi v0.16b, #0x0\n" - "movi v4.16b, #0x0\n" - "movi v5.16b, #0x0\n" - "movi v21.16b, #0x0\n" - "movi v8.16b, #0x0\n" - "movi v1.16b, #0x0\n" - "3:" // Block loop - "ldr q3, [x28, #0x0]\n" - "ldr q31, [x25, #0x0]\n" - "movi v28.16b, #0x4\n" - "movi v10.4s, #0x0\n" - "ldr q22, [x28, #0x10]\n" - "ldr q6, [x25, #0x10]\n" - "movi v29.4s, #0x0\n" - "movi v9.4s, #0x0\n" - "ldr q27, [x28, #0x20]\n" - "ldr q30, [x28, #0x30]\n" - "movi v20.4s, #0x0\n" - "movi v24.16b, #0xf0\n" - "ldr d2, [x25, #-0x8]\n" - "ldr d26, [x23, #-0x8]\n" - "sshl v12.16b, v3.16b, v28.16b\n" - "sub x20, x28, #0x8\n" - "ldr d17, [x20, #0x0]\n" - "and v3.16b, v3.16b, v24.16b\n" - "subs x24, x24, #0x1\n" - "add x28, x28, #0x48\n" - ".inst 0x4f9fe18a // sdot v10.4s, v12.16b, v31.4b[0]\n" - ".inst 0x4fbfe19d // sdot v29.4s, v12.16b, v31.4b[1]\n" - ".inst 0x4f9fe989 // sdot v9.4s, v12.16b, v31.4b[2]\n" - ".inst 0x4fbfe994 // sdot v20.4s, v12.16b, v31.4b[3]\n" - "sshl v31.16b, v22.16b, v28.16b\n" - "and v22.16b, v22.16b, v24.16b\n" - "fcvtl v17.4s, v17.4h\n" - "fcvtl v2.4s, v2.4h\n" - "fcvtl v26.4s, v26.4h\n" - ".inst 0x4f86e3ea // sdot v10.4s, v31.16b, v6.4b[0]\n" - ".inst 0x4fa6e3fd // sdot v29.4s, v31.16b, v6.4b[1]\n" - ".inst 0x4f86ebe9 // sdot v9.4s, v31.16b, v6.4b[2]\n" - ".inst 0x4fa6ebf4 // sdot v20.4s, v31.16b, v6.4b[3]\n" - "sshl v6.16b, v27.16b, v28.16b\n" - "sshl v28.16b, v30.16b, v28.16b\n" - "and v27.16b, v27.16b, v24.16b\n" - "and v30.16b, v30.16b, v24.16b\n" - "ldr q24, [x25, #0x20]\n" - ".inst 0x4f98e0ca // sdot v10.4s, v6.16b, v24.4b[0]\n" - ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" - ".inst 0x4f98e8c9 // sdot v9.4s, v6.16b, v24.4b[2]\n" - ".inst 0x4fb8e8d4 // sdot v20.4s, v6.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x30]\n" - ".inst 0x4f98e38a // sdot v10.4s, v28.16b, v24.4b[0]\n" - ".inst 0x4fb8e39d // sdot v29.4s, v28.16b, v24.4b[1]\n" - ".inst 
0x4f98eb89 // sdot v9.4s, v28.16b, v24.4b[2]\n" - ".inst 0x4fb8eb94 // sdot v20.4s, v28.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x40]\n" - ".inst 0x4f98e06a // sdot v10.4s, v3.16b, v24.4b[0]\n" - ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" - ".inst 0x4f98e869 // sdot v9.4s, v3.16b, v24.4b[2]\n" - ".inst 0x4fb8e874 // sdot v20.4s, v3.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x50]\n" - ".inst 0x4f98e2ca // sdot v10.4s, v22.16b, v24.4b[0]\n" - ".inst 0x4fb8e2dd // sdot v29.4s, v22.16b, v24.4b[1]\n" - ".inst 0x4f98eac9 // sdot v9.4s, v22.16b, v24.4b[2]\n" - ".inst 0x4fb8ead4 // sdot v20.4s, v22.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x60]\n" - ".inst 0x4f98e36a // sdot v10.4s, v27.16b, v24.4b[0]\n" - ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" - ".inst 0x4f98eb69 // sdot v9.4s, v27.16b, v24.4b[2]\n" - ".inst 0x4fb8eb74 // sdot v20.4s, v27.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x70]\n" - "add x25, x25, #0x88\n" - ".inst 0x4f98e3ca // sdot v10.4s, v30.16b, v24.4b[0]\n" - ".inst 0x4fb8e3dd // sdot v29.4s, v30.16b, v24.4b[1]\n" - ".inst 0x4f98ebc9 // sdot v9.4s, v30.16b, v24.4b[2]\n" - ".inst 0x4fb8ebd4 // sdot v20.4s, v30.16b, v24.4b[3]\n" - "fmul v24.4s, v17.4s, v2.s[0]\n" - "scvtf v10.4s, v10.4s, #0x4\n" - "scvtf v29.4s, v29.4s, #0x4\n" - "scvtf v9.4s, v9.4s, #0x4\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "fmla v15.4s, v10.4s, v24.4s\n" - "ldr q24, [x23, #0x0]\n" - "fmul v10.4s, v17.4s, v2.s[1]\n" - "fmla v19.4s, v29.4s, v10.4s\n" - "ldr q10, [x23, #0x10]\n" - "fmul v29.4s, v17.4s, v2.s[2]\n" - "fmul v2.4s, v17.4s, v2.s[3]\n" - "fmla v18.4s, v9.4s, v29.4s\n" - "movi v9.4s, #0x0\n" - "movi v29.4s, #0x0\n" - ".inst 0x4f98e189 // sdot v9.4s, v12.16b, v24.4b[0]\n" - ".inst 0x4fb8e19d // sdot v29.4s, v12.16b, v24.4b[1]\n" - "fmla v14.4s, v20.4s, v2.4s\n" - "movi v20.4s, #0x0\n" - "movi v2.4s, #0x0\n" - ".inst 0x4f98e994 // sdot v20.4s, v12.16b, v24.4b[2]\n" - ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" - "ldr q24, [x23, #0x20]\n" - ".inst 0x4f8ae3e9 // sdot v9.4s, v31.16b, v10.4b[0]\n" - ".inst 0x4faae3fd // sdot v29.4s, v31.16b, v10.4b[1]\n" - ".inst 0x4f8aebf4 // sdot v20.4s, v31.16b, v10.4b[2]\n" - ".inst 0x4faaebe2 // sdot v2.4s, v31.16b, v10.4b[3]\n" - "ldr q10, [x23, #0x30]\n" - ".inst 0x4f98e0c9 // sdot v9.4s, v6.16b, v24.4b[0]\n" - ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" - ".inst 0x4f98e8d4 // sdot v20.4s, v6.16b, v24.4b[2]\n" - ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" - "ldr q24, [x23, #0x40]\n" - ".inst 0x4f8ae389 // sdot v9.4s, v28.16b, v10.4b[0]\n" - ".inst 0x4faae39d // sdot v29.4s, v28.16b, v10.4b[1]\n" - ".inst 0x4f8aeb94 // sdot v20.4s, v28.16b, v10.4b[2]\n" - ".inst 0x4faaeb82 // sdot v2.4s, v28.16b, v10.4b[3]\n" - "ldr q10, [x23, #0x50]\n" - ".inst 0x4f98e069 // sdot v9.4s, v3.16b, v24.4b[0]\n" - ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" - ".inst 0x4f98e874 // sdot v20.4s, v3.16b, v24.4b[2]\n" - ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" - "ldr q24, [x23, #0x60]\n" - ".inst 0x4f8ae2c9 // sdot v9.4s, v22.16b, v10.4b[0]\n" - ".inst 0x4faae2dd // sdot v29.4s, v22.16b, v10.4b[1]\n" - ".inst 0x4f8aead4 // sdot v20.4s, v22.16b, v10.4b[2]\n" - ".inst 0x4faaeac2 // sdot v2.4s, v22.16b, v10.4b[3]\n" - "ldr q10, [x23, #0x70]\n" - "add x23, x23, #0x88\n" - ".inst 0x4f98e369 // sdot v9.4s, v27.16b, v24.4b[0]\n" - ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" - ".inst 0x4f98eb74 // sdot v20.4s, v27.16b, v24.4b[2]\n" - ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" - "ldr q24, [x22, #0x0]\n" - ".inst 
0x4f8ae3c9 // sdot v9.4s, v30.16b, v10.4b[0]\n" - ".inst 0x4faae3dd // sdot v29.4s, v30.16b, v10.4b[1]\n" - ".inst 0x4f8aebd4 // sdot v20.4s, v30.16b, v10.4b[2]\n" - ".inst 0x4faaebc2 // sdot v2.4s, v30.16b, v10.4b[3]\n" - "fmul v10.4s, v17.4s, v26.s[0]\n" - "scvtf v9.4s, v9.4s, #0x4\n" - "scvtf v29.4s, v29.4s, #0x4\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "scvtf v2.4s, v2.4s, #0x4\n" - "fmla v11.4s, v9.4s, v10.4s\n" - "ldr q9, [x22, #0x10]\n" - "fmul v10.4s, v17.4s, v26.s[1]\n" - "fmla v13.4s, v29.4s, v10.4s\n" - "ldr d29, [x22, #-0x8]\n" - "fmul v10.4s, v17.4s, v26.s[2]\n" - "fmul v26.4s, v17.4s, v26.s[3]\n" - "fcvtl v29.4s, v29.4h\n" - "fmla v23.4s, v20.4s, v10.4s\n" - "movi v20.4s, #0x0\n" - "movi v10.4s, #0x0\n" - "fmla v16.4s, v2.4s, v26.4s\n" - "movi v26.4s, #0x0\n" - "movi v2.4s, #0x0\n" - ".inst 0x4f98e194 // sdot v20.4s, v12.16b, v24.4b[0]\n" - ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" - ".inst 0x4f98e99a // sdot v26.4s, v12.16b, v24.4b[2]\n" - ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" - "ldr q24, [x22, #0x20]\n" - ".inst 0x4f89e3f4 // sdot v20.4s, v31.16b, v9.4b[0]\n" - ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" - ".inst 0x4f89ebfa // sdot v26.4s, v31.16b, v9.4b[2]\n" - ".inst 0x4fa9ebe2 // sdot v2.4s, v31.16b, v9.4b[3]\n" - "ldr q9, [x22, #0x30]\n" - ".inst 0x4f98e0d4 // sdot v20.4s, v6.16b, v24.4b[0]\n" - ".inst 0x4fb8e0ca // sdot v10.4s, v6.16b, v24.4b[1]\n" - ".inst 0x4f98e8da // sdot v26.4s, v6.16b, v24.4b[2]\n" - ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" - "ldr q24, [x22, #0x40]\n" - ".inst 0x4f89e394 // sdot v20.4s, v28.16b, v9.4b[0]\n" - ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" - ".inst 0x4f89eb9a // sdot v26.4s, v28.16b, v9.4b[2]\n" - ".inst 0x4fa9eb82 // sdot v2.4s, v28.16b, v9.4b[3]\n" - "ldr q9, [x22, #0x50]\n" - ".inst 0x4f98e074 // sdot v20.4s, v3.16b, v24.4b[0]\n" - ".inst 0x4fb8e06a // sdot v10.4s, v3.16b, v24.4b[1]\n" - ".inst 0x4f98e87a // sdot v26.4s, v3.16b, v24.4b[2]\n" - ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" - "ldr q24, [x22, #0x60]\n" - ".inst 0x4f89e2d4 // sdot v20.4s, v22.16b, v9.4b[0]\n" - ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" - ".inst 0x4f89eada // sdot v26.4s, v22.16b, v9.4b[2]\n" - ".inst 0x4fa9eac2 // sdot v2.4s, v22.16b, v9.4b[3]\n" - "ldr q9, [x22, #0x70]\n" - "add x22, x22, #0x88\n" - ".inst 0x4f98e374 // sdot v20.4s, v27.16b, v24.4b[0]\n" - ".inst 0x4fb8e36a // sdot v10.4s, v27.16b, v24.4b[1]\n" - ".inst 0x4f98eb7a // sdot v26.4s, v27.16b, v24.4b[2]\n" - ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" - "ldr q24, [x21, #0x0]\n" - ".inst 0x4f89e3d4 // sdot v20.4s, v30.16b, v9.4b[0]\n" - ".inst 0x4fa9e3ca // sdot v10.4s, v30.16b, v9.4b[1]\n" - ".inst 0x4f89ebda // sdot v26.4s, v30.16b, v9.4b[2]\n" - ".inst 0x4fa9ebc2 // sdot v2.4s, v30.16b, v9.4b[3]\n" - "fmul v9.4s, v17.4s, v29.s[0]\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "scvtf v10.4s, v10.4s, #0x4\n" - "scvtf v26.4s, v26.4s, #0x4\n" - "scvtf v2.4s, v2.4s, #0x4\n" - "fmla v25.4s, v20.4s, v9.4s\n" - "ldr q9, [x21, #0x10]\n" - "fmul v20.4s, v17.4s, v29.s[1]\n" - "fmla v7.4s, v10.4s, v20.4s\n" - "ldr d20, [x21, #-0x8]\n" - "fmul v10.4s, v17.4s, v29.s[2]\n" - "fmul v29.4s, v17.4s, v29.s[3]\n" - "fcvtl v20.4s, v20.4h\n" - "fmla v0.4s, v26.4s, v10.4s\n" - "movi v26.4s, #0x0\n" - "movi v10.4s, #0x0\n" - "fmla v4.4s, v2.4s, v29.4s\n" - "movi v2.4s, #0x0\n" - "movi v29.4s, #0x0\n" - ".inst 0x4f98e19a // sdot v26.4s, v12.16b, v24.4b[0]\n" - ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" - ".inst 
0x4f98e982 // sdot v2.4s, v12.16b, v24.4b[2]\n" - ".inst 0x4fb8e99d // sdot v29.4s, v12.16b, v24.4b[3]\n" - "ldr q12, [x21, #0x20]\n" - "fmul v24.4s, v17.4s, v20.s[0]\n" - ".inst 0x4f89e3fa // sdot v26.4s, v31.16b, v9.4b[0]\n" - ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" - ".inst 0x4f89ebe2 // sdot v2.4s, v31.16b, v9.4b[2]\n" - ".inst 0x4fa9ebfd // sdot v29.4s, v31.16b, v9.4b[3]\n" - "ldr q9, [x21, #0x30]\n" - "fmul v31.4s, v17.4s, v20.s[1]\n" - ".inst 0x4f8ce0da // sdot v26.4s, v6.16b, v12.4b[0]\n" - ".inst 0x4face0ca // sdot v10.4s, v6.16b, v12.4b[1]\n" - ".inst 0x4f8ce8c2 // sdot v2.4s, v6.16b, v12.4b[2]\n" - ".inst 0x4face8dd // sdot v29.4s, v6.16b, v12.4b[3]\n" - "ldr q12, [x21, #0x40]\n" - "fmul v6.4s, v17.4s, v20.s[2]\n" - "fmul v20.4s, v17.4s, v20.s[3]\n" - ".inst 0x4f89e39a // sdot v26.4s, v28.16b, v9.4b[0]\n" - ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" - ".inst 0x4f89eb82 // sdot v2.4s, v28.16b, v9.4b[2]\n" - ".inst 0x4fa9eb9d // sdot v29.4s, v28.16b, v9.4b[3]\n" - "ldr q9, [x21, #0x50]\n" - ".inst 0x4f8ce07a // sdot v26.4s, v3.16b, v12.4b[0]\n" - ".inst 0x4face06a // sdot v10.4s, v3.16b, v12.4b[1]\n" - ".inst 0x4f8ce862 // sdot v2.4s, v3.16b, v12.4b[2]\n" - ".inst 0x4face87d // sdot v29.4s, v3.16b, v12.4b[3]\n" - "ldr q12, [x21, #0x60]\n" - ".inst 0x4f89e2da // sdot v26.4s, v22.16b, v9.4b[0]\n" - ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" - ".inst 0x4f89eac2 // sdot v2.4s, v22.16b, v9.4b[2]\n" - ".inst 0x4fa9eadd // sdot v29.4s, v22.16b, v9.4b[3]\n" - "ldr q17, [x21, #0x70]\n" - "add x21, x21, #0x88\n" - ".inst 0x4f8ce37a // sdot v26.4s, v27.16b, v12.4b[0]\n" - ".inst 0x4face36a // sdot v10.4s, v27.16b, v12.4b[1]\n" - ".inst 0x4f8ceb62 // sdot v2.4s, v27.16b, v12.4b[2]\n" - ".inst 0x4faceb7d // sdot v29.4s, v27.16b, v12.4b[3]\n" - ".inst 0x4f91e3da // sdot v26.4s, v30.16b, v17.4b[0]\n" - ".inst 0x4fb1e3ca // sdot v10.4s, v30.16b, v17.4b[1]\n" - ".inst 0x4f91ebc2 // sdot v2.4s, v30.16b, v17.4b[2]\n" - ".inst 0x4fb1ebdd // sdot v29.4s, v30.16b, v17.4b[3]\n" - "scvtf v26.4s, v26.4s, #0x4\n" - "scvtf v10.4s, v10.4s, #0x4\n" - "fmla v5.4s, v26.4s, v24.4s\n" - "scvtf v2.4s, v2.4s, #0x4\n" - "scvtf v29.4s, v29.4s, #0x4\n" - "fmla v21.4s, v10.4s, v31.4s\n" - "fmla v8.4s, v2.4s, v6.4s\n" - "fmla v1.4s, v29.4s, v20.4s\n" - "bgt 3b\n" - "mov x20, %x[res_ptr]\n" - "subs x27, x27, #0x4\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "str q15, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q19, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q18, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q14, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q11, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q13, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q23, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q16, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q25, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q7, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q0, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q4, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q5, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q21, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q8, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q1, [x20, #0x0]\n" - "bne 2b\n" - "mov x20, #0x4\n" - "sub x10, x10, #0x10\n" - "cmp x10, #0x10\n" - "mov %x[res_ptr], x26\n" - "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" - "bge 1b\n" - "4:" // Row loop skip - "cbz x10, 
9f\n" - "5:" // Row tail: Row loop - "add x24, %x[b_ptr], #0x8\n" - "mov x23, %x[nc]\n" - "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" - "6:" // Row tail: Column loop - "movi v15.16b, #0x0\n" - "movi v19.16b, #0x0\n" - "add x25, %x[a_ptr], #0x8\n" - "mov x21, %x[nb]\n" - "movi v18.16b, #0x0\n" - "movi v14.16b, #0x0\n" - "7:" // Row tail: Block loop - "ldr q7, [x24, #0x0]\n" - "ldr q5, [x25, #0x0]\n" - "movi v9.16b, #0x4\n" - "movi v4.4s, #0x0\n" - "ldr q3, [x24, #0x10]\n" - "ldr q2, [x25, #0x10]\n" - "movi v1.4s, #0x0\n" - "movi v0.4s, #0x0\n" - "ldr q13, [x24, #0x20]\n" - "ldr q31, [x25, #0x20]\n" - "movi v30.4s, #0x0\n" - "movi v29.16b, #0xf0\n" - "ldr q28, [x24, #0x30]\n" - "ldr q27, [x25, #0x30]\n" - "sshl v20.16b, v7.16b, v9.16b\n" - "sub x20, x24, #0x8\n" - "ldr q26, [x25, #0x40]\n" - "ldr q25, [x25, #0x50]\n" - "sshl v17.16b, v3.16b, v9.16b\n" - "and v7.16b, v7.16b, v29.16b\n" - "ldr q24, [x25, #0x60]\n" - "ldr q16, [x25, #0x70]\n" - "sshl v22.16b, v13.16b, v9.16b\n" - "and v3.16b, v3.16b, v29.16b\n" - "ldr d21, [x20, #0x0]\n" - "ldr d12, [x25, #-0x8]\n" - ".inst 0x4f85e284 // sdot v4.4s, v20.16b, v5.4b[0]\n" - ".inst 0x4fa5e281 // sdot v1.4s, v20.16b, v5.4b[1]\n" - ".inst 0x4f85ea80 // sdot v0.4s, v20.16b, v5.4b[2]\n" - ".inst 0x4fa5ea9e // sdot v30.4s, v20.16b, v5.4b[3]\n" - "sshl v9.16b, v28.16b, v9.16b\n" - "subs x21, x21, #0x1\n" - "and v13.16b, v13.16b, v29.16b\n" - "and v28.16b, v28.16b, v29.16b\n" - "add x25, x25, #0x88\n" - "add x24, x24, #0x48\n" - "fcvtl v21.4s, v21.4h\n" - "fcvtl v12.4s, v12.4h\n" - ".inst 0x4f82e224 // sdot v4.4s, v17.16b, v2.4b[0]\n" - ".inst 0x4fa2e221 // sdot v1.4s, v17.16b, v2.4b[1]\n" - ".inst 0x4f82ea20 // sdot v0.4s, v17.16b, v2.4b[2]\n" - ".inst 0x4fa2ea3e // sdot v30.4s, v17.16b, v2.4b[3]\n" - "fmul v11.4s, v21.4s, v12.s[0]\n" - "fmul v23.4s, v21.4s, v12.s[1]\n" - "fmul v17.4s, v21.4s, v12.s[2]\n" - ".inst 0x4f9fe2c4 // sdot v4.4s, v22.16b, v31.4b[0]\n" - "fmul v6.4s, v21.4s, v12.s[3]\n" - ".inst 0x4fbfe2c1 // sdot v1.4s, v22.16b, v31.4b[1]\n" - ".inst 0x4f9feac0 // sdot v0.4s, v22.16b, v31.4b[2]\n" - ".inst 0x4fbfeade // sdot v30.4s, v22.16b, v31.4b[3]\n" - ".inst 0x4f9be124 // sdot v4.4s, v9.16b, v27.4b[0]\n" - ".inst 0x4fbbe121 // sdot v1.4s, v9.16b, v27.4b[1]\n" - ".inst 0x4f9be920 // sdot v0.4s, v9.16b, v27.4b[2]\n" - ".inst 0x4fbbe93e // sdot v30.4s, v9.16b, v27.4b[3]\n" - ".inst 0x4f9ae0e4 // sdot v4.4s, v7.16b, v26.4b[0]\n" - ".inst 0x4fbae0e1 // sdot v1.4s, v7.16b, v26.4b[1]\n" - ".inst 0x4f9ae8e0 // sdot v0.4s, v7.16b, v26.4b[2]\n" - ".inst 0x4fbae8fe // sdot v30.4s, v7.16b, v26.4b[3]\n" - ".inst 0x4f99e064 // sdot v4.4s, v3.16b, v25.4b[0]\n" - ".inst 0x4fb9e061 // sdot v1.4s, v3.16b, v25.4b[1]\n" - ".inst 0x4f99e860 // sdot v0.4s, v3.16b, v25.4b[2]\n" - ".inst 0x4fb9e87e // sdot v30.4s, v3.16b, v25.4b[3]\n" - ".inst 0x4f98e1a4 // sdot v4.4s, v13.16b, v24.4b[0]\n" - ".inst 0x4fb8e1a1 // sdot v1.4s, v13.16b, v24.4b[1]\n" - ".inst 0x4f98e9a0 // sdot v0.4s, v13.16b, v24.4b[2]\n" - ".inst 0x4fb8e9be // sdot v30.4s, v13.16b, v24.4b[3]\n" - ".inst 0x4f90e384 // sdot v4.4s, v28.16b, v16.4b[0]\n" - ".inst 0x4fb0e381 // sdot v1.4s, v28.16b, v16.4b[1]\n" - ".inst 0x4f90eb80 // sdot v0.4s, v28.16b, v16.4b[2]\n" - ".inst 0x4fb0eb9e // sdot v30.4s, v28.16b, v16.4b[3]\n" - "scvtf v4.4s, v4.4s, #0x4\n" - "scvtf v1.4s, v1.4s, #0x4\n" - "scvtf v0.4s, v0.4s, #0x4\n" - "fmla v15.4s, v4.4s, v11.4s\n" - "scvtf v30.4s, v30.4s, #0x4\n" - "fmla v19.4s, v1.4s, v23.4s\n" - "fmla v18.4s, v0.4s, v17.4s\n" - "fmla v14.4s, v30.4s, v6.4s\n" - "bgt 7b\n" - "mov 
x20, %x[res_ptr]\n" - "cmp x10, #0x1\n" - "str q15, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x10, #0x2\n" - "str q19, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x10, #0x3\n" - "str q18, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "str q14, [x20, #0x0]\n" - "8:" // Row tail: Accumulator store skip - "subs x23, x23, #0x4\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "bne 6b\n" - "subs x10, x10, #0x4\n" - "add %x[a_ptr], %x[a_ptr], x9\n" - "mov %x[res_ptr], x22\n" - "bgt 5b\n" - "9:" // Row tail: Row loop skip - : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) - : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" - ); - return; - } +#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + const void * b_ptr = vx; + const void * a_ptr = vy; + float * res_ptr = s; + size_t res_stride = bs * sizeof(float); + + __asm__ __volatile__( + "mov x10, %x[nr]\n" + "mov x9, #0x88\n" + "cmp x10, #0x10\n" + "mul x9, %x[nb], x9\n" + "blt 4f\n" + "1:" // Row loop + "add x28, %x[b_ptr], #0x8\n" + "mov x27, %x[nc]\n" + "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" + "2:" // Column loop + "add x25, %x[a_ptr], #0x8\n" + "movi v15.16b, #0x0\n" + "movi v19.16b, #0x0\n" + "mov x24, %x[nb]\n" + "add x23, x25, x9\n" + "movi v18.16b, #0x0\n" + "movi v14.16b, #0x0\n" + "add x22, x23, x9\n" + "movi v11.16b, #0x0\n" + "movi v13.16b, #0x0\n" + "add x21, x22, x9\n" + "movi v23.16b, #0x0\n" + "movi v16.16b, #0x0\n" + "movi v25.16b, #0x0\n" + "movi v7.16b, #0x0\n" + "movi v0.16b, #0x0\n" + "movi v4.16b, #0x0\n" + "movi v5.16b, #0x0\n" + "movi v21.16b, #0x0\n" + "movi v8.16b, #0x0\n" + "movi v1.16b, #0x0\n" + "3:" // Block loop + "ldr q3, [x28, #0x0]\n" + "ldr q31, [x25, #0x0]\n" + "movi v28.16b, #0x4\n" + "movi v10.4s, #0x0\n" + "ldr q22, [x28, #0x10]\n" + "ldr q6, [x25, #0x10]\n" + "movi v29.4s, #0x0\n" + "movi v9.4s, #0x0\n" + "ldr q27, [x28, #0x20]\n" + "ldr q30, [x28, #0x30]\n" + "movi v20.4s, #0x0\n" + "movi v24.16b, #0xf0\n" + "ldr d2, [x25, #-0x8]\n" + "ldr d26, [x23, #-0x8]\n" + "sshl v12.16b, v3.16b, v28.16b\n" + "sub x20, x28, #0x8\n" + "ldr d17, [x20, #0x0]\n" + "and v3.16b, v3.16b, v24.16b\n" + "subs x24, x24, #0x1\n" + "add x28, x28, #0x48\n" + ".inst 0x4f9fe18a // sdot v10.4s, v12.16b, v31.4b[0]\n" + ".inst 0x4fbfe19d // sdot v29.4s, v12.16b, v31.4b[1]\n" + ".inst 0x4f9fe989 // sdot v9.4s, v12.16b, v31.4b[2]\n" + ".inst 0x4fbfe994 // sdot v20.4s, v12.16b, v31.4b[3]\n" + "sshl v31.16b, v22.16b, v28.16b\n" + "and v22.16b, v22.16b, v24.16b\n" + "fcvtl v17.4s, v17.4h\n" + "fcvtl v2.4s, v2.4h\n" + "fcvtl v26.4s, v26.4h\n" + ".inst 0x4f86e3ea // sdot v10.4s, v31.16b, v6.4b[0]\n" + ".inst 0x4fa6e3fd // sdot v29.4s, v31.16b, v6.4b[1]\n" + ".inst 0x4f86ebe9 // sdot v9.4s, v31.16b, v6.4b[2]\n" + ".inst 0x4fa6ebf4 // sdot v20.4s, v31.16b, v6.4b[3]\n" + "sshl v6.16b, v27.16b, v28.16b\n" + "sshl v28.16b, v30.16b, v28.16b\n" + "and v27.16b, v27.16b, v24.16b\n" + "and v30.16b, v30.16b, v24.16b\n" + "ldr q24, [x25, #0x20]\n" + ".inst 0x4f98e0ca // sdot v10.4s, v6.16b, v24.4b[0]\n" + ".inst 0x4fb8e0dd // sdot v29.4s, 
v6.16b, v24.4b[1]\n" + ".inst 0x4f98e8c9 // sdot v9.4s, v6.16b, v24.4b[2]\n" + ".inst 0x4fb8e8d4 // sdot v20.4s, v6.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x30]\n" + ".inst 0x4f98e38a // sdot v10.4s, v28.16b, v24.4b[0]\n" + ".inst 0x4fb8e39d // sdot v29.4s, v28.16b, v24.4b[1]\n" + ".inst 0x4f98eb89 // sdot v9.4s, v28.16b, v24.4b[2]\n" + ".inst 0x4fb8eb94 // sdot v20.4s, v28.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x40]\n" + ".inst 0x4f98e06a // sdot v10.4s, v3.16b, v24.4b[0]\n" + ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" + ".inst 0x4f98e869 // sdot v9.4s, v3.16b, v24.4b[2]\n" + ".inst 0x4fb8e874 // sdot v20.4s, v3.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x50]\n" + ".inst 0x4f98e2ca // sdot v10.4s, v22.16b, v24.4b[0]\n" + ".inst 0x4fb8e2dd // sdot v29.4s, v22.16b, v24.4b[1]\n" + ".inst 0x4f98eac9 // sdot v9.4s, v22.16b, v24.4b[2]\n" + ".inst 0x4fb8ead4 // sdot v20.4s, v22.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x60]\n" + ".inst 0x4f98e36a // sdot v10.4s, v27.16b, v24.4b[0]\n" + ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" + ".inst 0x4f98eb69 // sdot v9.4s, v27.16b, v24.4b[2]\n" + ".inst 0x4fb8eb74 // sdot v20.4s, v27.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x70]\n" + "add x25, x25, #0x88\n" + ".inst 0x4f98e3ca // sdot v10.4s, v30.16b, v24.4b[0]\n" + ".inst 0x4fb8e3dd // sdot v29.4s, v30.16b, v24.4b[1]\n" + ".inst 0x4f98ebc9 // sdot v9.4s, v30.16b, v24.4b[2]\n" + ".inst 0x4fb8ebd4 // sdot v20.4s, v30.16b, v24.4b[3]\n" + "fmul v24.4s, v17.4s, v2.s[0]\n" + "scvtf v10.4s, v10.4s, #0x4\n" + "scvtf v29.4s, v29.4s, #0x4\n" + "scvtf v9.4s, v9.4s, #0x4\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "fmla v15.4s, v10.4s, v24.4s\n" + "ldr q24, [x23, #0x0]\n" + "fmul v10.4s, v17.4s, v2.s[1]\n" + "fmla v19.4s, v29.4s, v10.4s\n" + "ldr q10, [x23, #0x10]\n" + "fmul v29.4s, v17.4s, v2.s[2]\n" + "fmul v2.4s, v17.4s, v2.s[3]\n" + "fmla v18.4s, v9.4s, v29.4s\n" + "movi v9.4s, #0x0\n" + "movi v29.4s, #0x0\n" + ".inst 0x4f98e189 // sdot v9.4s, v12.16b, v24.4b[0]\n" + ".inst 0x4fb8e19d // sdot v29.4s, v12.16b, v24.4b[1]\n" + "fmla v14.4s, v20.4s, v2.4s\n" + "movi v20.4s, #0x0\n" + "movi v2.4s, #0x0\n" + ".inst 0x4f98e994 // sdot v20.4s, v12.16b, v24.4b[2]\n" + ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" + "ldr q24, [x23, #0x20]\n" + ".inst 0x4f8ae3e9 // sdot v9.4s, v31.16b, v10.4b[0]\n" + ".inst 0x4faae3fd // sdot v29.4s, v31.16b, v10.4b[1]\n" + ".inst 0x4f8aebf4 // sdot v20.4s, v31.16b, v10.4b[2]\n" + ".inst 0x4faaebe2 // sdot v2.4s, v31.16b, v10.4b[3]\n" + "ldr q10, [x23, #0x30]\n" + ".inst 0x4f98e0c9 // sdot v9.4s, v6.16b, v24.4b[0]\n" + ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" + ".inst 0x4f98e8d4 // sdot v20.4s, v6.16b, v24.4b[2]\n" + ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" + "ldr q24, [x23, #0x40]\n" + ".inst 0x4f8ae389 // sdot v9.4s, v28.16b, v10.4b[0]\n" + ".inst 0x4faae39d // sdot v29.4s, v28.16b, v10.4b[1]\n" + ".inst 0x4f8aeb94 // sdot v20.4s, v28.16b, v10.4b[2]\n" + ".inst 0x4faaeb82 // sdot v2.4s, v28.16b, v10.4b[3]\n" + "ldr q10, [x23, #0x50]\n" + ".inst 0x4f98e069 // sdot v9.4s, v3.16b, v24.4b[0]\n" + ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" + ".inst 0x4f98e874 // sdot v20.4s, v3.16b, v24.4b[2]\n" + ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" + "ldr q24, [x23, #0x60]\n" + ".inst 0x4f8ae2c9 // sdot v9.4s, v22.16b, v10.4b[0]\n" + ".inst 0x4faae2dd // sdot v29.4s, v22.16b, v10.4b[1]\n" + ".inst 0x4f8aead4 // sdot v20.4s, v22.16b, v10.4b[2]\n" + ".inst 0x4faaeac2 // sdot v2.4s, v22.16b, v10.4b[3]\n" + "ldr q10, [x23, #0x70]\n" + "add x23, 
x23, #0x88\n" + ".inst 0x4f98e369 // sdot v9.4s, v27.16b, v24.4b[0]\n" + ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" + ".inst 0x4f98eb74 // sdot v20.4s, v27.16b, v24.4b[2]\n" + ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" + "ldr q24, [x22, #0x0]\n" + ".inst 0x4f8ae3c9 // sdot v9.4s, v30.16b, v10.4b[0]\n" + ".inst 0x4faae3dd // sdot v29.4s, v30.16b, v10.4b[1]\n" + ".inst 0x4f8aebd4 // sdot v20.4s, v30.16b, v10.4b[2]\n" + ".inst 0x4faaebc2 // sdot v2.4s, v30.16b, v10.4b[3]\n" + "fmul v10.4s, v17.4s, v26.s[0]\n" + "scvtf v9.4s, v9.4s, #0x4\n" + "scvtf v29.4s, v29.4s, #0x4\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "scvtf v2.4s, v2.4s, #0x4\n" + "fmla v11.4s, v9.4s, v10.4s\n" + "ldr q9, [x22, #0x10]\n" + "fmul v10.4s, v17.4s, v26.s[1]\n" + "fmla v13.4s, v29.4s, v10.4s\n" + "ldr d29, [x22, #-0x8]\n" + "fmul v10.4s, v17.4s, v26.s[2]\n" + "fmul v26.4s, v17.4s, v26.s[3]\n" + "fcvtl v29.4s, v29.4h\n" + "fmla v23.4s, v20.4s, v10.4s\n" + "movi v20.4s, #0x0\n" + "movi v10.4s, #0x0\n" + "fmla v16.4s, v2.4s, v26.4s\n" + "movi v26.4s, #0x0\n" + "movi v2.4s, #0x0\n" + ".inst 0x4f98e194 // sdot v20.4s, v12.16b, v24.4b[0]\n" + ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" + ".inst 0x4f98e99a // sdot v26.4s, v12.16b, v24.4b[2]\n" + ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" + "ldr q24, [x22, #0x20]\n" + ".inst 0x4f89e3f4 // sdot v20.4s, v31.16b, v9.4b[0]\n" + ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" + ".inst 0x4f89ebfa // sdot v26.4s, v31.16b, v9.4b[2]\n" + ".inst 0x4fa9ebe2 // sdot v2.4s, v31.16b, v9.4b[3]\n" + "ldr q9, [x22, #0x30]\n" + ".inst 0x4f98e0d4 // sdot v20.4s, v6.16b, v24.4b[0]\n" + ".inst 0x4fb8e0ca // sdot v10.4s, v6.16b, v24.4b[1]\n" + ".inst 0x4f98e8da // sdot v26.4s, v6.16b, v24.4b[2]\n" + ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" + "ldr q24, [x22, #0x40]\n" + ".inst 0x4f89e394 // sdot v20.4s, v28.16b, v9.4b[0]\n" + ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" + ".inst 0x4f89eb9a // sdot v26.4s, v28.16b, v9.4b[2]\n" + ".inst 0x4fa9eb82 // sdot v2.4s, v28.16b, v9.4b[3]\n" + "ldr q9, [x22, #0x50]\n" + ".inst 0x4f98e074 // sdot v20.4s, v3.16b, v24.4b[0]\n" + ".inst 0x4fb8e06a // sdot v10.4s, v3.16b, v24.4b[1]\n" + ".inst 0x4f98e87a // sdot v26.4s, v3.16b, v24.4b[2]\n" + ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" + "ldr q24, [x22, #0x60]\n" + ".inst 0x4f89e2d4 // sdot v20.4s, v22.16b, v9.4b[0]\n" + ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" + ".inst 0x4f89eada // sdot v26.4s, v22.16b, v9.4b[2]\n" + ".inst 0x4fa9eac2 // sdot v2.4s, v22.16b, v9.4b[3]\n" + "ldr q9, [x22, #0x70]\n" + "add x22, x22, #0x88\n" + ".inst 0x4f98e374 // sdot v20.4s, v27.16b, v24.4b[0]\n" + ".inst 0x4fb8e36a // sdot v10.4s, v27.16b, v24.4b[1]\n" + ".inst 0x4f98eb7a // sdot v26.4s, v27.16b, v24.4b[2]\n" + ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" + "ldr q24, [x21, #0x0]\n" + ".inst 0x4f89e3d4 // sdot v20.4s, v30.16b, v9.4b[0]\n" + ".inst 0x4fa9e3ca // sdot v10.4s, v30.16b, v9.4b[1]\n" + ".inst 0x4f89ebda // sdot v26.4s, v30.16b, v9.4b[2]\n" + ".inst 0x4fa9ebc2 // sdot v2.4s, v30.16b, v9.4b[3]\n" + "fmul v9.4s, v17.4s, v29.s[0]\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "scvtf v10.4s, v10.4s, #0x4\n" + "scvtf v26.4s, v26.4s, #0x4\n" + "scvtf v2.4s, v2.4s, #0x4\n" + "fmla v25.4s, v20.4s, v9.4s\n" + "ldr q9, [x21, #0x10]\n" + "fmul v20.4s, v17.4s, v29.s[1]\n" + "fmla v7.4s, v10.4s, v20.4s\n" + "ldr d20, [x21, #-0x8]\n" + "fmul v10.4s, v17.4s, v29.s[2]\n" + "fmul v29.4s, v17.4s, v29.s[3]\n" + "fcvtl v20.4s, v20.4h\n" + "fmla 
v0.4s, v26.4s, v10.4s\n" + "movi v26.4s, #0x0\n" + "movi v10.4s, #0x0\n" + "fmla v4.4s, v2.4s, v29.4s\n" + "movi v2.4s, #0x0\n" + "movi v29.4s, #0x0\n" + ".inst 0x4f98e19a // sdot v26.4s, v12.16b, v24.4b[0]\n" + ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" + ".inst 0x4f98e982 // sdot v2.4s, v12.16b, v24.4b[2]\n" + ".inst 0x4fb8e99d // sdot v29.4s, v12.16b, v24.4b[3]\n" + "ldr q12, [x21, #0x20]\n" + "fmul v24.4s, v17.4s, v20.s[0]\n" + ".inst 0x4f89e3fa // sdot v26.4s, v31.16b, v9.4b[0]\n" + ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" + ".inst 0x4f89ebe2 // sdot v2.4s, v31.16b, v9.4b[2]\n" + ".inst 0x4fa9ebfd // sdot v29.4s, v31.16b, v9.4b[3]\n" + "ldr q9, [x21, #0x30]\n" + "fmul v31.4s, v17.4s, v20.s[1]\n" + ".inst 0x4f8ce0da // sdot v26.4s, v6.16b, v12.4b[0]\n" + ".inst 0x4face0ca // sdot v10.4s, v6.16b, v12.4b[1]\n" + ".inst 0x4f8ce8c2 // sdot v2.4s, v6.16b, v12.4b[2]\n" + ".inst 0x4face8dd // sdot v29.4s, v6.16b, v12.4b[3]\n" + "ldr q12, [x21, #0x40]\n" + "fmul v6.4s, v17.4s, v20.s[2]\n" + "fmul v20.4s, v17.4s, v20.s[3]\n" + ".inst 0x4f89e39a // sdot v26.4s, v28.16b, v9.4b[0]\n" + ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" + ".inst 0x4f89eb82 // sdot v2.4s, v28.16b, v9.4b[2]\n" + ".inst 0x4fa9eb9d // sdot v29.4s, v28.16b, v9.4b[3]\n" + "ldr q9, [x21, #0x50]\n" + ".inst 0x4f8ce07a // sdot v26.4s, v3.16b, v12.4b[0]\n" + ".inst 0x4face06a // sdot v10.4s, v3.16b, v12.4b[1]\n" + ".inst 0x4f8ce862 // sdot v2.4s, v3.16b, v12.4b[2]\n" + ".inst 0x4face87d // sdot v29.4s, v3.16b, v12.4b[3]\n" + "ldr q12, [x21, #0x60]\n" + ".inst 0x4f89e2da // sdot v26.4s, v22.16b, v9.4b[0]\n" + ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" + ".inst 0x4f89eac2 // sdot v2.4s, v22.16b, v9.4b[2]\n" + ".inst 0x4fa9eadd // sdot v29.4s, v22.16b, v9.4b[3]\n" + "ldr q17, [x21, #0x70]\n" + "add x21, x21, #0x88\n" + ".inst 0x4f8ce37a // sdot v26.4s, v27.16b, v12.4b[0]\n" + ".inst 0x4face36a // sdot v10.4s, v27.16b, v12.4b[1]\n" + ".inst 0x4f8ceb62 // sdot v2.4s, v27.16b, v12.4b[2]\n" + ".inst 0x4faceb7d // sdot v29.4s, v27.16b, v12.4b[3]\n" + ".inst 0x4f91e3da // sdot v26.4s, v30.16b, v17.4b[0]\n" + ".inst 0x4fb1e3ca // sdot v10.4s, v30.16b, v17.4b[1]\n" + ".inst 0x4f91ebc2 // sdot v2.4s, v30.16b, v17.4b[2]\n" + ".inst 0x4fb1ebdd // sdot v29.4s, v30.16b, v17.4b[3]\n" + "scvtf v26.4s, v26.4s, #0x4\n" + "scvtf v10.4s, v10.4s, #0x4\n" + "fmla v5.4s, v26.4s, v24.4s\n" + "scvtf v2.4s, v2.4s, #0x4\n" + "scvtf v29.4s, v29.4s, #0x4\n" + "fmla v21.4s, v10.4s, v31.4s\n" + "fmla v8.4s, v2.4s, v6.4s\n" + "fmla v1.4s, v29.4s, v20.4s\n" + "bgt 3b\n" + "mov x20, %x[res_ptr]\n" + "subs x27, x27, #0x4\n" + "add %x[res_ptr], %x[res_ptr], #0x10\n" + "str q15, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q19, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q18, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q14, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q11, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q13, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q23, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q16, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q25, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q7, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q0, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q4, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q5, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q21, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str 
q8, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q1, [x20, #0x0]\n" + "bne 2b\n" + "mov x20, #0x4\n" + "sub x10, x10, #0x10\n" + "cmp x10, #0x10\n" + "mov %x[res_ptr], x26\n" + "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" + "bge 1b\n" + "4:" // Row loop skip + "cbz x10, 9f\n" + "5:" // Row tail: Row loop + "add x24, %x[b_ptr], #0x8\n" + "mov x23, %x[nc]\n" + "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" + "6:" // Row tail: Column loop + "movi v15.16b, #0x0\n" + "movi v19.16b, #0x0\n" + "add x25, %x[a_ptr], #0x8\n" + "mov x21, %x[nb]\n" + "movi v18.16b, #0x0\n" + "movi v14.16b, #0x0\n" + "7:" // Row tail: Block loop + "ldr q7, [x24, #0x0]\n" + "ldr q5, [x25, #0x0]\n" + "movi v9.16b, #0x4\n" + "movi v4.4s, #0x0\n" + "ldr q3, [x24, #0x10]\n" + "ldr q2, [x25, #0x10]\n" + "movi v1.4s, #0x0\n" + "movi v0.4s, #0x0\n" + "ldr q13, [x24, #0x20]\n" + "ldr q31, [x25, #0x20]\n" + "movi v30.4s, #0x0\n" + "movi v29.16b, #0xf0\n" + "ldr q28, [x24, #0x30]\n" + "ldr q27, [x25, #0x30]\n" + "sshl v20.16b, v7.16b, v9.16b\n" + "sub x20, x24, #0x8\n" + "ldr q26, [x25, #0x40]\n" + "ldr q25, [x25, #0x50]\n" + "sshl v17.16b, v3.16b, v9.16b\n" + "and v7.16b, v7.16b, v29.16b\n" + "ldr q24, [x25, #0x60]\n" + "ldr q16, [x25, #0x70]\n" + "sshl v22.16b, v13.16b, v9.16b\n" + "and v3.16b, v3.16b, v29.16b\n" + "ldr d21, [x20, #0x0]\n" + "ldr d12, [x25, #-0x8]\n" + ".inst 0x4f85e284 // sdot v4.4s, v20.16b, v5.4b[0]\n" + ".inst 0x4fa5e281 // sdot v1.4s, v20.16b, v5.4b[1]\n" + ".inst 0x4f85ea80 // sdot v0.4s, v20.16b, v5.4b[2]\n" + ".inst 0x4fa5ea9e // sdot v30.4s, v20.16b, v5.4b[3]\n" + "sshl v9.16b, v28.16b, v9.16b\n" + "subs x21, x21, #0x1\n" + "and v13.16b, v13.16b, v29.16b\n" + "and v28.16b, v28.16b, v29.16b\n" + "add x25, x25, #0x88\n" + "add x24, x24, #0x48\n" + "fcvtl v21.4s, v21.4h\n" + "fcvtl v12.4s, v12.4h\n" + ".inst 0x4f82e224 // sdot v4.4s, v17.16b, v2.4b[0]\n" + ".inst 0x4fa2e221 // sdot v1.4s, v17.16b, v2.4b[1]\n" + ".inst 0x4f82ea20 // sdot v0.4s, v17.16b, v2.4b[2]\n" + ".inst 0x4fa2ea3e // sdot v30.4s, v17.16b, v2.4b[3]\n" + "fmul v11.4s, v21.4s, v12.s[0]\n" + "fmul v23.4s, v21.4s, v12.s[1]\n" + "fmul v17.4s, v21.4s, v12.s[2]\n" + ".inst 0x4f9fe2c4 // sdot v4.4s, v22.16b, v31.4b[0]\n" + "fmul v6.4s, v21.4s, v12.s[3]\n" + ".inst 0x4fbfe2c1 // sdot v1.4s, v22.16b, v31.4b[1]\n" + ".inst 0x4f9feac0 // sdot v0.4s, v22.16b, v31.4b[2]\n" + ".inst 0x4fbfeade // sdot v30.4s, v22.16b, v31.4b[3]\n" + ".inst 0x4f9be124 // sdot v4.4s, v9.16b, v27.4b[0]\n" + ".inst 0x4fbbe121 // sdot v1.4s, v9.16b, v27.4b[1]\n" + ".inst 0x4f9be920 // sdot v0.4s, v9.16b, v27.4b[2]\n" + ".inst 0x4fbbe93e // sdot v30.4s, v9.16b, v27.4b[3]\n" + ".inst 0x4f9ae0e4 // sdot v4.4s, v7.16b, v26.4b[0]\n" + ".inst 0x4fbae0e1 // sdot v1.4s, v7.16b, v26.4b[1]\n" + ".inst 0x4f9ae8e0 // sdot v0.4s, v7.16b, v26.4b[2]\n" + ".inst 0x4fbae8fe // sdot v30.4s, v7.16b, v26.4b[3]\n" + ".inst 0x4f99e064 // sdot v4.4s, v3.16b, v25.4b[0]\n" + ".inst 0x4fb9e061 // sdot v1.4s, v3.16b, v25.4b[1]\n" + ".inst 0x4f99e860 // sdot v0.4s, v3.16b, v25.4b[2]\n" + ".inst 0x4fb9e87e // sdot v30.4s, v3.16b, v25.4b[3]\n" + ".inst 0x4f98e1a4 // sdot v4.4s, v13.16b, v24.4b[0]\n" + ".inst 0x4fb8e1a1 // sdot v1.4s, v13.16b, v24.4b[1]\n" + ".inst 0x4f98e9a0 // sdot v0.4s, v13.16b, v24.4b[2]\n" + ".inst 0x4fb8e9be // sdot v30.4s, v13.16b, v24.4b[3]\n" + ".inst 0x4f90e384 // sdot v4.4s, v28.16b, v16.4b[0]\n" + ".inst 0x4fb0e381 // sdot v1.4s, v28.16b, v16.4b[1]\n" + ".inst 0x4f90eb80 // sdot v0.4s, v28.16b, v16.4b[2]\n" + ".inst 0x4fb0eb9e // sdot v30.4s, v28.16b, v16.4b[3]\n" + 
"scvtf v4.4s, v4.4s, #0x4\n" + "scvtf v1.4s, v1.4s, #0x4\n" + "scvtf v0.4s, v0.4s, #0x4\n" + "fmla v15.4s, v4.4s, v11.4s\n" + "scvtf v30.4s, v30.4s, #0x4\n" + "fmla v19.4s, v1.4s, v23.4s\n" + "fmla v18.4s, v0.4s, v17.4s\n" + "fmla v14.4s, v30.4s, v6.4s\n" + "bgt 7b\n" + "mov x20, %x[res_ptr]\n" + "cmp x10, #0x1\n" + "str q15, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x10, #0x2\n" + "str q19, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x10, #0x3\n" + "str q18, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "str q14, [x20, #0x0]\n" + "8:" // Row tail: Accumulator store skip + "subs x23, x23, #0x4\n" + "add %x[res_ptr], %x[res_ptr], #0x10\n" + "bne 6b\n" + "subs x10, x10, #0x4\n" + "add %x[a_ptr], %x[a_ptr], x9\n" + "mov %x[res_ptr], x22\n" + "bgt 5b\n" + "9:" // Row tail: Row loop skip + : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) + : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + ); + return; #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) { float sumf[4][4]; @@ -1160,404 +1152,402 @@ void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) - if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) { - const void * b_ptr = vx; - const void * a_ptr = vy; - float * res_ptr = s; - size_t res_stride = bs * sizeof(float); - - __asm__ __volatile__( - "mov x10, %x[nr]\n" - "mov x9, #0x88\n" - "cmp x10, #0x10\n" - "mul x9, %x[nb], x9\n" - "blt 4f\n" - "1:" // Row loop - "add x28, %x[b_ptr], #0x8\n" - "mov x27, %x[nc]\n" - "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" - "2:" // Column loop - "add x25, %x[a_ptr], #0x8\n" - "movi v2.16b, #0x0\n" - "movi v10.16b, #0x0\n" - "mov x24, %x[nb]\n" - "add x23, x25, x9\n" - "movi v12.16b, #0x0\n" - "movi v28.16b, #0x0\n" - "add x22, x23, x9\n" - "movi v11.16b, #0x0\n" - "movi v13.16b, #0x0\n" - "add x21, x22, x9\n" - "movi v22.16b, #0x0\n" - "movi v23.16b, #0x0\n" - "movi v25.16b, #0x0\n" - "movi v5.16b, #0x0\n" - "movi v7.16b, #0x0\n" - "movi v4.16b, #0x0\n" - "movi v6.16b, #0x0\n" - "movi v30.16b, #0x0\n" - "movi v24.16b, #0x0\n" - "movi v14.16b, #0x0\n" - "3:" // Block loop - "ldr q21, [x28, #0x0]\n" - "ldr q16, [x28, #0x10]\n" - "movi v1.16b, #0x4\n" - "movi v19.4s, #0x0\n" - "ldr q27, [x25, #0x0]\n" - "ldr q15, [x25, #0x10]\n" - "movi v26.4s, #0x0\n" - "movi v18.4s, #0x0\n" - "ldr q29, [x28, #0x20]\n" - "ldr q3, [x28, #0x30]\n" - "movi v17.4s, #0x0\n" - "movi v0.16b, #0xf0\n" - "ldr d20, [x25, #-0x8]\n" - "ldr d9, [x23, #-0x8]\n" - "sshl v8.16b, v21.16b, v1.16b\n" - "sshl v31.16b, v16.16b, v1.16b\n" - "and v21.16b, v21.16b, v0.16b\n" - "and v16.16b, v16.16b, v0.16b\n" - "sub x20, x28, #0x8\n" - "subs x24, x24, #0x1\n" - "add x28, x28, #0x48\n" - ".inst 0x4e88a773 // smmla v19.4s, v27.16b, v8.16b\n" - ".inst 0x4e9fa77a // smmla v26.4s, v27.16b, v31.16b\n" - "ldr q27, [x25, #0x20]\n" - ".inst 0x4e88a5f2 // smmla v18.4s, v15.16b, v8.16b\n" - ".inst 0x4e9fa5f1 // smmla v17.4s, v15.16b, 
v31.16b\n" - "sshl v15.16b, v29.16b, v1.16b\n" - "sshl v1.16b, v3.16b, v1.16b\n" - "and v29.16b, v29.16b, v0.16b\n" - "and v3.16b, v3.16b, v0.16b\n" - "ldr q0, [x25, #0x30]\n" - "fcvtl v20.4s, v20.4h\n" - ".inst 0x4e8fa773 // smmla v19.4s, v27.16b, v15.16b\n" - "fcvtl v9.4s, v9.4h\n" - ".inst 0x4e81a77a // smmla v26.4s, v27.16b, v1.16b\n" - "ldr q27, [x25, #0x40]\n" - ".inst 0x4e8fa412 // smmla v18.4s, v0.16b, v15.16b\n" - ".inst 0x4e81a411 // smmla v17.4s, v0.16b, v1.16b\n" - "ldr q0, [x25, #0x50]\n" - ".inst 0x4e95a773 // smmla v19.4s, v27.16b, v21.16b\n" - ".inst 0x4e90a77a // smmla v26.4s, v27.16b, v16.16b\n" - "ldr q27, [x25, #0x60]\n" - ".inst 0x4e95a412 // smmla v18.4s, v0.16b, v21.16b\n" - ".inst 0x4e90a411 // smmla v17.4s, v0.16b, v16.16b\n" - "ldr q0, [x25, #0x70]\n" - "add x25, x25, #0x88\n" - ".inst 0x4e9da773 // smmla v19.4s, v27.16b, v29.16b\n" - ".inst 0x4e83a77a // smmla v26.4s, v27.16b, v3.16b\n" - "ldr d27, [x20, #0x0]\n" - ".inst 0x4e9da412 // smmla v18.4s, v0.16b, v29.16b\n" - ".inst 0x4e83a411 // smmla v17.4s, v0.16b, v3.16b\n" - "fcvtl v27.4s, v27.4h\n" - "uzp1 v0.2d, v19.2d, v26.2d\n" - "uzp2 v26.2d, v19.2d, v26.2d\n" - "fmul v19.4s, v27.4s, v20.s[0]\n" - "scvtf v0.4s, v0.4s, #0x4\n" - "scvtf v26.4s, v26.4s, #0x4\n" - "fmla v2.4s, v0.4s, v19.4s\n" - "ldr q19, [x23, #0x0]\n" - "uzp1 v0.2d, v18.2d, v17.2d\n" - "uzp2 v18.2d, v18.2d, v17.2d\n" - "fmul v17.4s, v27.4s, v20.s[1]\n" - "scvtf v0.4s, v0.4s, #0x4\n" - "scvtf v18.4s, v18.4s, #0x4\n" - "fmla v10.4s, v26.4s, v17.4s\n" - "ldr q17, [x23, #0x10]\n" - "fmul v26.4s, v27.4s, v20.s[2]\n" - "fmul v20.4s, v27.4s, v20.s[3]\n" - "fmla v12.4s, v0.4s, v26.4s\n" - "ldr d0, [x22, #-0x8]\n" - "ldr d26, [x21, #-0x8]\n" - "fcvtl v0.4s, v0.4h\n" - "fmla v28.4s, v18.4s, v20.4s\n" - "movi v20.4s, #0x0\n" - "movi v18.4s, #0x0\n" - ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" - ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" - "ldr q19, [x23, #0x20]\n" - "fcvtl v26.4s, v26.4h\n" - ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" - ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" - "ldr q19, [x23, #0x40]\n" - ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" - ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" - "ldr q19, [x23, #0x60]\n" - ".inst 0x4e9da674 // smmla v20.4s, v19.16b, v29.16b\n" - ".inst 0x4e83a672 // smmla v18.4s, v19.16b, v3.16b\n" - "uzp1 v19.2d, v20.2d, v18.2d\n" - "scvtf v19.4s, v19.4s, #0x4\n" - "uzp2 v20.2d, v20.2d, v18.2d\n" - "fmul v18.4s, v27.4s, v9.s[0]\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "fmla v11.4s, v19.4s, v18.4s\n" - "ldr q18, [x22, #0x0]\n" - "fmul v19.4s, v27.4s, v9.s[1]\n" - "fmla v13.4s, v20.4s, v19.4s\n" - "movi v19.4s, #0x0\n" - "movi v20.4s, #0x0\n" - ".inst 0x4e88a633 // smmla v19.4s, v17.16b, v8.16b\n" - ".inst 0x4e9fa634 // smmla v20.4s, v17.16b, v31.16b\n" - "ldr q17, [x23, #0x30]\n" - ".inst 0x4e8fa633 // smmla v19.4s, v17.16b, v15.16b\n" - ".inst 0x4e81a634 // smmla v20.4s, v17.16b, v1.16b\n" - "ldr q17, [x23, #0x50]\n" - ".inst 0x4e95a633 // smmla v19.4s, v17.16b, v21.16b\n" - ".inst 0x4e90a634 // smmla v20.4s, v17.16b, v16.16b\n" - "ldr q17, [x23, #0x70]\n" - "add x23, x23, #0x88\n" - ".inst 0x4e9da633 // smmla v19.4s, v17.16b, v29.16b\n" - ".inst 0x4e83a634 // smmla v20.4s, v17.16b, v3.16b\n" - "uzp1 v17.2d, v19.2d, v20.2d\n" - "scvtf v17.4s, v17.4s, #0x4\n" - "uzp2 v20.2d, v19.2d, v20.2d\n" - "fmul v19.4s, v27.4s, v9.s[2]\n" - "fmul v9.4s, v27.4s, v9.s[3]\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "fmla v22.4s, v17.4s, v19.4s\n" - "ldr q17, [x22, #0x10]\n" - 
"movi v19.4s, #0x0\n" - ".inst 0x4e88a653 // smmla v19.4s, v18.16b, v8.16b\n" - "fmla v23.4s, v20.4s, v9.4s\n" - "movi v20.4s, #0x0\n" - "movi v9.4s, #0x0\n" - ".inst 0x4e9fa654 // smmla v20.4s, v18.16b, v31.16b\n" - "ldr q18, [x22, #0x20]\n" - ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" - ".inst 0x4e8fa653 // smmla v19.4s, v18.16b, v15.16b\n" - ".inst 0x4e81a654 // smmla v20.4s, v18.16b, v1.16b\n" - "ldr q18, [x22, #0x40]\n" - ".inst 0x4e95a653 // smmla v19.4s, v18.16b, v21.16b\n" - ".inst 0x4e90a654 // smmla v20.4s, v18.16b, v16.16b\n" - "ldr q18, [x22, #0x60]\n" - ".inst 0x4e9da653 // smmla v19.4s, v18.16b, v29.16b\n" - ".inst 0x4e83a654 // smmla v20.4s, v18.16b, v3.16b\n" - "movi v18.4s, #0x0\n" - ".inst 0x4e9fa632 // smmla v18.4s, v17.16b, v31.16b\n" - "ldr q17, [x22, #0x30]\n" - ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" - ".inst 0x4e81a632 // smmla v18.4s, v17.16b, v1.16b\n" - "ldr q17, [x22, #0x50]\n" - ".inst 0x4e95a629 // smmla v9.4s, v17.16b, v21.16b\n" - ".inst 0x4e90a632 // smmla v18.4s, v17.16b, v16.16b\n" - "ldr q17, [x22, #0x70]\n" - "add x22, x22, #0x88\n" - ".inst 0x4e9da629 // smmla v9.4s, v17.16b, v29.16b\n" - ".inst 0x4e83a632 // smmla v18.4s, v17.16b, v3.16b\n" - "uzp1 v17.2d, v19.2d, v20.2d\n" - "uzp2 v20.2d, v19.2d, v20.2d\n" - "fmul v19.4s, v27.4s, v0.s[0]\n" - "scvtf v17.4s, v17.4s, #0x4\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "fmla v25.4s, v17.4s, v19.4s\n" - "ldr q19, [x21, #0x0]\n" - "fmul v17.4s, v27.4s, v0.s[1]\n" - "fmla v5.4s, v20.4s, v17.4s\n" - "ldr q17, [x21, #0x10]\n" - "uzp1 v20.2d, v9.2d, v18.2d\n" - "uzp2 v9.2d, v9.2d, v18.2d\n" - "fmul v18.4s, v27.4s, v0.s[2]\n" - "fmul v0.4s, v27.4s, v0.s[3]\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "scvtf v9.4s, v9.4s, #0x4\n" - "fmla v7.4s, v20.4s, v18.4s\n" - "movi v20.4s, #0x0\n" - "movi v18.4s, #0x0\n" - ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" - ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" - "ldr q19, [x21, #0x20]\n" - "fmla v4.4s, v9.4s, v0.4s\n" - "movi v9.4s, #0x0\n" - "movi v0.4s, #0x0\n" - ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" - "fmul v8.4s, v27.4s, v26.s[0]\n" - ".inst 0x4e9fa620 // smmla v0.4s, v17.16b, v31.16b\n" - "ldr q17, [x21, #0x30]\n" - ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" - "fmul v31.4s, v27.4s, v26.s[1]\n" - ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" - "ldr q19, [x21, #0x40]\n" - ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" - "fmul v15.4s, v27.4s, v26.s[2]\n" - "fmul v27.4s, v27.4s, v26.s[3]\n" - ".inst 0x4e81a620 // smmla v0.4s, v17.16b, v1.16b\n" - "ldr q1, [x21, #0x50]\n" - ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" - ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" - "ldr q26, [x21, #0x60]\n" - ".inst 0x4e95a429 // smmla v9.4s, v1.16b, v21.16b\n" - ".inst 0x4e90a420 // smmla v0.4s, v1.16b, v16.16b\n" - "ldr q21, [x21, #0x70]\n" - "add x21, x21, #0x88\n" - ".inst 0x4e9da754 // smmla v20.4s, v26.16b, v29.16b\n" - ".inst 0x4e83a752 // smmla v18.4s, v26.16b, v3.16b\n" - ".inst 0x4e9da6a9 // smmla v9.4s, v21.16b, v29.16b\n" - ".inst 0x4e83a6a0 // smmla v0.4s, v21.16b, v3.16b\n" - "uzp1 v29.2d, v20.2d, v18.2d\n" - "uzp2 v21.2d, v20.2d, v18.2d\n" - "scvtf v29.4s, v29.4s, #0x4\n" - "uzp1 v18.2d, v9.2d, v0.2d\n" - "uzp2 v16.2d, v9.2d, v0.2d\n" - "scvtf v21.4s, v21.4s, #0x4\n" - "fmla v6.4s, v29.4s, v8.4s\n" - "scvtf v18.4s, v18.4s, #0x4\n" - "scvtf v16.4s, v16.4s, #0x4\n" - "fmla v30.4s, v21.4s, v31.4s\n" - "fmla v24.4s, v18.4s, v15.4s\n" - "fmla v14.4s, v16.4s, v27.4s\n" - "bgt 3b\n" - "mov 
x20, %x[res_ptr]\n" - "subs x27, x27, #0x4\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "str q2, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q10, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q12, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q28, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q11, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q13, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q22, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q23, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q25, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q5, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q7, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q4, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q6, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q30, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q24, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q14, [x20, #0x0]\n" - "bne 2b\n" - "mov x20, #0x4\n" - "sub x10, x10, #0x10\n" - "cmp x10, #0x10\n" - "mov %x[res_ptr], x26\n" - "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" - "bge 1b\n" - "4:" // Row loop skip - "cbz x10, 9f\n" - "5:" // Row tail: Row loop - "add x24, %x[b_ptr], #0x8\n" - "mov x23, %x[nc]\n" - "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" - "6:" // Row tail: Column loop - "movi v2.16b, #0x0\n" - "movi v10.16b, #0x0\n" - "add x25, %x[a_ptr], #0x8\n" - "mov x21, %x[nb]\n" - "movi v12.16b, #0x0\n" - "movi v28.16b, #0x0\n" - "7:" // Row tail: Block loop - "ldr q6, [x24, #0x0]\n" - "ldr q5, [x24, #0x10]\n" - "movi v17.16b, #0x4\n" - "movi v8.4s, #0x0\n" - "ldr q4, [x25, #0x0]\n" - "ldr q13, [x25, #0x10]\n" - "movi v27.4s, #0x0\n" - "movi v0.4s, #0x0\n" - "ldr q31, [x24, #0x20]\n" - "ldr q14, [x24, #0x30]\n" - "movi v29.4s, #0x0\n" - "movi v22.16b, #0xf0\n" - "ldr q11, [x25, #0x20]\n" - "ldr q23, [x25, #0x30]\n" - "sshl v21.16b, v6.16b, v17.16b\n" - "sshl v16.16b, v5.16b, v17.16b\n" - "ldr q20, [x25, #0x40]\n" - "ldr q26, [x25, #0x50]\n" - "and v6.16b, v6.16b, v22.16b\n" - "and v5.16b, v5.16b, v22.16b\n" - "ldr q25, [x25, #0x60]\n" - "ldr q3, [x25, #0x70]\n" - "sshl v19.16b, v31.16b, v17.16b\n" - "sshl v18.16b, v14.16b, v17.16b\n" - "ldr d17, [x25, #-0x8]\n" - ".inst 0x4e95a488 // smmla v8.4s, v4.16b, v21.16b\n" - ".inst 0x4e90a49b // smmla v27.4s, v4.16b, v16.16b\n" - "and v31.16b, v31.16b, v22.16b\n" - ".inst 0x4e95a5a0 // smmla v0.4s, v13.16b, v21.16b\n" - ".inst 0x4e90a5bd // smmla v29.4s, v13.16b, v16.16b\n" - "and v14.16b, v14.16b, v22.16b\n" - "sub x20, x24, #0x8\n" - "ldr d16, [x20, #0x0]\n" - "subs x21, x21, #0x1\n" - "add x25, x25, #0x88\n" - "fcvtl v17.4s, v17.4h\n" - "add x24, x24, #0x48\n" - ".inst 0x4e93a568 // smmla v8.4s, v11.16b, v19.16b\n" - ".inst 0x4e92a57b // smmla v27.4s, v11.16b, v18.16b\n" - ".inst 0x4e93a6e0 // smmla v0.4s, v23.16b, v19.16b\n" - ".inst 0x4e92a6fd // smmla v29.4s, v23.16b, v18.16b\n" - "fcvtl v16.4s, v16.4h\n" - ".inst 0x4e86a688 // smmla v8.4s, v20.16b, v6.16b\n" - ".inst 0x4e85a69b // smmla v27.4s, v20.16b, v5.16b\n" - "fmul v23.4s, v16.4s, v17.s[0]\n" - "fmul v21.4s, v16.4s, v17.s[1]\n" - "fmul v1.4s, v16.4s, v17.s[2]\n" - "fmul v20.4s, v16.4s, v17.s[3]\n" - ".inst 0x4e86a740 // smmla v0.4s, v26.16b, v6.16b\n" - ".inst 0x4e85a75d // smmla v29.4s, v26.16b, v5.16b\n" - ".inst 0x4e9fa728 // smmla v8.4s, v25.16b, v31.16b\n" - ".inst 0x4e8ea73b // smmla v27.4s, v25.16b, v14.16b\n" - ".inst 0x4e9fa460 // smmla v0.4s, v3.16b, v31.16b\n" - 
".inst 0x4e8ea47d // smmla v29.4s, v3.16b, v14.16b\n" - "uzp1 v19.2d, v8.2d, v27.2d\n" - "uzp2 v18.2d, v8.2d, v27.2d\n" - "scvtf v19.4s, v19.4s, #0x4\n" - "uzp1 v17.2d, v0.2d, v29.2d\n" - "uzp2 v16.2d, v0.2d, v29.2d\n" - "scvtf v18.4s, v18.4s, #0x4\n" - "fmla v2.4s, v19.4s, v23.4s\n" - "scvtf v17.4s, v17.4s, #0x4\n" - "scvtf v16.4s, v16.4s, #0x4\n" - "fmla v10.4s, v18.4s, v21.4s\n" - "fmla v12.4s, v17.4s, v1.4s\n" - "fmla v28.4s, v16.4s, v20.4s\n" - "bgt 7b\n" - "mov x20, %x[res_ptr]\n" - "cmp x10, #0x1\n" - "str q2, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x10, #0x2\n" - "str q10, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x10, #0x3\n" - "str q12, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "str q28, [x20, #0x0]\n" - "8:" // Row tail: Accumulator store skip - "subs x23, x23, #0x4\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "bne 6b\n" - "subs x10, x10, #0x4\n" - "add %x[a_ptr], %x[a_ptr], x9\n" - "mov %x[res_ptr], x22\n" - "bgt 5b\n" - "9:" // Row tail: Row loop skip - : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) - : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" - ); - return; - } + const void * b_ptr = vx; + const void * a_ptr = vy; + float * res_ptr = s; + size_t res_stride = bs * sizeof(float); + + __asm__ __volatile__( + "mov x10, %x[nr]\n" + "mov x9, #0x88\n" + "cmp x10, #0x10\n" + "mul x9, %x[nb], x9\n" + "blt 4f\n" + "1:" // Row loop + "add x28, %x[b_ptr], #0x8\n" + "mov x27, %x[nc]\n" + "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" + "2:" // Column loop + "add x25, %x[a_ptr], #0x8\n" + "movi v2.16b, #0x0\n" + "movi v10.16b, #0x0\n" + "mov x24, %x[nb]\n" + "add x23, x25, x9\n" + "movi v12.16b, #0x0\n" + "movi v28.16b, #0x0\n" + "add x22, x23, x9\n" + "movi v11.16b, #0x0\n" + "movi v13.16b, #0x0\n" + "add x21, x22, x9\n" + "movi v22.16b, #0x0\n" + "movi v23.16b, #0x0\n" + "movi v25.16b, #0x0\n" + "movi v5.16b, #0x0\n" + "movi v7.16b, #0x0\n" + "movi v4.16b, #0x0\n" + "movi v6.16b, #0x0\n" + "movi v30.16b, #0x0\n" + "movi v24.16b, #0x0\n" + "movi v14.16b, #0x0\n" + "3:" // Block loop + "ldr q21, [x28, #0x0]\n" + "ldr q16, [x28, #0x10]\n" + "movi v1.16b, #0x4\n" + "movi v19.4s, #0x0\n" + "ldr q27, [x25, #0x0]\n" + "ldr q15, [x25, #0x10]\n" + "movi v26.4s, #0x0\n" + "movi v18.4s, #0x0\n" + "ldr q29, [x28, #0x20]\n" + "ldr q3, [x28, #0x30]\n" + "movi v17.4s, #0x0\n" + "movi v0.16b, #0xf0\n" + "ldr d20, [x25, #-0x8]\n" + "ldr d9, [x23, #-0x8]\n" + "sshl v8.16b, v21.16b, v1.16b\n" + "sshl v31.16b, v16.16b, v1.16b\n" + "and v21.16b, v21.16b, v0.16b\n" + "and v16.16b, v16.16b, v0.16b\n" + "sub x20, x28, #0x8\n" + "subs x24, x24, #0x1\n" + "add x28, x28, #0x48\n" + ".inst 0x4e88a773 // smmla v19.4s, v27.16b, v8.16b\n" + ".inst 0x4e9fa77a // smmla v26.4s, v27.16b, v31.16b\n" + "ldr q27, [x25, #0x20]\n" + ".inst 0x4e88a5f2 // smmla v18.4s, v15.16b, v8.16b\n" + ".inst 0x4e9fa5f1 // smmla v17.4s, v15.16b, v31.16b\n" + "sshl v15.16b, v29.16b, v1.16b\n" + "sshl v1.16b, v3.16b, v1.16b\n" + "and v29.16b, v29.16b, v0.16b\n" + "and v3.16b, v3.16b, v0.16b\n" + "ldr q0, [x25, #0x30]\n" + "fcvtl v20.4s, v20.4h\n" + ".inst 0x4e8fa773 // smmla v19.4s, v27.16b, 
v15.16b\n" + "fcvtl v9.4s, v9.4h\n" + ".inst 0x4e81a77a // smmla v26.4s, v27.16b, v1.16b\n" + "ldr q27, [x25, #0x40]\n" + ".inst 0x4e8fa412 // smmla v18.4s, v0.16b, v15.16b\n" + ".inst 0x4e81a411 // smmla v17.4s, v0.16b, v1.16b\n" + "ldr q0, [x25, #0x50]\n" + ".inst 0x4e95a773 // smmla v19.4s, v27.16b, v21.16b\n" + ".inst 0x4e90a77a // smmla v26.4s, v27.16b, v16.16b\n" + "ldr q27, [x25, #0x60]\n" + ".inst 0x4e95a412 // smmla v18.4s, v0.16b, v21.16b\n" + ".inst 0x4e90a411 // smmla v17.4s, v0.16b, v16.16b\n" + "ldr q0, [x25, #0x70]\n" + "add x25, x25, #0x88\n" + ".inst 0x4e9da773 // smmla v19.4s, v27.16b, v29.16b\n" + ".inst 0x4e83a77a // smmla v26.4s, v27.16b, v3.16b\n" + "ldr d27, [x20, #0x0]\n" + ".inst 0x4e9da412 // smmla v18.4s, v0.16b, v29.16b\n" + ".inst 0x4e83a411 // smmla v17.4s, v0.16b, v3.16b\n" + "fcvtl v27.4s, v27.4h\n" + "uzp1 v0.2d, v19.2d, v26.2d\n" + "uzp2 v26.2d, v19.2d, v26.2d\n" + "fmul v19.4s, v27.4s, v20.s[0]\n" + "scvtf v0.4s, v0.4s, #0x4\n" + "scvtf v26.4s, v26.4s, #0x4\n" + "fmla v2.4s, v0.4s, v19.4s\n" + "ldr q19, [x23, #0x0]\n" + "uzp1 v0.2d, v18.2d, v17.2d\n" + "uzp2 v18.2d, v18.2d, v17.2d\n" + "fmul v17.4s, v27.4s, v20.s[1]\n" + "scvtf v0.4s, v0.4s, #0x4\n" + "scvtf v18.4s, v18.4s, #0x4\n" + "fmla v10.4s, v26.4s, v17.4s\n" + "ldr q17, [x23, #0x10]\n" + "fmul v26.4s, v27.4s, v20.s[2]\n" + "fmul v20.4s, v27.4s, v20.s[3]\n" + "fmla v12.4s, v0.4s, v26.4s\n" + "ldr d0, [x22, #-0x8]\n" + "ldr d26, [x21, #-0x8]\n" + "fcvtl v0.4s, v0.4h\n" + "fmla v28.4s, v18.4s, v20.4s\n" + "movi v20.4s, #0x0\n" + "movi v18.4s, #0x0\n" + ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" + ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" + "ldr q19, [x23, #0x20]\n" + "fcvtl v26.4s, v26.4h\n" + ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" + ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" + "ldr q19, [x23, #0x40]\n" + ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" + ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" + "ldr q19, [x23, #0x60]\n" + ".inst 0x4e9da674 // smmla v20.4s, v19.16b, v29.16b\n" + ".inst 0x4e83a672 // smmla v18.4s, v19.16b, v3.16b\n" + "uzp1 v19.2d, v20.2d, v18.2d\n" + "scvtf v19.4s, v19.4s, #0x4\n" + "uzp2 v20.2d, v20.2d, v18.2d\n" + "fmul v18.4s, v27.4s, v9.s[0]\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "fmla v11.4s, v19.4s, v18.4s\n" + "ldr q18, [x22, #0x0]\n" + "fmul v19.4s, v27.4s, v9.s[1]\n" + "fmla v13.4s, v20.4s, v19.4s\n" + "movi v19.4s, #0x0\n" + "movi v20.4s, #0x0\n" + ".inst 0x4e88a633 // smmla v19.4s, v17.16b, v8.16b\n" + ".inst 0x4e9fa634 // smmla v20.4s, v17.16b, v31.16b\n" + "ldr q17, [x23, #0x30]\n" + ".inst 0x4e8fa633 // smmla v19.4s, v17.16b, v15.16b\n" + ".inst 0x4e81a634 // smmla v20.4s, v17.16b, v1.16b\n" + "ldr q17, [x23, #0x50]\n" + ".inst 0x4e95a633 // smmla v19.4s, v17.16b, v21.16b\n" + ".inst 0x4e90a634 // smmla v20.4s, v17.16b, v16.16b\n" + "ldr q17, [x23, #0x70]\n" + "add x23, x23, #0x88\n" + ".inst 0x4e9da633 // smmla v19.4s, v17.16b, v29.16b\n" + ".inst 0x4e83a634 // smmla v20.4s, v17.16b, v3.16b\n" + "uzp1 v17.2d, v19.2d, v20.2d\n" + "scvtf v17.4s, v17.4s, #0x4\n" + "uzp2 v20.2d, v19.2d, v20.2d\n" + "fmul v19.4s, v27.4s, v9.s[2]\n" + "fmul v9.4s, v27.4s, v9.s[3]\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "fmla v22.4s, v17.4s, v19.4s\n" + "ldr q17, [x22, #0x10]\n" + "movi v19.4s, #0x0\n" + ".inst 0x4e88a653 // smmla v19.4s, v18.16b, v8.16b\n" + "fmla v23.4s, v20.4s, v9.4s\n" + "movi v20.4s, #0x0\n" + "movi v9.4s, #0x0\n" + ".inst 0x4e9fa654 // smmla v20.4s, v18.16b, v31.16b\n" + "ldr q18, [x22, #0x20]\n" + 
".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" + ".inst 0x4e8fa653 // smmla v19.4s, v18.16b, v15.16b\n" + ".inst 0x4e81a654 // smmla v20.4s, v18.16b, v1.16b\n" + "ldr q18, [x22, #0x40]\n" + ".inst 0x4e95a653 // smmla v19.4s, v18.16b, v21.16b\n" + ".inst 0x4e90a654 // smmla v20.4s, v18.16b, v16.16b\n" + "ldr q18, [x22, #0x60]\n" + ".inst 0x4e9da653 // smmla v19.4s, v18.16b, v29.16b\n" + ".inst 0x4e83a654 // smmla v20.4s, v18.16b, v3.16b\n" + "movi v18.4s, #0x0\n" + ".inst 0x4e9fa632 // smmla v18.4s, v17.16b, v31.16b\n" + "ldr q17, [x22, #0x30]\n" + ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" + ".inst 0x4e81a632 // smmla v18.4s, v17.16b, v1.16b\n" + "ldr q17, [x22, #0x50]\n" + ".inst 0x4e95a629 // smmla v9.4s, v17.16b, v21.16b\n" + ".inst 0x4e90a632 // smmla v18.4s, v17.16b, v16.16b\n" + "ldr q17, [x22, #0x70]\n" + "add x22, x22, #0x88\n" + ".inst 0x4e9da629 // smmla v9.4s, v17.16b, v29.16b\n" + ".inst 0x4e83a632 // smmla v18.4s, v17.16b, v3.16b\n" + "uzp1 v17.2d, v19.2d, v20.2d\n" + "uzp2 v20.2d, v19.2d, v20.2d\n" + "fmul v19.4s, v27.4s, v0.s[0]\n" + "scvtf v17.4s, v17.4s, #0x4\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "fmla v25.4s, v17.4s, v19.4s\n" + "ldr q19, [x21, #0x0]\n" + "fmul v17.4s, v27.4s, v0.s[1]\n" + "fmla v5.4s, v20.4s, v17.4s\n" + "ldr q17, [x21, #0x10]\n" + "uzp1 v20.2d, v9.2d, v18.2d\n" + "uzp2 v9.2d, v9.2d, v18.2d\n" + "fmul v18.4s, v27.4s, v0.s[2]\n" + "fmul v0.4s, v27.4s, v0.s[3]\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "scvtf v9.4s, v9.4s, #0x4\n" + "fmla v7.4s, v20.4s, v18.4s\n" + "movi v20.4s, #0x0\n" + "movi v18.4s, #0x0\n" + ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" + ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" + "ldr q19, [x21, #0x20]\n" + "fmla v4.4s, v9.4s, v0.4s\n" + "movi v9.4s, #0x0\n" + "movi v0.4s, #0x0\n" + ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" + "fmul v8.4s, v27.4s, v26.s[0]\n" + ".inst 0x4e9fa620 // smmla v0.4s, v17.16b, v31.16b\n" + "ldr q17, [x21, #0x30]\n" + ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" + "fmul v31.4s, v27.4s, v26.s[1]\n" + ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" + "ldr q19, [x21, #0x40]\n" + ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" + "fmul v15.4s, v27.4s, v26.s[2]\n" + "fmul v27.4s, v27.4s, v26.s[3]\n" + ".inst 0x4e81a620 // smmla v0.4s, v17.16b, v1.16b\n" + "ldr q1, [x21, #0x50]\n" + ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" + ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" + "ldr q26, [x21, #0x60]\n" + ".inst 0x4e95a429 // smmla v9.4s, v1.16b, v21.16b\n" + ".inst 0x4e90a420 // smmla v0.4s, v1.16b, v16.16b\n" + "ldr q21, [x21, #0x70]\n" + "add x21, x21, #0x88\n" + ".inst 0x4e9da754 // smmla v20.4s, v26.16b, v29.16b\n" + ".inst 0x4e83a752 // smmla v18.4s, v26.16b, v3.16b\n" + ".inst 0x4e9da6a9 // smmla v9.4s, v21.16b, v29.16b\n" + ".inst 0x4e83a6a0 // smmla v0.4s, v21.16b, v3.16b\n" + "uzp1 v29.2d, v20.2d, v18.2d\n" + "uzp2 v21.2d, v20.2d, v18.2d\n" + "scvtf v29.4s, v29.4s, #0x4\n" + "uzp1 v18.2d, v9.2d, v0.2d\n" + "uzp2 v16.2d, v9.2d, v0.2d\n" + "scvtf v21.4s, v21.4s, #0x4\n" + "fmla v6.4s, v29.4s, v8.4s\n" + "scvtf v18.4s, v18.4s, #0x4\n" + "scvtf v16.4s, v16.4s, #0x4\n" + "fmla v30.4s, v21.4s, v31.4s\n" + "fmla v24.4s, v18.4s, v15.4s\n" + "fmla v14.4s, v16.4s, v27.4s\n" + "bgt 3b\n" + "mov x20, %x[res_ptr]\n" + "subs x27, x27, #0x4\n" + "add %x[res_ptr], %x[res_ptr], #0x10\n" + "str q2, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q10, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q12, [x20, #0x0]\n" + "add 
x20, x20, %x[res_stride]\n" + "str q28, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q11, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q13, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q22, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q23, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q25, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q5, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q7, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q4, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q6, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q30, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q24, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q14, [x20, #0x0]\n" + "bne 2b\n" + "mov x20, #0x4\n" + "sub x10, x10, #0x10\n" + "cmp x10, #0x10\n" + "mov %x[res_ptr], x26\n" + "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" + "bge 1b\n" + "4:" // Row loop skip + "cbz x10, 9f\n" + "5:" // Row tail: Row loop + "add x24, %x[b_ptr], #0x8\n" + "mov x23, %x[nc]\n" + "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" + "6:" // Row tail: Column loop + "movi v2.16b, #0x0\n" + "movi v10.16b, #0x0\n" + "add x25, %x[a_ptr], #0x8\n" + "mov x21, %x[nb]\n" + "movi v12.16b, #0x0\n" + "movi v28.16b, #0x0\n" + "7:" // Row tail: Block loop + "ldr q6, [x24, #0x0]\n" + "ldr q5, [x24, #0x10]\n" + "movi v17.16b, #0x4\n" + "movi v8.4s, #0x0\n" + "ldr q4, [x25, #0x0]\n" + "ldr q13, [x25, #0x10]\n" + "movi v27.4s, #0x0\n" + "movi v0.4s, #0x0\n" + "ldr q31, [x24, #0x20]\n" + "ldr q14, [x24, #0x30]\n" + "movi v29.4s, #0x0\n" + "movi v22.16b, #0xf0\n" + "ldr q11, [x25, #0x20]\n" + "ldr q23, [x25, #0x30]\n" + "sshl v21.16b, v6.16b, v17.16b\n" + "sshl v16.16b, v5.16b, v17.16b\n" + "ldr q20, [x25, #0x40]\n" + "ldr q26, [x25, #0x50]\n" + "and v6.16b, v6.16b, v22.16b\n" + "and v5.16b, v5.16b, v22.16b\n" + "ldr q25, [x25, #0x60]\n" + "ldr q3, [x25, #0x70]\n" + "sshl v19.16b, v31.16b, v17.16b\n" + "sshl v18.16b, v14.16b, v17.16b\n" + "ldr d17, [x25, #-0x8]\n" + ".inst 0x4e95a488 // smmla v8.4s, v4.16b, v21.16b\n" + ".inst 0x4e90a49b // smmla v27.4s, v4.16b, v16.16b\n" + "and v31.16b, v31.16b, v22.16b\n" + ".inst 0x4e95a5a0 // smmla v0.4s, v13.16b, v21.16b\n" + ".inst 0x4e90a5bd // smmla v29.4s, v13.16b, v16.16b\n" + "and v14.16b, v14.16b, v22.16b\n" + "sub x20, x24, #0x8\n" + "ldr d16, [x20, #0x0]\n" + "subs x21, x21, #0x1\n" + "add x25, x25, #0x88\n" + "fcvtl v17.4s, v17.4h\n" + "add x24, x24, #0x48\n" + ".inst 0x4e93a568 // smmla v8.4s, v11.16b, v19.16b\n" + ".inst 0x4e92a57b // smmla v27.4s, v11.16b, v18.16b\n" + ".inst 0x4e93a6e0 // smmla v0.4s, v23.16b, v19.16b\n" + ".inst 0x4e92a6fd // smmla v29.4s, v23.16b, v18.16b\n" + "fcvtl v16.4s, v16.4h\n" + ".inst 0x4e86a688 // smmla v8.4s, v20.16b, v6.16b\n" + ".inst 0x4e85a69b // smmla v27.4s, v20.16b, v5.16b\n" + "fmul v23.4s, v16.4s, v17.s[0]\n" + "fmul v21.4s, v16.4s, v17.s[1]\n" + "fmul v1.4s, v16.4s, v17.s[2]\n" + "fmul v20.4s, v16.4s, v17.s[3]\n" + ".inst 0x4e86a740 // smmla v0.4s, v26.16b, v6.16b\n" + ".inst 0x4e85a75d // smmla v29.4s, v26.16b, v5.16b\n" + ".inst 0x4e9fa728 // smmla v8.4s, v25.16b, v31.16b\n" + ".inst 0x4e8ea73b // smmla v27.4s, v25.16b, v14.16b\n" + ".inst 0x4e9fa460 // smmla v0.4s, v3.16b, v31.16b\n" + ".inst 0x4e8ea47d // smmla v29.4s, v3.16b, v14.16b\n" + "uzp1 v19.2d, v8.2d, v27.2d\n" + "uzp2 v18.2d, v8.2d, v27.2d\n" + "scvtf v19.4s, v19.4s, #0x4\n" + "uzp1 v17.2d, v0.2d, v29.2d\n" + "uzp2 v16.2d, v0.2d, v29.2d\n" + "scvtf v18.4s, v18.4s, 
#0x4\n" + "fmla v2.4s, v19.4s, v23.4s\n" + "scvtf v17.4s, v17.4s, #0x4\n" + "scvtf v16.4s, v16.4s, #0x4\n" + "fmla v10.4s, v18.4s, v21.4s\n" + "fmla v12.4s, v17.4s, v1.4s\n" + "fmla v28.4s, v16.4s, v20.4s\n" + "bgt 7b\n" + "mov x20, %x[res_ptr]\n" + "cmp x10, #0x1\n" + "str q2, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x10, #0x2\n" + "str q10, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x10, #0x3\n" + "str q12, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "str q28, [x20, #0x0]\n" + "8:" // Row tail: Accumulator store skip + "subs x23, x23, #0x4\n" + "add %x[res_ptr], %x[res_ptr], #0x10\n" + "bne 6b\n" + "subs x10, x10, #0x4\n" + "add %x[a_ptr], %x[a_ptr], x9\n" + "mov %x[res_ptr], x22\n" + "bgt 5b\n" + "9:" // Row tail: Row loop skip + : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) + : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + ); + return; #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) float sumf[4][4]; int sumi; @@ -1615,7 +1605,7 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) - if (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0) { + if (ggml_cpu_get_sve_cnt() == QK8_0) { const void * b_ptr = vx; const void * a_ptr = vy; float * res_ptr = s; @@ -2083,59 +2073,57 @@ void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); + const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); - for (int y = 0; y < nr / 4; y++) { - const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); - float32x4_t sumf[4]; - for (int m = 0; m < 4; m++) { - sumf[m] = vdupq_n_f32(0); - } + float32x4_t sumf[4]; + for (int m = 0; m < 4; m++) { + sumf[m] = vdupq_n_f32(0); + } - for (int l = 0; l < nb; l++) { - float32x4_t a_d = vcvt_f32_f16(vld1_f16((const float16_t *)a_ptr[l].d)); - float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); - - int32x4_t sumi_0 = vdupq_n_s32(0); - int32x4_t sumi_1 = vdupq_n_s32(0); - int32x4_t sumi_2 = vdupq_n_s32(0); - int32x4_t sumi_3 = vdupq_n_s32(0); - - for (int k = 0; k < 4; k++) { - int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 16 * k + 0); - int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16 * k + 64); - - uint8x16_t b = vld1q_u8(b_ptr[l].qs + 16 * k); - int8x16_t b_hi = vqtbl1q_s8(kvalues, b >> 4); - int8x16_t b_lo = vqtbl1q_s8(kvalues, b & 0xF); - - sumi_0 = vdotq_laneq_s32(sumi_0, b_lo, a_0, 0); - sumi_1 = vdotq_laneq_s32(sumi_1, b_lo, a_0, 1); - sumi_2 = vdotq_laneq_s32(sumi_2, b_lo, a_0, 2); - sumi_3 = vdotq_laneq_s32(sumi_3, b_lo, a_0, 3); - sumi_0 = vdotq_laneq_s32(sumi_0, b_hi, a_1, 0); - sumi_1 = vdotq_laneq_s32(sumi_1, b_hi, a_1, 1); - sumi_2 = vdotq_laneq_s32(sumi_2, b_hi, a_1, 2); - sumi_3 = vdotq_laneq_s32(sumi_3, b_hi, a_1, 3); - } + for (int l = 0; l < nb; l++) { + float32x4_t a_d = vcvt_f32_f16(vld1_f16((const float16_t *)a_ptr[l].d)); + float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); - sumf[0] = vmlaq_f32(sumf[0], vmulq_laneq_f32(b_d, a_d, 0), vcvtq_f32_s32(sumi_0)); - sumf[1] = vmlaq_f32(sumf[1], vmulq_laneq_f32(b_d, a_d, 1), vcvtq_f32_s32(sumi_1)); - sumf[2] = vmlaq_f32(sumf[2], vmulq_laneq_f32(b_d, a_d, 2), vcvtq_f32_s32(sumi_2)); - sumf[3] = vmlaq_f32(sumf[3], vmulq_laneq_f32(b_d, a_d, 3), vcvtq_f32_s32(sumi_3)); + int32x4_t sumi_0 = vdupq_n_s32(0); + int32x4_t sumi_1 = vdupq_n_s32(0); + int32x4_t sumi_2 = vdupq_n_s32(0); + int32x4_t sumi_3 = vdupq_n_s32(0); + + for (int k = 0; k < 4; k++) { + int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 16 * k + 0); + int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16 * k + 64); + + uint8x16_t b = vld1q_u8(b_ptr[l].qs + 16 * k); + int8x16_t b_hi = vqtbl1q_s8(kvalues, b >> 4); + int8x16_t b_lo = vqtbl1q_s8(kvalues, b & 0xF); + + sumi_0 = vdotq_laneq_s32(sumi_0, b_lo, a_0, 0); + sumi_1 = vdotq_laneq_s32(sumi_1, b_lo, a_0, 1); + sumi_2 = vdotq_laneq_s32(sumi_2, b_lo, a_0, 2); + sumi_3 = vdotq_laneq_s32(sumi_3, b_lo, a_0, 3); + sumi_0 = vdotq_laneq_s32(sumi_0, b_hi, a_1, 0); + sumi_1 = vdotq_laneq_s32(sumi_1, b_hi, a_1, 1); + sumi_2 = vdotq_laneq_s32(sumi_2, b_hi, a_1, 2); + sumi_3 = vdotq_laneq_s32(sumi_3, b_hi, a_1, 3); } - for (int m = 0; m < 4; m++) { - vst1q_f32(s + (y * 4 + m) * bs + x * 4, sumf[m]); - } + sumf[0] = vmlaq_f32(sumf[0], vmulq_laneq_f32(b_d, a_d, 0), vcvtq_f32_s32(sumi_0)); + sumf[1] = vmlaq_f32(sumf[1], vmulq_laneq_f32(b_d, a_d, 1), vcvtq_f32_s32(sumi_1)); 
+ sumf[2] = vmlaq_f32(sumf[2], vmulq_laneq_f32(b_d, a_d, 2), vcvtq_f32_s32(sumi_2)); + sumf[3] = vmlaq_f32(sumf[3], vmulq_laneq_f32(b_d, a_d, 3), vcvtq_f32_s32(sumi_3)); + } + + for (int m = 0; m < 4; m++) { + vst1q_f32(s + (y * 4 + m) * bs + x * 4, sumf[m]); } } - return; } + return; #endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) { float sumf[4][4]; diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index 2c12e493bc9b0..1bb9c4e367f0f 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -74,13 +74,8 @@ #if defined(__ARM_ARCH) struct ggml_arm_arch_features_type { - int has_neon; - int has_dotprod; - int has_i8mm; - int has_sve; int sve_cnt; - int has_sme; -} ggml_arm_arch_features = {-1, -1, -1, -1, 0, -1}; +} ggml_arm_arch_features = { 0 }; #endif @@ -678,87 +673,15 @@ bool ggml_is_numa(void) { #if defined(__linux__) && defined(__aarch64__) #include -#elif defined(__APPLE__) -#include -#endif - -#if !defined(HWCAP2_I8MM) -#define HWCAP2_I8MM (1 << 13) -#endif - -#if !defined(HWCAP2_SME) -#define HWCAP2_SME (1 << 23) #endif static void ggml_init_arm_arch_features(void) { -#if defined(__linux__) && defined(__aarch64__) - uint32_t hwcap = getauxval(AT_HWCAP); - uint32_t hwcap2 = getauxval(AT_HWCAP2); - - ggml_arm_arch_features.has_neon = !!(hwcap & HWCAP_ASIMD); - ggml_arm_arch_features.has_dotprod = !!(hwcap & HWCAP_ASIMDDP); - ggml_arm_arch_features.has_i8mm = !!(hwcap2 & HWCAP2_I8MM); - ggml_arm_arch_features.has_sve = !!(hwcap & HWCAP_SVE); - ggml_arm_arch_features.has_sme = !!(hwcap2 & HWCAP2_SME); - -#if defined(__ARM_FEATURE_SVE) +#if defined(__linux__) && defined(__aarch64__) && defined(__ARM_FEATURE_SVE) ggml_arm_arch_features.sve_cnt = PR_SVE_VL_LEN_MASK & prctl(PR_SVE_GET_VL); #endif -#elif defined(__APPLE__) - int oldp = 0; - size_t size = sizeof(oldp); - if (sysctlbyname("hw.optional.AdvSIMD", &oldp, &size, NULL, 0) != 0) { - oldp = 0; - } - ggml_arm_arch_features.has_neon = oldp; - - if (sysctlbyname("hw.optional.arm.FEAT_DotProd", &oldp, &size, NULL, 0) != 0) { - oldp = 0; - } - ggml_arm_arch_features.has_dotprod = oldp; - - if (sysctlbyname("hw.optional.arm.FEAT_I8MM", &oldp, &size, NULL, 0) != 0) { - oldp = 0; - } - ggml_arm_arch_features.has_i8mm = oldp; - - if (sysctlbyname("hw.optional.arm.FEAT_SME", &oldp, &size, NULL, 0) != 0) { - oldp = 0; - } - ggml_arm_arch_features.has_sme = oldp; - - ggml_arm_arch_features.has_sve = 0; - ggml_arm_arch_features.sve_cnt = 0; -#else -// Run-time CPU feature detection not implemented for this platform, fallback to compile time -#if defined(__ARM_NEON) - ggml_arm_arch_features.has_neon = 1; -#else - ggml_arm_arch_features.has_neon = 0; -#endif - -#if defined(__ARM_FEATURE_MATMUL_INT8) - ggml_arm_arch_features.has_i8mm = 1; -#else - ggml_arm_arch_features.has_i8mm = 0; -#endif - -#if defined(__ARM_FEATURE_SVE) - ggml_arm_arch_features.has_sve = 1; - ggml_arm_arch_features.sve_cnt = 16; -#else - ggml_arm_arch_features.has_sve = 0; - ggml_arm_arch_features.sve_cnt = 0; -#endif - -#if defined(__ARM_FEATURE_SME) || defined(__ARM_FEATURE_SME2) - ggml_arm_arch_features.has_sme = 1; -#else - ggml_arm_arch_features.has_sme = 0; -#endif -#endif } -#endif + +#endif // __ARM_ARCH struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) { GGML_ASSERT(!ggml_get_no_alloc(ctx)); @@ -3443,7 +3366,7 @@ int ggml_cpu_has_vxe(void) { int ggml_cpu_has_neon(void) { #if defined(__ARM_ARCH) && defined(__ARM_NEON) - return 
ggml_arm_arch_features.has_neon; + return 1; #else return 0; #endif @@ -3451,7 +3374,7 @@ int ggml_cpu_has_neon(void) { int ggml_cpu_has_dotprod(void) { #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_DOTPROD) - return ggml_arm_arch_features.has_dotprod; + return 1; #else return 0; #endif @@ -3459,7 +3382,7 @@ int ggml_cpu_has_dotprod(void) { int ggml_cpu_has_sve(void) { #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE) - return ggml_arm_arch_features.has_sve; + return 1; #else return 0; #endif @@ -3467,7 +3390,7 @@ int ggml_cpu_has_sve(void) { int ggml_cpu_has_matmul_int8(void) { #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_MATMUL_INT8) - return ggml_arm_arch_features.has_i8mm; + return 1; #else return 0; #endif @@ -3483,7 +3406,7 @@ int ggml_cpu_get_sve_cnt(void) { int ggml_cpu_has_sme(void) { #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SME) - return ggml_arm_arch_features.has_sme; + return 1; #else return 0; #endif From 4a5248e41aaa543a3cb7c23045c060d1c1e1e3e5 Mon Sep 17 00:00:00 2001 From: Aman Gupta Date: Fri, 20 Jun 2025 09:50:24 +0800 Subject: [PATCH 106/192] CUDA: add conv_2d_dw (#14265) * CUDA: add conv_2d_dw * better naming * simplify using template * Review: fix operation ordering in ggml-cuda, use __forceinline__, use more const --- ggml/src/ggml-cuda/conv2d-dw.cu | 161 +++++++++++++++++++++++++++++++ ggml/src/ggml-cuda/conv2d-dw.cuh | 5 + ggml/src/ggml-cuda/ggml-cuda.cu | 5 + 3 files changed, 171 insertions(+) create mode 100644 ggml/src/ggml-cuda/conv2d-dw.cu create mode 100644 ggml/src/ggml-cuda/conv2d-dw.cuh diff --git a/ggml/src/ggml-cuda/conv2d-dw.cu b/ggml/src/ggml-cuda/conv2d-dw.cu new file mode 100644 index 0000000000000..7583233b1b7cd --- /dev/null +++ b/ggml/src/ggml-cuda/conv2d-dw.cu @@ -0,0 +1,161 @@ +#include "conv2d-dw.cuh" + +struct conv_params { + int in_w, in_h; + int out_w, out_h; + int kernel_w, kernel_h; + int stride_x, stride_y; + int padding_x, padding_y; + int dilation_x, dilation_y; + int channels, batches; +}; + +struct kernel_bounds { + int y_min, y_max; + int x_min, x_max; +}; + +__device__ __forceinline__ kernel_bounds calculate_kernel_bounds(int out_x, int out_y, const conv_params & params) { + kernel_bounds bounds; + bounds.y_min = max(0, (params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y); + bounds.y_max = + min(params.kernel_h, + (params.in_h + params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y); + bounds.x_min = max(0, (params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x); + bounds.x_max = + min(params.kernel_w, + (params.in_w + params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x); + return bounds; +} + +__device__ __forceinline__ int calculate_input_coord(int out_coord, int kern_coord, int stride, int dilation, int padding) { + return out_coord * stride + kern_coord * dilation - padding; +} + +struct whcn_layout { + __device__ static int input_index(int n, int c, int y, int x, const conv_params & params) { + return n * (params.channels * params.in_w * params.in_h) + c * params.in_w * params.in_h + y * params.in_w + x; + } + + __device__ static int kernel_index(int c, int ky, int kx, const conv_params & params) { + return c * params.kernel_h * params.kernel_w + ky * params.kernel_w + kx; + } + + __device__ static int output_index(int n, int c, int y, int x, const conv_params & params) { + return n * (params.channels * params.out_w * params.out_h) + c * params.out_w * params.out_h + + y 
* params.out_w + x; + } + + __device__ static void unpack_indices(int global_idx, const conv_params & params, int & n, int & c, int & out_y, + int & out_x) { + out_x = global_idx % params.out_w; + out_y = (global_idx / params.out_w) % params.out_h; + c = (global_idx / (params.out_w * params.out_h)) % params.channels; + n = global_idx / (params.out_w * params.out_h * params.channels); + } +}; + +struct cwhn_layout { + __device__ static int input_index(int n, int c, int y, int x, const conv_params & params) { + return n * (params.channels * params.in_w * params.in_h) + (y * params.in_w + x) * params.channels + c; + } + + __device__ static int kernel_index(int c, int ky, int kx, const conv_params & params) { + return (ky * params.kernel_w + kx) * params.channels + c; + } + + __device__ static int output_index(int n, int c, int y, int x, const conv_params & params) { + return n * (params.channels * params.out_w * params.out_h) + y * (params.out_w * params.channels) + + x * params.channels + c; + } + + __device__ static void unpack_indices(int global_idx, const conv_params & params, int & n, int & c, int & out_y, + int & out_x) { + c = global_idx % params.channels; + out_x = (global_idx / params.channels) % params.out_w; + out_y = (global_idx / (params.channels * params.out_w)) % params.out_h; + n = global_idx / (params.channels * params.out_w * params.out_h); + } +}; + +template +__global__ void conv2d_dw_kernel(const T * __restrict__ input, const T * __restrict__ kernel, T * __restrict__ output, + const int in_w, const int in_h, const int out_w, const int out_h, + const int kernel_w, const int kernel_h, const int stride_x, const int stride_y, + const int padding_x, const int padding_y, const int dilation_x, const int dilation_y, + const int channels, const int batches) { + const int global_idx = blockIdx.x * blockDim.x + threadIdx.x; + const int total_elements = batches * channels * out_h * out_w; + + if (global_idx >= total_elements) { + return; + } + + conv_params params = { in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, + stride_y, padding_x, padding_y, dilation_x, dilation_y, channels, batches }; + + int batch_idx, channel_idx, out_y_idx, out_x_idx; + Layout::unpack_indices(global_idx, params, batch_idx, channel_idx, out_y_idx, out_x_idx); + + T accumulator = 0; + kernel_bounds bounds = calculate_kernel_bounds(out_x_idx, out_y_idx, params); + + for (int kern_y = bounds.y_min; kern_y < bounds.y_max; ++kern_y) { + int in_y_idx = calculate_input_coord(out_y_idx, kern_y, params.stride_y, params.dilation_y, params.padding_y); + + for (int kern_x = bounds.x_min; kern_x < bounds.x_max; ++kern_x) { + int in_x_idx = calculate_input_coord(out_x_idx, kern_x, params.stride_x, params.dilation_x, params.padding_x); + + const T input_val = input[Layout::input_index(batch_idx, channel_idx, in_y_idx, in_x_idx, params)]; + const T kernel_val = kernel[Layout::kernel_index(channel_idx, kern_y, kern_x, params)]; + + accumulator += input_val * kernel_val; + } + } + + output[Layout::output_index(batch_idx, channel_idx, out_y_idx, out_x_idx, params)] = accumulator; +} + +void ggml_cuda_op_conv2d_dw(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * kernel = dst->src[0]; + const ggml_tensor * input = dst->src[1]; + + GGML_ASSERT(kernel->type == GGML_TYPE_F32 && input->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32); + const float * w_d = (const float *) kernel->data; + const float * x_d = (const float *) input->data; + float * y_d = (float *) dst->data; + + const int32_t * p 
= (const int32_t *) dst->op_params; + const int stride_x = p[0]; + const int stride_y = p[1]; + const int padding_x = p[2]; + const int padding_y = p[3]; + const int dilation_x = p[4]; + const int dilation_y = p[5]; + + const int in_w = input->ne[0]; + const int in_h = input->ne[1]; + const int kernel_w = kernel->ne[0]; + const int kernel_h = kernel->ne[1]; + const int out_w = dst->ne[0]; + const int out_h = dst->ne[1]; + const int channels = dst->ne[2]; + const int batches = dst->ne[3]; + + cudaStream_t st = ctx.stream(); + + const int total = batches * channels * out_h * out_w; + const int blocks = (total + CUDA_CONV2D_DW_BLOCK_SIZE - 1) / CUDA_CONV2D_DW_BLOCK_SIZE; + + if (ggml_is_contiguous(input)) { + conv2d_dw_kernel<<>>( + x_d, w_d, y_d, in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y, + dilation_x, dilation_y, channels, batches); + } else if (ggml_is_contiguous_channels(input)) { + conv2d_dw_kernel<<>>( + x_d, w_d, y_d, in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y, + dilation_x, dilation_y, channels, batches); + } else { + GGML_ABORT("Unsupported memory layout for conv_2d_dw"); + } +} diff --git a/ggml/src/ggml-cuda/conv2d-dw.cuh b/ggml/src/ggml-cuda/conv2d-dw.cuh new file mode 100644 index 0000000000000..b5d5a69d345cf --- /dev/null +++ b/ggml/src/ggml-cuda/conv2d-dw.cuh @@ -0,0 +1,5 @@ +#pragma once +#include "common.cuh" + +#define CUDA_CONV2D_DW_BLOCK_SIZE 256 +void ggml_cuda_op_conv2d_dw(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 898b24341471d..80fe050734dfa 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -11,6 +11,7 @@ #include "ggml-cuda/clamp.cuh" #include "ggml-cuda/concat.cuh" #include "ggml-cuda/conv-transpose-1d.cuh" +#include "ggml-cuda/conv2d-dw.cuh" #include "ggml-cuda/convert.cuh" #include "ggml-cuda/count-equal.cuh" #include "ggml-cuda/cpy.cuh" @@ -2310,6 +2311,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_IM2COL: ggml_cuda_op_im2col(ctx, dst); break; + case GGML_OP_CONV_2D_DW: + ggml_cuda_op_conv2d_dw(ctx, dst); + break; case GGML_OP_CONV_TRANSPOSE_1D: ggml_cuda_op_conv_transpose_1d(ctx,dst); break; @@ -3209,6 +3213,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g return op->src[0]->nb[0] == ggml_type_size(op->src[0]->type) && ggml_is_contiguous_2(op->src[0]); } case GGML_OP_IM2COL: + case GGML_OP_CONV_2D_DW: case GGML_OP_POOL_2D: case GGML_OP_SUM: case GGML_OP_SUM_ROWS: From a596c8f5bb5878f033a29bfd38fadc816b3d2896 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 20 Jun 2025 10:14:14 +0300 Subject: [PATCH 107/192] ubatch : new splitting logic (#14217) ggml-ci --- src/llama-batch.cpp | 916 +++++++++++++++++----------- src/llama-batch.h | 168 ++--- src/llama-context.cpp | 133 ++-- src/llama-context.h | 2 +- src/llama-graph.cpp | 270 +++----- src/llama-graph.h | 6 +- src/llama-hparams.cpp | 4 + src/llama-hparams.h | 2 + src/llama-kv-cache-unified-iswa.cpp | 40 +- src/llama-kv-cache-unified-iswa.h | 7 +- src/llama-kv-cache-unified.cpp | 134 ++-- src/llama-kv-cache-unified.h | 7 +- src/llama-kv-cells.h | 4 +- src/llama-memory-hybrid.cpp | 81 ++- src/llama-memory-hybrid.h | 9 +- src/llama-memory-recurrent.cpp | 76 ++- src/llama-memory-recurrent.h | 7 +- src/llama-memory.h | 7 +- tools/server/server.cpp | 32 - 19 files changed, 991 insertions(+), 914 deletions(-) 
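
Note (illustrative, not part of the patch): the diff below removes the old llama_sbatch type and moves batch splitting onto llama_batch_allocr (split_reset / split_simple / split_equal / split_seq). A minimal sketch of how a caller is expected to drive the new interface, assuming an already-initialized llama_batch_allocr reference `balloc` and a micro-batch size `n_ubatch` (the variable names and the `ubatches` vector are illustrative; the method names and the "n_tokens == 0 means the batch is consumed" convention are taken from the code added in this patch):

    balloc.split_reset();                        // restart splitting; init() also does this

    std::vector<llama_ubatch> ubatches;
    while (true) {
        // split_simple() hands out unused tokens in batch order;
        // split_equal() / split_seq() are the sequence-set-aware variants
        llama_ubatch ubatch = balloc.split_simple(n_ubatch);
        if (ubatch.n_tokens == 0) {
            break;                               // the entire batch has been consumed
        }
        ubatches.push_back(ubatch);              // each ubatch points into data owned by balloc
    }

The returned llama_ubatch only holds pointers into buffers owned by the allocator, so the ubatches remain valid until the next split_reset()/init() call.
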
diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index 8b6d14fe8813c..b3c996e18ab41 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -1,7 +1,6 @@ #include "llama-batch.h" #include "llama-impl.h" -#include "llama-cparams.h" #include "llama-vocab.h" #include "llama-memory.h" @@ -10,282 +9,7 @@ #include #include -llama_ubatch llama_sbatch::reserve_ubatch(size_t n_ubatch, bool has_embd) { - // clear empty sequences - // the previous ubatch is assumed to be gone, - // so nothing should refer to values in these sequences anymore. - for (size_t i = seq.size(); i-- > 0;) { - if (seq[i].length == 0) { - seq.pop_back(); - } else { - break; - } - } - - udatas.push_back({}); - - auto & udata = udatas.back(); - - udata.token.resize(!has_embd ? n_ubatch : 0); - udata.embd.resize(has_embd ? n_embd * n_ubatch : 0); - udata.pos.resize(n_ubatch); - udata.n_seq_id.resize(n_ubatch); - udata.seq_id.resize(n_ubatch); - udata.output.resize(n_ubatch); - - llama_ubatch ubatch = { - /*equal_seqs =*/ true, - /*n_tokens =*/ 0, - /*n_seq_tokens =*/ 0, - /*n_seqs =*/ 0, - /*token =*/ !has_embd ? udata.token.data() : nullptr, - /*embd =*/ has_embd ? udata.embd.data() : nullptr, - /*pos =*/ udata.pos.data(), - /*n_seq_id =*/ udata.n_seq_id.data(), - /*seq_id =*/ udata.seq_id.data(), - /*output =*/ udata.output.data(), - }; - - return ubatch; -} - -void llama_sbatch::add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length) { - GGML_ASSERT(batch != nullptr); - GGML_ASSERT(length <= seq.length); - // Can only add sequences of equal lengths to a batch, - // otherwise it isn't clear to which sequence a token belongs - GGML_ASSERT(seq.n_seq_id == 0 || ubatch.n_seqs == 0 || length == (size_t) ubatch.n_tokens / ubatch.n_seqs); - GGML_ASSERT((seq.n_seq_id != 0) == ubatch.equal_seqs); - // NOTE: loops are separated for cache-friendliness - if (batch->token) { - if (ubatch.equal_seqs) { - for (size_t i = 0; i < length; ++i) { - ubatch.token[ubatch.n_tokens + i] = batch->token[ids[seq.offset + i]]; - } - } else { - // simple split - ubatch.token = batch->token + seq.offset; - } - } else { - ubatch.token = nullptr; - } - if (batch->embd) { - if (ubatch.equal_seqs) { - for (size_t i = 0; i < length; ++i) { - memcpy( - ubatch.embd + (n_embd * (ubatch.n_tokens + i)), - batch->embd + (n_embd * ids[seq.offset + i]), - n_embd * sizeof(float) - ); - } - } else { - // simple split - ubatch.embd = batch->embd + (n_embd * seq.offset); - } - } else { - ubatch.embd = nullptr; - } - if (ubatch.equal_seqs) { - for (size_t i = 0; i < length; ++i) { - ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]]; - } - } else { - // simple split - ubatch.pos = batch->pos + seq.offset; - } - if (ubatch.equal_seqs) { - ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id; - if (seq.seq_id) { - ubatch.seq_id[ubatch.n_seqs] = seq.seq_id; - } - } else { - // simple split - if (batch->n_seq_id) { - ubatch.n_seq_id = batch->n_seq_id + seq.offset; - } else { - for (size_t i = 0; i < length; ++i) { - ubatch.n_seq_id[ubatch.n_seqs + i] = 1; - } - } - if (batch->seq_id) { - ubatch.seq_id = batch->seq_id + seq.offset; - } - } - if (batch->logits) { - if (ubatch.equal_seqs) { - for (size_t i = 0; i < length; ++i) { - size_t id = ids[seq.offset + i]; - int8_t is_output = batch->logits[id]; - ubatch.output[ubatch.n_tokens + i] = is_output; - if (is_output) { out_ids.push_back(id); } - } - } else { - // simple split - ubatch.output = batch->logits + seq.offset; - for (size_t i = 0; i < length; ++i) { - if (ubatch.output[i] != 
0) { out_ids.push_back(seq.offset + i); } - } - } - } else { - // only get last output - for (size_t i = 0; i < length; ++i) { - size_t id = ids[seq.offset + i]; - int8_t is_last = id == ids.size() - 1; - ubatch.output[ubatch.n_tokens + i] = is_last; - if (is_last) { out_ids.push_back(id); } - } - } - if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) { - ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1; - } - ubatch.n_tokens += length; - ubatch.n_seqs += ubatch.equal_seqs ? 1 : length; // virtual sequences for simple splits - seq.offset += length; - seq.length -= length; - n_tokens -= length; - GGML_ASSERT(ubatch.n_tokens == ubatch.n_seq_tokens * ubatch.n_seqs); -} - -llama_ubatch llama_sbatch::split_simple(size_t n_ubatch) { - n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch; - llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr); - ubatch.equal_seqs = false; - if (!seq.empty()) { - llama_sbatch_seq & s = seq[0]; - size_t length = s.length < n_ubatch ? s.length : n_ubatch; - GGML_ASSERT(seq.size() == 1 && s.n_seq_id == 0); // don't mix with other splits - add_seq_to_ubatch(ubatch, s, length); - } - return ubatch; -} - -llama_ubatch llama_sbatch::split_equal(size_t n_ubatch) { - n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch; - llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr); - if (!seq.empty()) { - size_t length = 0; - size_t n_tokens_in_ubatch = 0; - GGML_ASSERT(seq[0].n_seq_id > 0); // should not be mixed with simple splits - // smallest first, because it's easier to split this way; - // starting from the end to pop in constant time. - for (size_t i = seq.size(); i-- > 0;) { - llama_sbatch_seq & s = seq[i]; - GGML_ASSERT(s.length > 0); - if (length == 0) { - length = s.length < n_ubatch ? s.length : n_ubatch; - } - add_seq_to_ubatch(ubatch, s, length); - n_tokens_in_ubatch += length; - // shared prompts can't be mixed with any of their sequences, - // so it's safer to compute them in their own ubatch - if (s.n_seq_id > 1) { break; } - // stop when there isn't enough space for another sequence - if (length + n_tokens_in_ubatch > n_ubatch) { break; } - } - } - return ubatch; -} - -llama_ubatch llama_sbatch::split_seq(size_t n_ubatch) { - n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch; - llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr); - if (!seq.empty()) { - llama_sbatch_seq & s = seq[seq.size() - 1]; - size_t length = s.length < n_ubatch ? s.length : n_ubatch; - GGML_ASSERT(s.n_seq_id > 0); // should not be mixed with simple splits - add_seq_to_ubatch(ubatch, s, length); - } - return ubatch; -} - -llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split) { - GGML_ASSERT(batch.n_tokens >= 0); - this->batch = &batch; - this->n_embd = n_embd; - - n_tokens = batch.n_tokens; - ids.resize(n_tokens); - out_ids.clear(); - // TODO: reserve out_ids and seq - - for (size_t i = 0; i < n_tokens; ++i) { - ids[i] = i; - } - - if (simple_split) { - seq.resize(1); - llama_sbatch_seq & s = seq[0]; - s.n_seq_id = 0; - s.seq_id = nullptr; - s.offset = 0; - s.length = n_tokens; - return; - } - - std::sort(ids.begin(), ids.end(), - [&batch](size_t a, size_t b) { - int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1; - int32_t n_seq_b = batch.n_seq_id ? 
batch.n_seq_id[b] : 1; - // sort by seq_id, then by pos - if (n_seq_a == n_seq_b) { - if (batch.seq_id) { - for (int32_t i = 0; i < n_seq_a; ++i) { - llama_seq_id seq_id_a = batch.seq_id[a][i]; - llama_seq_id seq_id_b = batch.seq_id[b][i]; - // smaller seq_ids go first - if (seq_id_a != seq_id_b) { - return seq_id_a < seq_id_b; - } - } - } - // when all else is equal, sort by pos - if (batch.pos) { - return batch.pos[a] < batch.pos[b]; - } - // no pos, sort by id - return a < b; - } - // shared prompts go first - return n_seq_a > n_seq_b; - } - ); - - // init seq - llama_sbatch_seq * last_seq = nullptr; - - for (size_t i = 0; i < n_tokens; ++i) { - const size_t bi = ids[i]; - const int32_t n_seqs = batch.n_seq_id[bi]; - llama_seq_id * seq_ids = batch.seq_id[bi]; - if (last_seq != nullptr) { - bool same = n_seqs == last_seq->n_seq_id; - for (int32_t j = 0; same && j < n_seqs; ++j) { - if (seq_ids[j] != last_seq->seq_id[j]) { - same = false; - } - } - if (same) { - last_seq->length += 1; - continue; - } - } - llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1}; - seq.push_back(new_seq); - last_seq = &seq.back(); - } - - // keep shared prompts first at the end, then sort by length descending. - std::sort(seq.begin(), seq.end(), - [](llama_sbatch_seq & a, llama_sbatch_seq & b) { - if (a.n_seq_id == b.n_seq_id) { - return a.length > b.length; - } - return a.n_seq_id < b.n_seq_id; - } - ); -} - -llama_batch_allocr::llama_batch_allocr() { +llama_batch_allocr::llama_batch_allocr(uint32_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) { const char * LLAMA_BATCH_DEBUG = getenv("LLAMA_BATCH_DEBUG"); debug = LLAMA_BATCH_DEBUG ? atoi(LLAMA_BATCH_DEBUG) : 0; @@ -294,17 +18,22 @@ llama_batch_allocr::llama_batch_allocr() { for (auto & cur : seq_cpl) { cur.resize(LLAMA_MAX_SEQ); } + + seq_idx.resize(LLAMA_MAX_SEQ, -1); } bool llama_batch_allocr::init( const llama_batch & batch_inp, const llama_vocab & vocab, const llama_memory_i * memory, - bool embd_all) { + uint32_t n_embd, + bool output_all) { clear(); batch = batch_inp; + this->vocab = &vocab; + GGML_ASSERT(batch.n_tokens > 0); // @@ -359,6 +88,7 @@ bool llama_batch_allocr::init( llama_pos p0[LLAMA_MAX_SEQ]; for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { if (!memory) { + // if no memory -> start from 0 p0[s] = 0; } else { p0[s] = memory->seq_pos_max(s) + 1; @@ -370,8 +100,11 @@ bool llama_batch_allocr::init( pos[i] = p0[seq_id]; + // update the starting position for all sequences that are assigned to the this token for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { - p0[batch.seq_id[i][s]] = pos[i] + 1; + const llama_seq_id seq_id = batch.seq_id[i][s]; + + p0[seq_id] = pos[i] + 1; } } @@ -379,7 +112,7 @@ bool llama_batch_allocr::init( } if (!batch.logits) { - if (embd_all) { + if (output_all) { // return the output for all tokens output.resize(batch.n_tokens, true); } else { @@ -389,7 +122,7 @@ bool llama_batch_allocr::init( } batch.logits = output.data(); - } else if (embd_all) { + } else if (output_all) { bool warn = false; for (int32_t i = 0; i < batch.n_tokens; ++i) { @@ -410,6 +143,9 @@ bool llama_batch_allocr::init( // compute stats // + this->n_embd = n_embd; + + // count the outputs in this batch for (int32_t i = 0; i < batch.n_tokens; ++i) { n_outputs += batch.logits[i] != 0; } @@ -417,85 +153,86 @@ bool llama_batch_allocr::init( // determine coupled sequences // these are pairs of sequences that have at least one token in the input batch that is assigned to both of them for (int32_t i = 0; i < batch.n_tokens; ++i) { + const llama_seq_id s0 = 
batch.seq_id[i][0]; + for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { - seq_pos[batch.seq_id[i][s]].insert(batch.pos[i]); + const llama_seq_id s1 = batch.seq_id[i][s]; - if (s > 0) { - const llama_seq_id s0 = batch.seq_id[i][0]; - const llama_seq_id s1 = batch.seq_id[i][s]; + seq_pos[s1].insert(batch.pos[i]); + if (s > 0) { // mark that sequence s1 is coupled to s0 seq_cpl[s1][s0] = true; - // note: the other way around is not necessary for now + // note: tracking the other way around is not necessary for now //seq_cpl[s0][s1] = true; } } } - if (debug > 0) { - LLAMA_LOG_DEBUG("%s: input batch info:\n", __func__); - LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, batch.n_tokens); - LLAMA_LOG_DEBUG("%s: token = %p\n", __func__, (void *) batch.token); - LLAMA_LOG_DEBUG("%s: embd = %p\n", __func__, (void *) batch.embd); - LLAMA_LOG_DEBUG("%s: pos = %p\n", __func__, (void *) batch.pos); - LLAMA_LOG_DEBUG("%s: n_seq_id = %p\n", __func__, (void *) batch.n_seq_id); - LLAMA_LOG_DEBUG("%s: seq_id = %p\n", __func__, (void *) batch.seq_id); - LLAMA_LOG_DEBUG("%s: logits = %p\n", __func__, (void *) batch.logits); - LLAMA_LOG_DEBUG("%s: n_outputs = %d\n", __func__, n_outputs); + // precompute the sequence sets for each token and determine the unique sequence ids that participate in the batch + { + seq_set_t seq_set_unq; - if (debug > 1) { - int seq_id_max = 0; - for (int32_t i = 0; i < batch.n_tokens; ++i) { - for (int s = 0; s < batch.n_seq_id[i]; ++s) { - for (int s = 0; s < batch.n_seq_id[i]; ++s) { - seq_id_max = std::max(seq_id_max, batch.seq_id[i][s]); - } - } + for (int32_t i = 0; i < batch.n_tokens; ++i) { + seq_set_t cur; + for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { + const llama_seq_id seq_id = batch.seq_id[i][s]; + + cur .set(seq_id); + seq_set_unq.set(seq_id); } - ++seq_id_max; - LLAMA_LOG_DEBUG("%s: token = [\n", __func__); - for (int32_t i = 0; i < batch.n_tokens; ++i) { - std::vector seq_id(seq_id_max); + seq_set.push_back(cur); + seq_set_map[cur].push_back(i); + } - for (int s = 0; s < batch.n_seq_id[i]; ++s) { - seq_id[batch.seq_id[i][s]] = 1; - } + for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { + if (seq_set_unq.test(s)) { + seq_idx[s] = seq_id_unq.size(); + seq_id_unq.push_back(s); + } + } + } - std::stringstream ss; - for (int s = 0; s < seq_id_max; ++s) { - if (seq_id[s]) { - ss << s%10; - } else { - ss << "."; - } - } + if (debug > 0) { + LLAMA_LOG_DEBUG("%s: input batch info:\n", __func__); - LLAMA_LOG_DEBUG("%s: %4d: id = %6d (%16s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n", - __func__, i, batch.token[i], vocab.token_to_piece(batch.token[i]).c_str(), - batch.pos[i], batch.n_seq_id[i], ss.str().c_str(), batch.logits[i]); + llama_ubatch ubatch { + /*.equal_seqs =*/ false, + /*.n_tokens =*/ (uint32_t) batch.n_tokens, + /*.n_seq_tokens =*/ (uint32_t) 1, + /*.n_seqs =*/ (uint32_t) batch.n_tokens, + /*.n_seqs_unq =*/ (uint32_t) this->seq_id_unq.size(), + /*.token =*/ batch.token, + /*.embd =*/ batch.embd, + /*.pos =*/ batch.pos, + /*.n_seq_id =*/ batch.n_seq_id, + /*.seq_id =*/ batch.seq_id, + /*.seq_id_unq =*/ this->seq_id_unq.data(), + /*.seq_idx =*/ this->seq_idx.data(), + /*.output =*/ batch.logits, + }; + + ubatch_print(ubatch, debug); + + LLAMA_LOG_DEBUG("%s: seq = [\n", __func__); + for (int s0 = 0; s0 < (int) seq_pos.size(); ++s0) { + if (seq_pos[s0].empty()) { + continue; } - LLAMA_LOG_DEBUG("%s: ]\n", __func__); - - LLAMA_LOG_DEBUG("%s: seq = [\n", __func__); - for (int s0 = 0; s0 < (int) seq_pos.size(); ++s0) { - if (seq_pos[s0].empty()) { - 
continue; - } - std::stringstream ss; - for (int s1 = 0; s1 < (int) seq_cpl[s0].size(); ++s1) { - if (seq_cpl[s0][s1]) { - ss << s1 << " "; - } + std::stringstream ss; + for (int s1 = 0; s1 < (int) seq_cpl[s0].size(); ++s1) { + if (seq_cpl[s0][s1]) { + ss << s1 << " "; } - - LLAMA_LOG_DEBUG("%s: %4d: pos = [%4d, %4d], cpl = %s\n", - __func__, s0, seq_pos_min(s0), seq_pos_max(s0), ss.str().empty() ? "-" : ss.str().c_str()); } - LLAMA_LOG_DEBUG("%s: ]\n", __func__); + + LLAMA_LOG_DEBUG("%s: %4d: pos = [%4d, %4d], cpl = %s\n", + __func__, s0, seq_pos_min(s0), seq_pos_max(s0), ss.str().empty() ? "-" : ss.str().c_str()); } + LLAMA_LOG_DEBUG("%s: ]\n", __func__); } // @@ -507,9 +244,22 @@ bool llama_batch_allocr::init( continue; } - if (memory && seq_pos_min(s) != memory->seq_pos_max(s) + 1) { - LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s); - return false; + if (memory) { + if (batch.token) { + if (seq_pos_min(s) != memory->seq_pos_max(s) + 1) { + LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s); + return false; + } + } else { + assert(batch.embd); + + // for embeddings (typically used as vision input), we allow them to have repeating positions + // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762 + if (seq_pos_min(s) != memory->seq_pos_max(s) && seq_pos_min(s) != memory->seq_pos_max(s) + 1) { + LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s); + return false; + } + } } if (seq_pos_max(s) - seq_pos_min(s) + 1 > (int) seq_pos[s].size()) { @@ -532,17 +282,120 @@ bool llama_batch_allocr::init( } } + // disallow partial sequence sub-sets: + // + // invalid: x + // i: 0 1 2 ... + // --------------------------------------- + // seq_id[i][0]: 0 0 1 + // seq_id[i][1]: 1 1 2 + // seq_id[i][2]: 2 + // + // disallow decreasing sequence positions: + // + // invalid: x + // i: 0 1 2 3 4 5 6 ... 
+ // --------------------------------------- + // pos[i]: 4 5 0 1 6 2 3 + // seq_id[i][0]: 0 0 1 1 0 1 0 + // + { + seq_set_t cur_seq_set[LLAMA_MAX_SEQ]; + for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { + cur_seq_set[s].set(); + } + + llama_pos cur_seq_pos[LLAMA_MAX_SEQ]; + for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { + cur_seq_pos[s] = -1; + } + + for (int32_t i = 0; i < batch.n_tokens; ++i) { + const llama_pos pos = batch.pos[i]; + + for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { + const llama_seq_id seq_id = batch.seq_id[i][s]; + + cur_seq_set[seq_id] &= seq_set[i]; + + if (cur_seq_set[seq_id].none()) { + LLAMA_LOG_ERROR("%s: sequence %d belongs to incompatible sequence sets (not allowed)\n", __func__, seq_id); + return false; + } + + if (pos < cur_seq_pos[seq_id]) { + LLAMA_LOG_ERROR("%s: sequence %d positions are decreasing (not allowed)\n", __func__, seq_id); + return false; + } + } + } + } + + split_reset(); + return true; } +llama_ubatch llama_batch_allocr::ubatch_reserve(uint32_t n_seq_tokens, uint32_t n_seqs) { + const uint32_t n_tokens = n_seq_tokens*n_seqs; + + clear(); + split_reset(); + + ubatches.emplace_back(); + + auto & ubatch = ubatches.back(); + + ubatch.token .resize(n_tokens); + ubatch.embd .clear(); + ubatch.pos .resize(n_tokens); + ubatch.n_seq_id .resize(n_tokens); + ubatch.seq_id .resize(n_tokens); + ubatch.seq_id_unq.resize(0); + ubatch.seq_idx .resize(LLAMA_MAX_SEQ, -1); + ubatch.output .resize(n_tokens); + + for (uint32_t s = 0; s < n_seqs; ++s) { + ubatch.seq_idx[s] = s; + ubatch.seq_id_unq.push_back(s); + } + + llama_ubatch res { + /*.equal_seqs =*/ true, + /*.n_tokens =*/ n_tokens, + /*.n_seq_tokens =*/ n_seq_tokens, + /*.n_seqs =*/ n_seqs, + /*.n_seqs_unq =*/ n_seqs, + + /*.token =*/ ubatch.token.data(), + /*.embd =*/ nullptr, + /*.pos =*/ ubatch.pos.data(), + /*.n_seq_id =*/ ubatch.n_seq_id.data(), + /*.seq_id =*/ ubatch.seq_id.data(), + /*.seq_id_unq =*/ ubatch.seq_id_unq.data(), + /*.seq_idx =*/ ubatch.seq_idx.data(), + /*.output =*/ ubatch.output.data(), + }; + + return res; +} + const llama_batch & llama_batch_allocr::get_batch() const { return batch; } +uint32_t llama_batch_allocr::get_n_tokens() const { + return batch.n_tokens; +} + uint32_t llama_batch_allocr::get_n_outputs() const { return n_outputs; } +std::vector & llama_batch_allocr::get_out_ids() { + return out_ids; +} + llama_pos llama_batch_allocr::seq_pos_min(llama_seq_id seq_id) const { return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].begin(); } @@ -551,14 +404,188 @@ llama_pos llama_batch_allocr::seq_pos_max(llama_seq_id seq_id) const { return seq_pos[seq_id].empty() ? 
-1 : *seq_pos[seq_id].rbegin(); } +void llama_batch_allocr::split_reset() { + out_ids.clear(); + + used.clear(); + used.resize(get_n_tokens(), false); + + ubatches.clear(); +} + +llama_ubatch llama_batch_allocr::split_simple(uint32_t n_ubatch) { + // find the first unused token + uint32_t cur_idx = 0; + while (cur_idx < used.size() && used[cur_idx]) { + ++cur_idx; + } + + // we are done + if (cur_idx >= used.size()) { + return {}; + } + + std::vector idxs; + + while (true) { + idxs.push_back(cur_idx); + + used[cur_idx] = true; + + ++cur_idx; + + if (cur_idx >= used.size()) { + break; + } + + if (idxs.size() >= n_ubatch) { + break; + } + } + + return ubatch_add(idxs, idxs.size(), false); +} + +llama_ubatch llama_batch_allocr::split_equal(uint32_t n_ubatch) { + std::vector cur_seq_set; + + // determine the non-overlapping sequence sets participating in this ubatch + for (int32_t i = 0; i < batch.n_tokens; ++i) { + if (used[i]) { + continue; + } + + bool add = true; + + for (uint32_t s = 0; s < cur_seq_set.size(); ++s) { + // no overlap with existing sequence sets: + if (!(cur_seq_set[s] & seq_set[i]).none()) { + add = false; + break; + } + } + + if (add) { + cur_seq_set.push_back(seq_set[i]); + + if (cur_seq_set.size() > n_ubatch) { + break; + } + } + } + + const uint32_t n_seqs = cur_seq_set.size(); + + // we are done + if (n_seqs == 0) { + return {}; + } + + // the current batch index of each sequence set + std::vector cur_idx(n_seqs, 0); + + for (uint32_t s = 0; s < n_seqs; ++s) { + while (used[seq_set_map[cur_seq_set[s]][cur_idx[s]]]) { + ++cur_idx[s]; + } + } + + // the list of batch indices for each sequence set + // at the end we will concat these to get the final ubatch + std::vector idxs_per_seq(n_seqs); + + while (true) { + // we can only add new n_seq_tokens tokens if all the sequence sets have at least one more unused token and + // if we haven't reached n_ubatch + bool can_expand = true; + + for (uint32_t s = 0; s < n_seqs; ++s) { + if (cur_idx[s] >= (int32_t) seq_set_map[cur_seq_set[s]].size()) { + can_expand = false; + break; + } + } + + if (!can_expand) { + break; + } + + for (uint32_t s = 0; s < n_seqs; ++s) { + const int32_t idx = seq_set_map[cur_seq_set[s]][cur_idx[s]]; + + idxs_per_seq[s].push_back(idx); + + used[idx] = true; + + ++cur_idx[s]; + } + + if ((idxs_per_seq[0].size() + 1)*n_seqs > n_ubatch) { + break; + } + } + + // concat the per-sequence-set lists + std::vector idxs; + + for (uint32_t s = 0; s < n_seqs; ++s) { + idxs.insert(idxs.end(), idxs_per_seq[s].begin(), idxs_per_seq[s].end()); + } + + return ubatch_add(idxs, n_seqs, true); +} + +llama_ubatch llama_batch_allocr::split_seq(uint32_t n_ubatch) { + // find the first unused token + uint32_t cur_idx = 0; + while (cur_idx < used.size() && used[cur_idx]) { + ++cur_idx; + } + + // we are done + if (cur_idx >= used.size()) { + return {}; + } + + // this is the starting sequence set + // we allow adding tokens only if their sequence set is a subset of the current sequence set + auto cur_seq_set = seq_set[cur_idx]; + + std::vector idxs; + + while (true) { + idxs.push_back(cur_idx); + + used[cur_idx] = true; + + if (idxs.size() >= n_ubatch) { + break; + } + + do { + ++cur_idx; + } while (cur_idx < get_n_tokens() && (used[cur_idx] || ((cur_seq_set & seq_set[cur_idx]) != seq_set[cur_idx]))); + + if (cur_idx == get_n_tokens()) { + break; + } + + cur_seq_set = seq_set[cur_idx]; + } + + return ubatch_add(idxs, 1, true); +} + void llama_batch_allocr::clear() { n_outputs = 0; batch = {}; - pos.clear(); - n_seq_id.clear(); 
- seq_id.clear(); - output.clear(); + + pos .clear(); + n_seq_id .clear(); + seq_id .clear(); + seq_id_unq.clear(); + output .clear(); for (auto & cur : seq_pos) { cur.clear(); @@ -567,6 +594,177 @@ void llama_batch_allocr::clear() { for (auto & cur : seq_cpl) { std::fill(cur.begin(), cur.end(), false); } + + seq_set.clear(); + + seq_set_map.clear(); + + std::fill(seq_idx.begin(), seq_idx.end(), -1); +} + +llama_ubatch llama_batch_allocr::ubatch_add(const std::vector & idxs, uint32_t n_seqs, bool equal_seqs) { + const uint32_t n_tokens = idxs.size(); + + assert(n_tokens%n_seqs == 0); + + ubatches.emplace_back(); + + auto & ubatch = ubatches.back(); + + const int32_t n_pos_cur = batch.embd ? n_pos_per_embd : 1; + + const int64_t n_embd_all = batch.embd ? (int64_t) n_tokens*n_embd : 0; + const int64_t n_pos_all = (int64_t) n_tokens*n_pos_cur; + + ubatch.token .resize(n_tokens); + ubatch.embd .resize(n_embd_all); + ubatch.pos .resize(n_pos_all); + ubatch.n_seq_id .resize(n_tokens); + ubatch.seq_id .resize(n_tokens); + ubatch.seq_id_unq.resize(0); + ubatch.seq_idx .resize(LLAMA_MAX_SEQ, -1); + ubatch.output .resize(n_tokens); + + seq_set_t seq_set_unq; + + for (size_t i = 0; i < idxs.size(); ++i) { + if (batch.token) { + ubatch.token[i] = batch.token[idxs[i]]; + } + + if (batch.embd) { + memcpy(ubatch.embd.data() + i*n_embd, batch.embd + (int64_t) idxs[i]*n_embd, n_embd*sizeof(float)); + } + + for (int j = 0; j < n_pos_cur; ++j) { + ubatch.pos[j*n_tokens + i] = batch.pos[j*batch.n_tokens + idxs[i]]; + } + + ubatch.n_seq_id[i] = batch.n_seq_id[idxs[i]]; + ubatch.seq_id[i] = batch.seq_id[idxs[i]]; + ubatch.output[i] = batch.logits[idxs[i]]; + + for (int s = 0; s < ubatch.n_seq_id[i]; ++s) { + seq_set_unq.set(ubatch.seq_id[i][s]); + } + + if (ubatch.output[i]) { + out_ids.push_back(idxs[i]); + } + } + + for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { + if (seq_set_unq.test(s)) { + ubatch.seq_idx[s] = ubatch.seq_id_unq.size(); + ubatch.seq_id_unq.push_back(s); + } + } + + llama_ubatch res { + /*.equal_seqs =*/ equal_seqs, + /*.n_tokens =*/ n_tokens, + /*.n_seq_tokens =*/ n_tokens/n_seqs, + /*.n_seqs =*/ n_seqs, + /*.n_seqs_unq =*/ (uint32_t) ubatch.seq_id_unq.size(), + + /*.token =*/ batch.token ? ubatch.token.data() : nullptr, + /*.embd =*/ batch.embd ? 
ubatch.embd.data() : nullptr, + /*.pos =*/ ubatch.pos.data(), + /*.n_seq_id =*/ ubatch.n_seq_id.data(), + /*.seq_id =*/ ubatch.seq_id.data(), + /*.seq_id_unq =*/ ubatch.seq_id_unq.data(), + /*.seq_idx =*/ ubatch.seq_idx.data(), + /*.output =*/ ubatch.output.data(), + }; + + if (debug > 0) { + LLAMA_LOG_DEBUG("%s: added ubatch %d to split:\n", __func__, (int) ubatches.size() - 1); + + ubatch_print(res, debug); + } + + return res; +} + +void llama_batch_allocr::ubatch_print(const llama_ubatch & ubatch, int debug) { + if (debug > 0) { + LLAMA_LOG_DEBUG("%s: equal_seqs = %d\n", __func__, ubatch.equal_seqs); + LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, ubatch.n_tokens); + LLAMA_LOG_DEBUG("%s: n_seq_tokens = %d\n", __func__, ubatch.n_seq_tokens); + LLAMA_LOG_DEBUG("%s: n_seqs = %d\n", __func__, ubatch.n_seqs); + LLAMA_LOG_DEBUG("%s: n_seqs_unq = %d\n", __func__, ubatch.n_seqs_unq); + + std::stringstream ss_seq_id_unq; + std::stringstream ss_seq_idx; + + ss_seq_id_unq << "[ "; + ss_seq_idx << "["; + + for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { + ss_seq_id_unq << ubatch.seq_id_unq[s] << " "; + } + + for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { + if (ubatch.seq_idx[s] >= 0) { + ss_seq_idx << ubatch.seq_idx[s]%10; + } else { + ss_seq_idx << "."; + } + } + + ss_seq_id_unq << "]"; + ss_seq_idx << "]"; + + LLAMA_LOG_DEBUG("%s: token = %p\n", __func__, (void *) ubatch.token); + LLAMA_LOG_DEBUG("%s: embd = %p\n", __func__, (void *) ubatch.embd); + LLAMA_LOG_DEBUG("%s: pos = %p\n", __func__, (void *) ubatch.pos); + LLAMA_LOG_DEBUG("%s: n_seq_id = %p\n", __func__, (void *) ubatch.n_seq_id); + LLAMA_LOG_DEBUG("%s: seq_id = %p\n", __func__, (void *) ubatch.seq_id); + LLAMA_LOG_DEBUG("%s: seq_id_unq = %s\n", __func__, ss_seq_id_unq.str().c_str()); + LLAMA_LOG_DEBUG("%s: seq_idx = %s\n", __func__, ss_seq_idx.str().c_str()); + LLAMA_LOG_DEBUG("%s: output = %p\n", __func__, (void *) ubatch.output); + LLAMA_LOG_DEBUG("%s: n_outputs = %d\n", __func__, n_outputs); + + if (debug > 1) { + int seq_id_max = 0; + for (uint32_t i = 0; i < ubatch.n_tokens; ++i) { + for (int s = 0; s < ubatch.n_seq_id[i]; ++s) { + for (int s = 0; s < ubatch.n_seq_id[i]; ++s) { + seq_id_max = std::max(seq_id_max, ubatch.seq_id[i][s]); + } + } + } + ++seq_id_max; + + LLAMA_LOG_DEBUG("%s: token = [\n", __func__); + for (uint32_t i = 0; i < ubatch.n_tokens; ++i) { + std::vector seq_id(seq_id_max); + + for (int s = 0; s < ubatch.n_seq_id[i]; ++s) { + seq_id[ubatch.seq_id[i][s]] = 1; + } + + std::stringstream ss; + for (int s = 0; s < seq_id_max; ++s) { + if (seq_id[s]) { + ss << s%10; + } else { + ss << "."; + } + } + + if (ubatch.token) { + LLAMA_LOG_DEBUG("%s: %4d: id = %6d (%16s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n", + __func__, i, ubatch.token[i], vocab->token_to_piece(ubatch.token[i]).c_str(), + ubatch.pos[i], ubatch.n_seq_id[i], ss.str().c_str(), ubatch.output[i]); + } else { + LLAMA_LOG_DEBUG("%s: %4d: [embd], pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n", + __func__, i, ubatch.pos[i], ubatch.n_seq_id[i], ss.str().c_str(), ubatch.output[i]); + } + } + LLAMA_LOG_DEBUG("%s: ]\n", __func__); + } + } } // @@ -577,25 +775,25 @@ struct llama_batch llama_batch_get_one( llama_token * tokens, int32_t n_tokens) { return { - /*n_tokens =*/ n_tokens, - /*tokens =*/ tokens, - /*embd =*/ nullptr, - /*pos =*/ nullptr, - /*n_seq_id =*/ nullptr, - /*seq_id =*/ nullptr, - /*logits =*/ nullptr, + /*n_tokens =*/ n_tokens, + /*tokens =*/ tokens, + /*embd =*/ nullptr, + /*pos =*/ nullptr, + /*n_seq_id =*/ 
nullptr, + /*seq_id =*/ nullptr, + /*logits =*/ nullptr, }; } struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) { llama_batch batch = { - /*n_tokens =*/ 0, - /*tokens =*/ nullptr, - /*embd =*/ nullptr, - /*pos =*/ nullptr, - /*n_seq_id =*/ nullptr, - /*seq_id =*/ nullptr, - /*logits =*/ nullptr, + /*n_tokens =*/ 0, + /*tokens =*/ nullptr, + /*embd =*/ nullptr, + /*pos =*/ nullptr, + /*n_seq_id =*/ nullptr, + /*seq_id =*/ nullptr, + /*logits =*/ nullptr, }; if (embd) { diff --git a/src/llama-batch.h b/src/llama-batch.h index a555c157234be..d2c5376188a0b 100644 --- a/src/llama-batch.h +++ b/src/llama-batch.h @@ -2,86 +2,44 @@ #include "llama.h" +#include "llama-cparams.h" + #include #include #include +#include +#include -// very similar to llama_batch, -// but has more metadata about sequences +// keep this struct lightweight +// it points to data in `llama_batch_allocr` struct llama_ubatch { bool equal_seqs; // TODO: whole_seqs for embeddings? uint32_t n_tokens; // total tokens (n_seq_tokens * n_seqs) - uint32_t n_seq_tokens; // tokens per sequence - uint32_t n_seqs; - - llama_token * token; // [n_tokens] - float * embd; // [n_embd, n_tokens] - llama_pos * pos; // [n_tokens] - int32_t * n_seq_id; // [n_seqs] - llama_seq_id ** seq_id; // [n_seqs] - int8_t * output; // [n_tokens] -}; - -struct llama_sbatch_seq { - int32_t n_seq_id; - - llama_seq_id * seq_id; - - size_t offset; - size_t length; -}; - -// sequence-length-aware batch splitting -struct llama_sbatch { - // tokens left in this batch - size_t n_tokens; - - size_t n_embd; - - // sorted indices into the batch - std::vector ids; - // batch indices of the output - std::vector out_ids; - std::vector seq; - - const llama_batch * batch = nullptr; - - // buffers for the ubatches - // TODO: very hacky, this needs a complete rework - struct ubatch_data { - std::vector token; - std::vector embd; - std::vector pos; - std::vector n_seq_id; - std::vector seq_id; - std::vector output; - }; - - std::vector udatas; - - llama_ubatch reserve_ubatch(size_t n_ubatch, bool has_embd = false); - - void add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length); - - // simple split, unknown number of sequences of unequal lengths - llama_ubatch split_simple(size_t n_ubatch); - - // make batches of equal-length sequences - llama_ubatch split_equal(size_t n_ubatch); - - // sequence-wise split - llama_ubatch split_seq(size_t n_ubatch); - - llama_sbatch() = default; - llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split = false); + uint32_t n_seq_tokens; // tokens per sequence set + uint32_t n_seqs; // sequence sets in the ubatch + uint32_t n_seqs_unq; // unique sequence ids in the ubatch + + // seq_id_unq: unique sequence ids in the ubatch + // seq_idx: indices of the unique sequence ids in the ubatch in [0, n_seqs_unq) + // used for extracting sequence pooled embeddings + + // // size | idx | val + llama_token * token; // [n_tokens] | i | id, token + float * embd; // [n_embd, n_tokens] | i | embd + llama_pos * pos; // [n_tokens] | i | pos + int32_t * n_seq_id; // [n_tokens] | i | - + llama_seq_id ** seq_id; // [n_tokens] | s | s0, s1, seq_id + llama_seq_id * seq_id_unq; // [n_seqs_unq] | s | seq_id + int32_t * seq_idx; // [LLAMA_MAX_SEQ] | - | seq_idx + int8_t * output; // [n_tokens] | i | - }; -// a helper for sanitizing and fulfilling a batch +// a helper for sanitizing, fulfilling and splitting a batch class llama_batch_allocr { public: - llama_batch_allocr(); + 
llama_batch_allocr(uint32_t n_pos_per_embd); // sanitize and auto-gen missing data in the input batch // memory is optional. if provided will be used to check for sequence continuity and to determine the positions @@ -89,20 +47,57 @@ class llama_batch_allocr { const llama_batch & batch_inp, const llama_vocab & vocab, const llama_memory_i * memory, - bool embd_all); + uint32_t n_embd, + bool output_all); const llama_batch & get_batch() const; + uint32_t get_n_tokens() const; uint32_t get_n_outputs() const; + // the array of output indices in the order they were encountered during the ubatch splitting + std::vector & get_out_ids(); + + // min/max positions of each sequence in the current ubatch llama_pos seq_pos_min(llama_seq_id seq_id) const; llama_pos seq_pos_max(llama_seq_id seq_id) const; + // call once before splitting the batch to reset the internal state + void split_reset(); + + // simple split, unknown number of sequence sets of unequal lengths + llama_ubatch split_simple(uint32_t n_ubatch); + + // make ubatches of equal-length sequences sets + llama_ubatch split_equal(uint32_t n_ubatch); + + // sequence-set-wise split - each ubatch contains a single sequence-set + llama_ubatch split_seq(uint32_t n_ubatch); + + // a helper method for creating a well-defined ubatch of tokens + // TODO: support embeddings if needed in the future + llama_ubatch ubatch_reserve(uint32_t n_seq_tokens, uint32_t n_seqs); + private: void clear(); + // create the next ubatch based on the provided batch indices (idxs) and the number of sequence sets (n_seqs) + // return llama_ubatch.n_tokens == 0 if the entire batch was consumed + llama_ubatch ubatch_add(const std::vector & idxs, uint32_t n_seqs, bool equal_seqs); + + // for debugging, start with LLAMA_BATCH_DEBUG=2 + void ubatch_print(const llama_ubatch & ubatch, int debug); + llama_batch batch; + // only for debugging purposes + const llama_vocab * vocab; + + // TODO: this is more of a temporary solution until we have a better way to handle multiple positions per token/embd + // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762 + const uint32_t n_pos_per_embd; + + uint32_t n_embd; uint32_t n_outputs; std::array seq_id_0 = { 0 }; // default sequence id @@ -110,10 +105,43 @@ class llama_batch_allocr { std::vector pos; std::vector n_seq_id; std::vector seq_id; + std::vector seq_id_unq; + std::vector seq_idx; std::vector output; - std::vector> seq_pos; // seq_pos[s]: the set of positions in sequence s - std::vector> seq_cpl; // seq_cpl[s0][s1]: if sequence s0 is coupled to sequence s1 + using pos_set_t = std::set; + using seq_cpl_t = std::vector; + + std::vector seq_pos; // seq_pos[s]: the set of positions in sequence s + std::vector seq_cpl; // seq_cpl[s0][s1]: if sequence s0 is coupled to sequence s1 + + using idx_vec_t = std::vector; + using seq_set_t = std::bitset; + + std::vector seq_set; // seq_set[i]: the sequence set of token i + + std::unordered_map seq_set_map; // the indices at which the sequence set appears + + // batch indices of the output + std::vector out_ids; + + // used[i] indicates if token i has already been used in a previous ubatch + std::vector used; + + // llama_ubatch points to this data: + struct ubatch { + std::vector token; + std::vector embd; + std::vector pos; + std::vector n_seq_id; + std::vector seq_id; + std::vector seq_id_unq; + std::vector seq_idx; + std::vector output; + }; + + // current splitting state: + std::vector ubatches; int debug; }; diff --git a/src/llama-context.cpp 
b/src/llama-context.cpp index f56a58e9b6ec6..5a18a4fb3939a 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -20,7 +20,7 @@ llama_context::llama_context( const llama_model & model, llama_context_params params) : model(model), - batch_allocr(std::make_unique()) { + balloc(std::make_unique(model.hparams.n_pos_per_embd())) { LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__); t_start_us = model.t_start_us; @@ -722,22 +722,26 @@ llm_graph_result_ptr llama_context::process_ubatch(const llama_ubatch & ubatch, } int llama_context::encode(const llama_batch & batch_inp) { + GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT + if (batch_inp.n_tokens == 0) { LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); return -1; } + const auto & hparams = model.hparams; + + const int64_t n_embd = hparams.n_embd; + // note: during encode, we always pass the full sequence starting from pos = 0 - if (!batch_allocr->init(batch_inp, model.vocab, nullptr, true)) { + if (!balloc->init(batch_inp, model.vocab, nullptr, n_embd, true)) { LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); return -1; } - const llama_batch & batch = batch_allocr->get_batch(); + const uint32_t n_tokens = balloc->get_n_tokens(); - const uint32_t n_tokens = batch.n_tokens; - - GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT + const llama_ubatch ubatch = balloc->split_simple(n_tokens); // micro-batching is not possible for non-causal encoding, so we process the batch in a single shot GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens"); @@ -751,14 +755,6 @@ int llama_context::encode(const llama_batch & batch_inp) { n_queued_tokens += n_tokens; - const auto & hparams = model.hparams; - - const int64_t n_embd = hparams.n_embd; - - llama_sbatch sbatch = llama_sbatch(batch, n_embd, /* simple_split */ true); - - const llama_ubatch ubatch = sbatch.split_simple(n_tokens); - // reserve output buffer if (output_reserve(n_tokens) < n_tokens) { LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens); @@ -817,34 +813,28 @@ int llama_context::encode(const llama_batch & batch_inp) { { // extract sequence embeddings auto & embd_seq_out = embd_seq; - embd_seq_out.clear(); - GGML_ASSERT(!ubatch.equal_seqs); // TODO: handle equal splits + for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { + const llama_seq_id seq_id = ubatch.seq_id_unq[s]; + const int32_t seq_idx = ubatch.seq_idx[seq_id]; - // TODO: fix indexing [UBATCH_IDX] - for (uint32_t i = 0; i < n_tokens; i++) { - const llama_seq_id seq_id = ubatch.seq_id[i][0]; - if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { - continue; - } embd_seq_out[seq_id].resize(n_embd); - ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float)); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float)); } } break; case LLAMA_POOLING_TYPE_RANK: { // extract the rerank score - n_cls_out floats per sequence auto & embd_seq_out = embd_seq; + const uint32_t n_cls_out = hparams.n_cls_out; - // TODO: fix indexing [UBATCH_IDX] - for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; - if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { - continue; - } + for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { + const llama_seq_id seq_id = 
ubatch.seq_id_unq[s]; + const int32_t seq_idx = ubatch.seq_idx[seq_id]; + embd_seq_out[seq_id].resize(n_cls_out); - ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_id)*sizeof(float), n_cls_out*sizeof(float)); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float)); } } break; case LLAMA_POOLING_TYPE_UNSPECIFIED: @@ -869,12 +859,16 @@ int llama_context::encode(const llama_batch & batch_inp) { cross.v_embd.resize(cross.n_embd*cross.n_enc); memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd)); + const auto & batch = balloc->get_batch(); + // remember the sequence ids used during the encoding - needed for cross attention later cross.seq_ids_enc.resize(n_tokens); for (uint32_t i = 0; i < n_tokens; i++) { cross.seq_ids_enc[i].clear(); + for (int s = 0; s < batch.n_seq_id[i]; s++) { - llama_seq_id seq_id = batch.seq_id[i][s]; + const llama_seq_id seq_id = batch.seq_id[i][s]; + cross.seq_ids_enc[i].insert(seq_id); } } @@ -884,6 +878,8 @@ int llama_context::encode(const llama_batch & batch_inp) { } int llama_context::decode(const llama_batch & batch_inp) { + GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT + if (!memory) { LLAMA_LOG_DEBUG("%s: cannot decode batches with this context (calling encode() instead)\n", __func__); return encode(batch_inp); @@ -894,29 +890,24 @@ int llama_context::decode(const llama_batch & batch_inp) { return -1; } - // when computing embeddings, all tokens are output - const bool embd_all = cparams.embeddings; - - if (!batch_allocr->init(batch_inp, model.vocab, memory.get(), embd_all)) { - LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); - return -1; - } - - const llama_batch & batch = batch_allocr->get_batch(); - const auto & vocab = model.vocab; const auto & hparams = model.hparams; const int32_t n_vocab = vocab.n_tokens(); const int64_t n_embd = hparams.n_embd; - const uint32_t n_tokens_all = batch.n_tokens; + // when computing embeddings, all tokens are output + const bool output_all = cparams.embeddings; - GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT + if (!balloc->init(batch_inp, vocab, memory.get(), n_embd, output_all)) { + LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); + return -1; + } - const uint32_t n_outputs_all = batch_allocr->get_n_outputs(); + const uint32_t n_tokens_all = balloc->get_n_tokens(); + const uint32_t n_outputs_all = balloc->get_n_outputs(); - if (embd_all) { + if (output_all) { // require that all tokens are output if (n_outputs_all != n_tokens_all) { LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n", @@ -945,7 +936,7 @@ int llama_context::decode(const llama_batch & batch_inp) { llama_memory_state_ptr mstate; while (true) { - mstate = memory->init_batch(batch, cparams.n_ubatch, embd_all); + mstate = memory->init_batch(*balloc, cparams.n_ubatch, output_all); if (!mstate) { return -2; } @@ -966,19 +957,19 @@ int llama_context::decode(const llama_batch & batch_inp) { did_optimize = true; if (kv_self_update(true)) { - LLAMA_LOG_DEBUG("%s: retrying batch size %d after cache optimization\n", __func__, batch.n_tokens); + LLAMA_LOG_DEBUG("%s: retrying batch size %d after cache optimization\n", __func__, balloc->get_n_tokens()); continue; } } - LLAMA_LOG_WARN("%s: failed to find a memory slot for batch of size %d\n", __func__, 
batch.n_tokens); + LLAMA_LOG_WARN("%s: failed to find a memory slot for batch of size %d\n", __func__, balloc->get_n_tokens()); return 1; } case LLAMA_MEMORY_STATUS_FAILED_COMPUTE: { - LLAMA_LOG_ERROR("%s: compute failed while preparing batch of size %d\n", __func__, batch.n_tokens); + LLAMA_LOG_ERROR("%s: compute failed while preparing batch of size %d\n", __func__, balloc->get_n_tokens()); return -2; } @@ -1005,7 +996,6 @@ int llama_context::decode(const llama_batch & batch_inp) { if (n_outputs_all == n_tokens_all) { n_outputs_new = ubatch.n_tokens; } else { - GGML_ASSERT(ubatch.output); for (uint32_t i = 0; i < ubatch.n_tokens; i++) { n_outputs_new += (int32_t) (ubatch.output[i] != 0); } @@ -1105,27 +1095,27 @@ int llama_context::decode(const llama_batch & batch_inp) { // extract sequence embeddings (cleared before processing each batch) auto & embd_seq_out = embd_seq; - for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; - if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { - continue; - } + for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { + const llama_seq_id seq_id = ubatch.seq_id_unq[s]; + const int32_t seq_idx = ubatch.seq_idx[seq_id]; + embd_seq_out[seq_id].resize(n_embd); - ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float)); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float)); } } break; case LLAMA_POOLING_TYPE_RANK: { - // extract the rerank score - a single float per sequence + // extract the rerank score - n_cls_out floats per sequence auto & embd_seq_out = embd_seq; - for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; - if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { - continue; - } - embd_seq_out[seq_id].resize(1); - ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (seq_id)*sizeof(float), sizeof(float)); + const uint32_t n_cls_out = hparams.n_cls_out; + + for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { + const llama_seq_id seq_id = ubatch.seq_id_unq[s]; + const int32_t seq_idx = ubatch.seq_idx[seq_id]; + + embd_seq_out[seq_id].resize(n_cls_out); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float)); } } break; case LLAMA_POOLING_TYPE_UNSPECIFIED: @@ -1145,7 +1135,7 @@ int llama_context::decode(const llama_batch & batch_inp) { if (n_outputs > 0) { bool sorted_output = true; - auto & out_ids = mstate->out_ids(); + auto & out_ids = balloc->get_out_ids(); GGML_ASSERT(out_ids.size() == (size_t) n_outputs); @@ -1318,8 +1308,8 @@ ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, u this->n_outputs = n_outputs; - llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph - llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; + llama_batch_allocr balloc(model.hparams.n_pos_per_embd()); + llama_ubatch ubatch = balloc.ubatch_reserve(n_tokens/n_seqs, n_seqs); auto * gf = graph_init(); auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, mstate); @@ -2039,7 +2029,12 @@ void llama_context::opt_epoch_iter( batch.logits [pos_batch] = true; } - const auto n_tokens_all = batch.n_tokens; + if 
(!balloc->init(batch, model.vocab, nullptr, model.hparams.n_embd, true)) { + LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); + return; + } + + const uint32_t n_tokens_all = balloc->get_n_tokens(); n_queued_tokens += n_tokens_all; @@ -2047,7 +2042,7 @@ void llama_context::opt_epoch_iter( uint32_t n_outputs_all = n_tokens_all; - auto mstate = memory->init_batch(batch, cparams.n_ubatch, true); + auto mstate = memory->init_batch(*balloc, cparams.n_ubatch, true); if (!mstate || mstate->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) { LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__); break; diff --git a/src/llama-context.h b/src/llama-context.h index 040f03ae42e65..7d300c14572e9 100644 --- a/src/llama-context.h +++ b/src/llama-context.h @@ -247,7 +247,7 @@ struct llama_context { std::map> embd_seq; // reuse the batch_allocr to avoid unnecessary memory allocations - std::unique_ptr batch_allocr; + std::unique_ptr balloc; uint32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 65d98cbbb3987..083366fd68d07 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -130,110 +130,97 @@ void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) { if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) { const int64_t n_tokens = ubatch->n_tokens; const int64_t n_seq_tokens = ubatch->n_seq_tokens; - const int64_t n_seqs = ubatch->n_seqs; + const int64_t n_seqs_unq = ubatch->n_seqs_unq; GGML_ASSERT(mean); GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer)); float * data = (float *) mean->data; - memset(mean->data, 0, n_tokens * n_tokens * ggml_element_size(mean)); + memset(mean->data, 0, n_tokens*n_seqs_unq*ggml_element_size(mean)); - std::vector sum(n_tokens, 0); + std::vector sums(n_seqs_unq, 0); + for (int i = 0; i < n_tokens; i += n_seq_tokens) { + for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[i][s]; + const int32_t seq_idx = ubatch->seq_idx[seq_id]; - // TODO: fix indexing [UBATCH_IDX] - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch->seq_id[s][0]; - - // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true - GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN"); - - sum[seq_id] += ubatch->n_seq_tokens; + sums[seq_idx] += ubatch->n_seq_tokens; + } } - std::vector div(n_tokens, 0.0f); - for (int i = 0; i < n_tokens; ++i) { - const uint64_t s = sum[i]; - if (s > 0) { - div[i] = 1.0f/float(s); + std::vector div(n_seqs_unq, 0.0f); + for (int s = 0; s < n_seqs_unq; ++s) { + const uint64_t sum = sums[s]; + if (sum > 0) { + div[s] = 1.0f/float(sum); } } - // TODO: fix indexing [UBATCH_IDX] - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch->seq_id[s][0]; + for (int i = 0; i < n_tokens; i += n_seq_tokens) { + for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[i][s]; + const int32_t seq_idx = ubatch->seq_idx[seq_id]; - for (int i = 0; i < n_seq_tokens; ++i) { - data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id]; + for (int j = 0; j < n_seq_tokens; ++j) { + data[seq_idx*n_tokens + i + j] = div[seq_idx]; + } } } } } void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) { - if (cparams.embeddings && ( - cparams.pooling_type == LLAMA_POOLING_TYPE_CLS || - cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) { - const int64_t n_tokens = 
ubatch->n_tokens; - const int64_t n_seq_tokens = ubatch->n_seq_tokens; - const int64_t n_seqs = ubatch->n_seqs; + const int64_t n_tokens = ubatch->n_tokens; + const int64_t n_seq_tokens = ubatch->n_seq_tokens; + const int64_t n_seqs_unq = ubatch->n_seqs_unq; + if (cparams.embeddings && ( + cparams.pooling_type == LLAMA_POOLING_TYPE_CLS || + cparams.pooling_type == LLAMA_POOLING_TYPE_RANK + )) { GGML_ASSERT(cls); GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer)); uint32_t * data = (uint32_t *) cls->data; - memset(cls->data, 0, n_tokens * ggml_element_size(cls)); + memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls)); - // TODO: fix indexing [UBATCH_IDX] - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch->seq_id[s][0]; + for (int i = 0; i < n_tokens; i += n_seq_tokens) { + for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[i][s]; + const int32_t seq_idx = ubatch->seq_idx[seq_id]; - // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true - GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK"); - - for (int i = 0; i < n_seq_tokens; ++i) { - const llama_pos pos = ubatch->pos[s*n_seq_tokens + i]; - - if (pos == 0) { - data[seq_id] = s*n_seq_tokens + i; - } + data[seq_idx] = i; } } } if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) { - const int64_t n_tokens = ubatch->n_tokens; - const int64_t n_seq_tokens = ubatch->n_seq_tokens; - const int64_t n_seqs = ubatch->n_seqs; - GGML_ASSERT(cls); GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer)); uint32_t * data = (uint32_t *) cls->data; - memset(cls->data, 0, n_tokens * ggml_element_size(cls)); + memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls)); - std::vector last_pos(n_tokens, -1); - std::vector last_row(n_tokens, -1); + std::vector last_pos(n_seqs_unq, -1); + std::vector last_row(n_seqs_unq, -1); - // TODO: fix indexing [UBATCH_IDX] - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch->seq_id[s][0]; - - // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true - GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST"); + for (int i = 0; i < n_tokens; ++i) { + const llama_pos pos = ubatch->pos[i]; - for (int i = 0; i < n_seq_tokens; ++i) { - const llama_pos pos = ubatch->pos[s*n_seq_tokens + i]; + for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[i][s]; + const int32_t seq_idx = ubatch->seq_idx[seq_id]; - if (pos >= last_pos[seq_id]) { - last_pos[seq_id] = pos; - last_row[seq_id] = s*n_seq_tokens + i; + if (pos >= last_pos[seq_idx]) { + last_pos[seq_idx] = pos; + last_row[seq_idx] = i; } } } - for (int i = 0; i < n_tokens; ++i) { - if (last_row[i] >= 0) { - data[i] = last_row[i]; + for (int s = 0; s < n_seqs_unq; ++s) { + if (last_row[s] >= 0) { + data[s] = last_row[s]; } } } @@ -266,89 +253,36 @@ void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) { } void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) { - if (kq_mask) { - if (cparams.causal_attn) { - const int64_t n_kv = ubatch->n_tokens; - const int64_t n_tokens = ubatch->n_tokens; - const int64_t n_seq_tokens = ubatch->n_seq_tokens; - const int64_t n_seqs = ubatch->n_seqs; - - GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer)); - float * data = (float *) kq_mask->data; - - for (int h = 0; h < 1; ++h) { - for (int s1 = 0; s1 < n_seqs; ++s1) { - const llama_seq_id seq_id = 
ubatch->seq_id[s1][0]; - - for (int j = 0; j < n_seq_tokens; ++j) { - const int32_t tj = s1*n_seq_tokens + j; - - for (int s0 = 0; s0 < n_seqs; ++s0) { - for (int i = 0; i < n_seq_tokens; ++i) { - const int32_t ti = s0*n_seq_tokens + i; - float f = -INFINITY; - - // TODO: fix indexing [UBATCH_IDX] - for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) { - if (ubatch->seq_id[s0][s] == seq_id && ubatch->pos[ti] <= ubatch->pos[tj]) { - if (hparams.use_alibi) { - f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]); - } else { - f = 0.0f; - } - break; - } - } - - data[h*(n_kv*n_tokens) + tj*n_kv + ti] = f; - } - } - } - } - } - } else { - const int64_t n_tokens = ubatch->n_tokens; - const int64_t n_seq_tokens = ubatch->n_seq_tokens; - const int64_t n_seqs = ubatch->n_seqs; - const int64_t n_stride = ubatch->n_tokens; - - GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer)); - - float * data = (float *) kq_mask->data; - - for (int h = 0; h < 1; ++h) { - for (int s1 = 0; s1 < n_seqs; ++s1) { - const llama_seq_id seq_id = ubatch->seq_id[s1][0]; - - for (int j = 0; j < n_seq_tokens; ++j) { - const int32_t tj = s1*n_seq_tokens + j; - - for (int s0 = 0; s0 < n_seqs; ++s0) { - for (int i = 0; i < n_seq_tokens; ++i) { - const int32_t ti = s0*n_seq_tokens + i; - float f = -INFINITY; - - // TODO: fix indexing [UBATCH_IDX] - for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) { - if (ubatch->seq_id[s0][s] == seq_id) { - if (hparams.use_alibi) { - f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]); - } else { - f = 0.0f; - } - break; - } - } - - data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f; - } - } + const int64_t n_kv = ubatch->n_tokens; + const int64_t n_tokens = ubatch->n_tokens; + + GGML_ASSERT(kq_mask); + GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer)); + + float * data = (float *) kq_mask->data; + + for (int h = 0; h < 1; ++h) { + for (int i1 = 0; i1 < n_tokens; ++i1) { + const llama_seq_id s1 = ubatch->seq_id[i1][0]; + + for (int i0 = 0; i0 < n_tokens; ++i0) { + float f = -INFINITY; + + for (int s = 0; s < ubatch->n_seq_id[i0]; ++s) { + const llama_seq_id s0 = ubatch->seq_id[i0][0]; - for (int i = n_tokens; i < n_stride; ++i) { - data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY; + // TODO: reimplement this like in llama_kv_cache_unified + if (s0 == s1 && (!cparams.causal_attn || ubatch->pos[i0] <= ubatch->pos[i1])) { + if (hparams.use_alibi) { + f = -std::abs(ubatch->pos[i0] - ubatch->pos[i1]); + } else { + f = 0.0f; } + break; } } + + data[h*(n_kv*n_tokens) + i1*n_kv + i0] = f; } } } @@ -371,34 +305,36 @@ void llm_graph_input_attn_kv_unified_iswa::set_input(const llama_ubatch * ubatch } void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) { - if (cross_kq_mask) { - const int64_t n_enc = cross_kq_mask->ne[0]; - const int64_t n_tokens = ubatch->n_tokens; + GGML_ASSERT(cross_kq_mask); - GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer)); - GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing + const int64_t n_enc = cross_kq_mask->ne[0]; + const int64_t n_tokens = ubatch->n_tokens; - float * data = (float *) cross_kq_mask->data; + GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer)); + GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - for (int i = 0; i < n_enc; ++i) { - float f = -INFINITY; - // TODO: fix indexing [UBATCH_IDX] - for (int s = 0; s < ubatch->n_seq_id[j]; ++s) { - const llama_seq_id seq_id = 
ubatch->seq_id[j][s]; - if (cross->seq_ids_enc[i].find(seq_id) != cross->seq_ids_enc[i].end()) { - f = 0.0f; - } + float * data = (float *) cross_kq_mask->data; + + for (int h = 0; h < 1; ++h) { + for (int i = 0; i < n_tokens; ++i) { + for (int j = 0; j < n_enc; ++j) { + float f = -INFINITY; + + for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[i][s]; + + if (cross->seq_ids_enc[j].find(seq_id) != cross->seq_ids_enc[j].end()) { + f = 0.0f; } - data[h*(n_enc*n_tokens) + j*n_enc + i] = f; } + + data[h*(n_enc*n_tokens) + i*n_enc + j] = f; } + } - for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { - for (int j = 0; j < n_enc; ++j) { - data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY; - } + for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (int j = 0; j < n_enc; ++j) { + data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY; } } } @@ -467,10 +403,6 @@ llm_graph_context::llm_graph_context(const llm_graph_params & params) : res (std::make_unique()) { } -int64_t llm_graph_context::n_pos_per_embd() const { - return hparams.rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1; -} - void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const { if (cb_func) { cb_func(ubatch, cur, name, il); @@ -915,11 +847,11 @@ ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const { } ggml_tensor * llm_graph_context::build_inp_pos() const { - auto inp = std::make_unique(n_pos_per_embd()); + auto inp = std::make_unique(hparams.n_pos_per_embd()); auto & cur = inp->pos; - cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens*n_pos_per_embd()); + cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, (int64_t)n_tokens*hparams.n_pos_per_embd()); ggml_set_input(cur); res->add_input(std::move(inp)); @@ -959,7 +891,7 @@ ggml_tensor * llm_graph_context::build_inp_mean() const { auto & cur = inp->mean; - cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens); + cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, ubatch.n_seqs_unq); ggml_set_input(cur); res->add_input(std::move(inp)); @@ -972,7 +904,7 @@ ggml_tensor * llm_graph_context::build_inp_cls() const { auto & cur = inp->cls; - cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_seqs_unq); ggml_set_input(cur); res->add_input(std::move(inp)); diff --git a/src/llama-graph.h b/src/llama-graph.h index 58845e284abed..9e62fa60720d7 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -95,14 +95,14 @@ class llm_graph_input_embd : public llm_graph_input_i { class llm_graph_input_pos : public llm_graph_input_i { public: - llm_graph_input_pos(int64_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {} + llm_graph_input_pos(uint32_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {} virtual ~llm_graph_input_pos() = default; void set_input(const llama_ubatch * ubatch) override; ggml_tensor * pos = nullptr; // I32 [n_batch] - const int64_t n_pos_per_embd = 1; + const uint32_t n_pos_per_embd = 1; }; // temperature tuning, used by llama4 @@ -464,8 +464,6 @@ struct llm_graph_context { llm_graph_context(const llm_graph_params & params); - int64_t n_pos_per_embd() const; - void cb(ggml_tensor * cur, const char * name, int il) const; // diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp index b40566ced99ee..bba7a12dc5496 100644 --- a/src/llama-hparams.cpp +++ b/src/llama-hparams.cpp @@ -90,6 +90,10 @@ bool llama_hparams::is_recurrent(uint32_t il) const { return recurrent_layer_arr[il]; } 
+uint32_t llama_hparams::n_pos_per_embd() const { + return rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1; +} + bool llama_hparams::is_swa(uint32_t il) const { if (il < n_layer) { return swa_layers[il]; diff --git a/src/llama-hparams.h b/src/llama-hparams.h index 82bb5b6084946..7b315a9a74b1d 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -192,6 +192,8 @@ struct llama_hparams { // whether or not the given layer is recurrent (for hybrid models) bool is_recurrent(uint32_t il) const; + uint32_t n_pos_per_embd() const; + bool is_swa(uint32_t il) const; }; diff --git a/src/llama-kv-cache-unified-iswa.cpp b/src/llama-kv-cache-unified-iswa.cpp index a869b1de8c2a3..0ced340dec6c5 100644 --- a/src/llama-kv-cache-unified-iswa.cpp +++ b/src/llama-kv-cache-unified-iswa.cpp @@ -95,19 +95,22 @@ llama_pos llama_kv_cache_unified_iswa::seq_pos_max(llama_seq_id seq_id) const { return kv_swa->seq_pos_max(seq_id); } -llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) { +llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { GGML_UNUSED(embd_all); // first try simple split do { - auto sbatch = llama_sbatch(batch, hparams.n_embd, true); + balloc.split_reset(); std::vector ubatches; + while (true) { + auto ubatch = balloc.split_simple(n_ubatch); - while (sbatch.n_tokens > 0) { - auto ubatch = sbatch.split_simple(n_ubatch); + if (ubatch.n_tokens == 0) { + break; + } - ubatches.push_back(ubatch); + ubatches.push_back(std::move(ubatch)); // NOLINT } auto heads_base = kv_base->prepare(ubatches); @@ -123,19 +126,22 @@ llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch assert(heads_base.size() == heads_swa.size()); return std::make_unique( - this, std::move(sbatch), std::move(heads_base), std::move(heads_swa), std::move(ubatches)); + this, std::move(heads_base), std::move(heads_swa), std::move(ubatches)); } while (false); // if it fails, try equal split do { - auto sbatch = llama_sbatch(batch, hparams.n_embd, false); + balloc.split_reset(); std::vector ubatches; + while (true) { + auto ubatch = balloc.split_equal(n_ubatch); - while (sbatch.n_tokens > 0) { - auto ubatch = sbatch.split_equal(n_ubatch); + if (ubatch.n_tokens == 0) { + break; + } - ubatches.push_back(ubatch); + ubatches.push_back(std::move(ubatch)); // NOLINT } auto heads_base = kv_base->prepare(ubatches); @@ -151,7 +157,7 @@ llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch assert(heads_base.size() == heads_swa.size()); return std::make_unique( - this, std::move(sbatch), std::move(heads_base), std::move(heads_swa), std::move(ubatches)); + this, std::move(heads_base), std::move(heads_swa), std::move(ubatches)); } while (false); // TODO: if we fail again, we should attempt different splitting strategies @@ -214,15 +220,13 @@ llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( llama_kv_cache_unified_iswa * kv, - llama_sbatch sbatch, std::vector heads_base, std::vector heads_swa, std::vector ubatches) : - sbatch(std::move(sbatch)), ubatches(std::move(ubatches)), // note: here we copy the ubatches. 
not sure if this is ideal - state_base(new llama_kv_cache_unified_state(kv->get_base(), {}, std::move(heads_base), this->ubatches)), - state_swa (new llama_kv_cache_unified_state(kv->get_swa (), {}, std::move(heads_swa), this->ubatches)), + state_base(new llama_kv_cache_unified_state(kv->get_base(), std::move(heads_base), this->ubatches)), + state_swa (new llama_kv_cache_unified_state(kv->get_swa (), std::move(heads_swa), this->ubatches)), status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) { } @@ -252,12 +256,6 @@ bool llama_kv_cache_unified_iswa_state::apply() { return res; } -std::vector & llama_kv_cache_unified_iswa_state::out_ids() { - assert(status == LLAMA_MEMORY_STATUS_SUCCESS); - - return sbatch.out_ids; -} - llama_memory_status llama_kv_cache_unified_iswa_state::get_status() const { return status; } diff --git a/src/llama-kv-cache-unified-iswa.h b/src/llama-kv-cache-unified-iswa.h index 813eaf39b25b0..071041585db38 100644 --- a/src/llama-kv-cache-unified-iswa.h +++ b/src/llama-kv-cache-unified-iswa.h @@ -32,7 +32,7 @@ class llama_kv_cache_unified_iswa : public llama_memory_i { // llama_memory_state_ptr init_batch( - const llama_batch & batch, + llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) override; @@ -90,7 +90,6 @@ class llama_kv_cache_unified_iswa_state : public llama_memory_state_i { // used to create a state from a batch llama_kv_cache_unified_iswa_state( llama_kv_cache_unified_iswa * kv, - llama_sbatch sbatch, std::vector heads_base, std::vector heads_swa, std::vector ubatches); @@ -104,8 +103,6 @@ class llama_kv_cache_unified_iswa_state : public llama_memory_state_i { bool next() override; bool apply() override; - std::vector & out_ids() override; - llama_memory_status get_status() const override; const llama_ubatch & get_ubatch() const override; @@ -119,8 +116,6 @@ class llama_kv_cache_unified_iswa_state : public llama_memory_state_i { private: //llama_kv_cache_unified_iswa * kv; - llama_sbatch sbatch; - // the index of the next ubatch to process size_t i_next = 0; diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index d4412288925c3..6897b797153db 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -308,17 +308,23 @@ llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const { } llama_memory_state_ptr llama_kv_cache_unified::init_batch( - const llama_batch & batch, + llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { GGML_UNUSED(embd_all); do { - auto sbatch = llama_sbatch(batch, hparams.n_embd, true); + balloc.split_reset(); std::vector ubatches; - while (sbatch.n_tokens > 0) { - ubatches.push_back(sbatch.split_simple(n_ubatch)); + while (true) { + auto ubatch = balloc.split_simple(n_ubatch); + + if (ubatch.n_tokens == 0) { + break; + } + + ubatches.push_back(std::move(ubatch)); // NOLINT } auto heads = prepare(ubatches); @@ -327,7 +333,7 @@ llama_memory_state_ptr llama_kv_cache_unified::init_batch( } return std::make_unique( - this, std::move(sbatch), std::move(heads), std::move(ubatches)); + this, std::move(heads), std::move(ubatches)); } while (false); return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); @@ -644,12 +650,6 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { } void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch & ubatch) { - if (debug > 0) { - LLAMA_LOG_DEBUG("%s: ubatch info:\n", __func__); - LLAMA_LOG_DEBUG("%s: n_tokens = %d, equal_seqs = %d\n", 
__func__, ubatch.n_tokens, ubatch.equal_seqs); - LLAMA_LOG_DEBUG("%s: n_seq_tokens = %d, n_seqs = %d\n", __func__, ubatch.n_seq_tokens, ubatch.n_seqs); - } - // keep track of the max sequence position that we would overwrite with this ubatch // for non-SWA cache, this would be always empty llama_seq_id seq_pos_max_rm[LLAMA_MAX_SEQ]; @@ -657,27 +657,22 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch seq_pos_max_rm[s] = -1; } - for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { - for (uint32_t j = 0; j < ubatch.n_seq_tokens; ++j) { - const uint32_t idx = s*ubatch.n_seq_tokens + j; - - if (!cells.is_empty(head_cur + idx)) { - assert(cells.seq_count(head_cur + idx) == 1); + for (uint32_t i = 0; i < ubatch.n_tokens; ++i) { + if (!cells.is_empty(head_cur + i)) { + assert(cells.seq_count(head_cur + i) == 1); - const llama_seq_id seq_id = cells.seq_get(head_cur + idx); - const llama_pos pos = cells.pos_get(head_cur + idx); + const llama_seq_id seq_id = cells.seq_get(head_cur + i); + const llama_pos pos = cells.pos_get(head_cur + i); - seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos); + seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos); - cells.rm(head_cur + idx); - } + cells.rm(head_cur + i); + } - cells.pos_set(head_cur + idx, ubatch.pos[idx]); + cells.pos_set(head_cur + i, ubatch.pos[i]); - // TODO: fix indexing [UBATCH_IDX] - for (int32_t i = 0; i < ubatch.n_seq_id[s]; i++) { - cells.seq_add(head_cur + idx, ubatch.seq_id[s][i]); - } + for (int32_t s = 0; s < ubatch.n_seq_id[i]; s++) { + cells.seq_add(head_cur + i, ubatch.seq_id[i][s]); } } @@ -696,6 +691,7 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch seq_rm(s, cells.seq_pos_min(s), seq_pos_max_rm[s] + 1); } } + // move the head at the end of the slot head = head_cur + ubatch.n_tokens; } @@ -792,9 +788,7 @@ ggml_tensor * llama_kv_cache_unified::cpy_v(ggml_context * ctx, ggml_tensor * v_ } void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const { - const uint32_t n_tokens = ubatch->n_tokens; - const uint32_t n_seq_tokens = ubatch->n_seq_tokens; - const uint32_t n_seqs = ubatch->n_seqs; + const uint32_t n_tokens = ubatch->n_tokens; GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer)); float * data = (float *) dst->data; @@ -814,52 +808,48 @@ void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ub // xxxxx----- // To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615 for (uint32_t h = 0; h < 1; ++h) { - for (uint32_t s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch->seq_id[s][0]; + for (uint32_t i = 0; i < n_tokens; ++i) { + const llama_seq_id seq_id = ubatch->seq_id[i][0]; - for (uint32_t j = 0; j < n_seq_tokens; ++j) { - const uint32_t idx = s*n_seq_tokens + j; + const llama_pos p1 = ubatch->pos[i]; - const llama_pos p1 = ubatch->pos[idx]; + for (uint32_t j = 0; j < n_kv; ++j) { + float f = 0.0f; - for (uint32_t i = 0; i < n_kv; ++i) { - float f = 0.0f; + bool masked = false; - bool masked = false; - - if (cells.is_empty(i)) { - masked = true; - } else { - const llama_pos p0 = cells.pos_get(i); - - // mask the token if not the same sequence - masked = masked || (!cells.seq_has(i, seq_id)); + if (cells.is_empty(j)) { + masked = true; + } else { + const llama_pos p0 = cells.pos_get(j); - // mask future tokens - masked = masked || (causal_attn && p0 > p1); + // mask the token if not the same sequence + masked = masked || 
(!cells.seq_has(j, seq_id)); - // apply SWA if any - masked = masked || (is_masked_swa(p0, p1)); + // mask future tokens + masked = masked || (causal_attn && p0 > p1); - if (!masked && hparams.use_alibi) { - f = -std::abs(p0 - p1); - } - } + // apply SWA if any + masked = masked || (is_masked_swa(p0, p1)); - if (masked) { - f = -INFINITY; + if (!masked && hparams.use_alibi) { + f = -std::abs(p0 - p1); } + } - data[h*(n_kv*n_tokens) + idx*n_kv + i] = f; + if (masked) { + f = -INFINITY; } + + data[h*(n_kv*n_tokens) + i*n_kv + j] = f; } } // mask padded tokens if (data) { - for (uint32_t j = n_tokens; j < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++j) { - for (uint32_t i = 0; i < n_kv; ++i) { - data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; + for (uint32_t i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (uint32_t j = 0; j < n_kv; ++j) { + data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; } } } @@ -887,12 +877,12 @@ void llama_kv_cache_unified::set_input_pos_bucket(ggml_tensor * dst, const llama const int32_t n_kv = dst->ne[0]; for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - for (int i = 0; i < n_kv; ++i) { + for (int i = 0; i < n_tokens; ++i) { + for (int j = 0; j < n_kv; ++j) { // the position when the cells is empty is irrelevant - it will be masked out later in the attention - const llama_pos p0 = cells.is_empty(i) ? -1 : cells.pos_get(i); + const llama_pos p0 = cells.is_empty(j) ? -1 : cells.pos_get(j); - data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(p0, ubatch->pos[j], hparams.n_rel_attn_bkts, false); + data[h*(n_kv*n_tokens) + i*n_kv + j] = llama_relative_position_bucket(p0, ubatch->pos[i], hparams.n_rel_attn_bkts, false); } } } @@ -1509,12 +1499,9 @@ bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell seq_rm(dest_seq_id, -1, -1); - llama_sbatch sbatch; - llama_ubatch ubatch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false); + llama_batch_allocr balloc(hparams.n_pos_per_embd()); - ubatch.n_tokens = cell_count; - ubatch.n_seq_tokens = cell_count; - ubatch.n_seqs = 1; + llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1); for (uint32_t i = 0; i < cell_count; ++i) { llama_pos pos; @@ -1746,9 +1733,8 @@ llama_kv_cache_unified_state::llama_kv_cache_unified_state( llama_kv_cache_unified_state::llama_kv_cache_unified_state( llama_kv_cache_unified * kv, - llama_sbatch sbatch, llama_kv_cache_unified::ubatch_heads heads, - std::vector ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), sbatch(std::move(sbatch)), heads(std::move(heads)), ubatches(std::move(ubatches)) { + std::vector ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), heads(std::move(heads)), ubatches(std::move(ubatches)) { } llama_kv_cache_unified_state::~llama_kv_cache_unified_state() = default; @@ -1781,12 +1767,6 @@ bool llama_kv_cache_unified_state::apply() { return true; } -std::vector & llama_kv_cache_unified_state::out_ids() { - assert(status == LLAMA_MEMORY_STATUS_SUCCESS); - - return sbatch.out_ids; -} - llama_memory_status llama_kv_cache_unified_state::get_status() const { return status; } diff --git a/src/llama-kv-cache-unified.h b/src/llama-kv-cache-unified.h index d96571d952b81..1560640045c82 100644 --- a/src/llama-kv-cache-unified.h +++ b/src/llama-kv-cache-unified.h @@ -57,7 +57,7 @@ class llama_kv_cache_unified : public llama_memory_i { // llama_memory_state_ptr init_batch( - const llama_batch & batch, + llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) override; @@ -231,7 +231,6 @@ 
class llama_kv_cache_unified_state : public llama_memory_state_i { // used to create a decode state from a batch llama_kv_cache_unified_state( llama_kv_cache_unified * kv, - llama_sbatch sbatch, ubatch_heads heads, std::vector ubatches); @@ -244,8 +243,6 @@ class llama_kv_cache_unified_state : public llama_memory_state_i { bool next() override; bool apply() override; - std::vector & out_ids() override; - llama_memory_status get_status() const override; const llama_ubatch & get_ubatch() const override; @@ -286,8 +283,6 @@ class llama_kv_cache_unified_state : public llama_memory_state_i { // batch processing state // - llama_sbatch sbatch; - // the index of the next ubatch to process size_t i_next = 0; diff --git a/src/llama-kv-cells.h b/src/llama-kv-cells.h index 1d4e70f4d3212..349e9032e2484 100644 --- a/src/llama-kv-cells.h +++ b/src/llama-kv-cells.h @@ -384,10 +384,10 @@ class llama_kv_cells_unified { // std::vector shift; - using bits_t = std::bitset; + using seq_set_t = std::bitset; // the bitset seq[i] tells us which sequences are currently occupying the i-th cell - std::vector seq; + std::vector seq; // the set seq_pos[s] tells us which positions are currently present for sequence s // this way seq_pos[s].begin() and seq_pos[s].rbegin() give us the min/max positions currently in the cache diff --git a/src/llama-memory-hybrid.cpp b/src/llama-memory-hybrid.cpp index d4b260db4c8e7..1b16686819eff 100644 --- a/src/llama-memory-hybrid.cpp +++ b/src/llama-memory-hybrid.cpp @@ -32,7 +32,7 @@ llama_memory_hybrid::llama_memory_hybrid( mem_attn(new llama_kv_cache_unified( model, filter_attn == nullptr ? - [&](int32_t il) { return !model.hparams.is_recurrent(il); } + [&](int32_t il) { return !hparams.is_recurrent(il); } : filter_attn, type_k, type_v, @@ -47,7 +47,7 @@ llama_memory_hybrid::llama_memory_hybrid( mem_recr(new llama_memory_recurrent( model, filter_recr == nullptr ? - [&](int32_t il) { return model.hparams.is_recurrent(il); } + [&](int32_t il) { return hparams.is_recurrent(il); } : filter_recr, type_r, type_s, @@ -56,42 +56,49 @@ llama_memory_hybrid::llama_memory_hybrid( n_seq_max )) {} -llama_memory_state_ptr llama_memory_hybrid::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled) { +llama_memory_state_ptr llama_memory_hybrid::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { + do { + balloc.split_reset(); - // since this includes a recurrent cache, we cannot use split_simple - auto sbatch = llama_sbatch(batch, hparams.n_embd, false); + // follow the recurrent pattern for creating the ubatch splits + std::vector ubatches; - // follow the recurrent pattern for creating the ubatch splits - std::vector ubatches; - while (sbatch.n_tokens > 0) { - llama_ubatch ubatch; + while (true) { + llama_ubatch ubatch; - if (embd_pooled) { - // Pooled embeddings cannot be split across ubatches (yet) - ubatch = sbatch.split_seq(n_ubatch); - } else { - ubatch = sbatch.split_equal(n_ubatch); + if (embd_all) { + // if all tokens are output, split by sequence + ubatch = balloc.split_seq(n_ubatch); + } else { + ubatch = balloc.split_equal(n_ubatch); + } + + if (ubatch.n_tokens == 0) { + break; + } + + ubatches.push_back(std::move(ubatch)); // NOLINT } - ubatches.push_back(ubatch); - } + // prepare the recurrent batches first + if (!mem_recr->prepare(ubatches)) { + // TODO: will the recurrent cache be in an undefined state at this point? 
+ LLAMA_LOG_ERROR("%s: failed to prepare recurrent ubatches\n", __func__); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + } - // prepare the recurrent batches first - if (!mem_recr->prepare(ubatches)) { - // TODO: will the recurrent cache be in an undefined state at this point? - LLAMA_LOG_ERROR("%s: failed to prepare recurrent ubatches\n", __func__); - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); - } + // prepare the attention cache + auto heads_attn = mem_attn->prepare(ubatches); + if (heads_attn.empty()) { + LLAMA_LOG_ERROR("%s: failed to prepare attention ubatches\n", __func__); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + } - // prepare the attention cache - auto heads_attn = mem_attn->prepare(ubatches); - if (heads_attn.empty()) { - LLAMA_LOG_ERROR("%s: failed to prepare attention ubatches\n", __func__); - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); - } + return std::make_unique( + this, std::move(heads_attn), std::move(ubatches)); + } while(false); - return std::make_unique( - this, std::move(sbatch), std::move(heads_attn), std::move(ubatches)); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); } llama_memory_state_ptr llama_memory_hybrid::init_full() { @@ -188,15 +195,13 @@ llama_memory_hybrid_state::llama_memory_hybrid_state( llama_memory_hybrid_state::llama_memory_hybrid_state( llama_memory_hybrid * mem, - llama_sbatch sbatch, std::vector heads_attn, std::vector ubatches) : - sbatch(std::move(sbatch)), ubatches(std::move(ubatches)), // note: here we copy the ubatches. not sure if this is ideal - state_attn(new llama_kv_cache_unified_state(mem->get_mem_attn(), {}, std::move(heads_attn), this->ubatches)), - state_recr(new llama_memory_recurrent_state(mem->get_mem_recr(), {}, this->ubatches)), - status(LLAMA_MEMORY_STATUS_SUCCESS) { + state_attn(new llama_kv_cache_unified_state(mem->get_mem_attn(), std::move(heads_attn), this->ubatches)), + state_recr(new llama_memory_recurrent_state(mem->get_mem_recr(), this->ubatches)), + status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) { } bool llama_memory_hybrid_state::next() { @@ -223,12 +228,6 @@ bool llama_memory_hybrid_state::apply() { return res; } -std::vector & llama_memory_hybrid_state::out_ids() { - assert(status == LLAMA_MEMORY_STATUS_SUCCESS); - - return sbatch.out_ids; -} - llama_memory_status llama_memory_hybrid_state::get_status() const { return status; } diff --git a/src/llama-memory-hybrid.h b/src/llama-memory-hybrid.h index b5700c5225f18..4d27ab896aa05 100644 --- a/src/llama-memory-hybrid.h +++ b/src/llama-memory-hybrid.h @@ -50,9 +50,9 @@ class llama_memory_hybrid : public llama_memory_i { // llama_memory_state_ptr init_batch( - const llama_batch & batch, + llama_batch_allocr & balloc, uint32_t n_ubatch, - bool embd_pooled) override; + bool embd_all) override; llama_memory_state_ptr init_full() override; @@ -107,7 +107,6 @@ class llama_memory_hybrid_state : public llama_memory_state_i { // init success llama_memory_hybrid_state( llama_memory_hybrid * mem, - llama_sbatch sbatch, std::vector heads_attn, std::vector ubatches); @@ -116,8 +115,6 @@ class llama_memory_hybrid_state : public llama_memory_state_i { bool next() override; bool apply() override; - std::vector & out_ids() override; - llama_memory_status get_status() const override; const llama_ubatch & get_ubatch() const override; @@ -129,8 +126,6 @@ class llama_memory_hybrid_state : public llama_memory_state_i { const llama_memory_recurrent_state * 
get_state_recr() const; private: - llama_sbatch sbatch; - // the index of the next ubatch to process size_t i_next = 0; diff --git a/src/llama-memory-recurrent.cpp b/src/llama-memory-recurrent.cpp index c4f9a6f1ddc98..b064da0084c52 100644 --- a/src/llama-memory-recurrent.cpp +++ b/src/llama-memory-recurrent.cpp @@ -362,29 +362,31 @@ llama_pos llama_memory_recurrent::seq_pos_max(llama_seq_id seq_id) const { return result; } -llama_memory_state_ptr llama_memory_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) { - auto sbatch = llama_sbatch(batch, hparams.n_embd, false); - +llama_memory_state_ptr llama_memory_recurrent::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { std::vector ubatches; - while (sbatch.n_tokens > 0) { + while (true) { llama_ubatch ubatch; if (embd_all) { // if all tokens are output, split by sequence - ubatch = sbatch.split_seq(n_ubatch); + ubatch = balloc.split_seq(n_ubatch); } else { - ubatch = sbatch.split_equal(n_ubatch); + ubatch = balloc.split_equal(n_ubatch); + } + + if (ubatch.n_tokens == 0) { + break; } - ubatches.push_back(ubatch); + ubatches.push_back(std::move(ubatch)); // NOLINT } if (!prepare(ubatches)) { return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); } - return std::make_unique(this, std::move(sbatch), std::move(ubatches)); + return std::make_unique(this, std::move(ubatches)); } llama_memory_state_ptr llama_memory_recurrent::init_full() { @@ -423,9 +425,8 @@ bool llama_memory_recurrent::prepare(const std::vector & ubatches) } bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) { - const uint32_t n_seqs = ubatch.n_seqs; - const uint32_t n_seq_tokens = ubatch.n_seq_tokens; + const uint32_t n_seqs = ubatch.n_seqs; // if we have enough unused cells before the current head -> // better to start searching from the beginning of the cache, hoping to fill it @@ -445,9 +446,11 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) { // everything should fit if all seq_ids are smaller than the max for (uint32_t s = 0; s < n_seqs; ++s) { - const uint32_t n_seq_id = ubatch.n_seq_id[s]; + const uint32_t i = s*n_seq_tokens; // first token of sequence set s + const uint32_t n_seq_id = ubatch.n_seq_id[i]; + for (uint32_t j = 0; j < n_seq_id; ++j) { - const llama_seq_id seq_id = ubatch.seq_id[s][j]; + const llama_seq_id seq_id = ubatch.seq_id[i][j]; if (seq_id < 0 || (uint32_t) seq_id >= size) { // too big seq_id @@ -506,7 +509,8 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) { // find usable cell range for (uint32_t s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; + const uint32_t i = s*n_seq_tokens; + const llama_seq_id seq_id = ubatch.seq_id[i][0]; auto & seq_meta = cells[seq_id]; bool has_cell = false; if (seq_meta.tail >= 0) { @@ -530,7 +534,7 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) { seq_meta.tail = next_empty_cell; // find next empty cell if (s + 1 < n_seqs) { - for (uint32_t i = 0; i < size; ++i) { + for (uint32_t j = 0; j < size; ++j) { next_empty_cell += 1; if (next_empty_cell >= size) { next_empty_cell -= size; } auto & cell = cells[next_empty_cell]; @@ -544,8 +548,9 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) { // gather and re-order for (uint32_t s = 0; s < n_seqs; ++s) { + const uint32_t i = s*n_seq_tokens; const int32_t dst_id = s + min; - const int32_t src_id = cells[ubatch.seq_id[s][0]].tail; + const int32_t src_id = 
cells[ubatch.seq_id[i][0]].tail; if (dst_id != src_id) { auto & dst_cell = cells[dst_id]; auto & src_cell = cells[src_id]; @@ -555,8 +560,8 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) { std::swap(dst_cell.seq_id, src_cell.seq_id); // swap tails - for (uint32_t i = 0; i < size; ++i) { - int32_t & tail = cells[i].tail; + for (uint32_t j = 0; j < size; ++j) { + int32_t & tail = cells[j].tail; if (tail == src_id) { tail = dst_id; } else if (tail == dst_id) { @@ -568,7 +573,8 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) { // update the pos of the used seqs for (uint32_t s = 0; s < n_seqs; ++s) { - const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1]; + const uint32_t i = s*n_seq_tokens; + const llama_pos last_pos = ubatch.pos[i + n_seq_tokens - 1]; const int32_t cell_id = s + min; auto & cell = cells[cell_id]; @@ -576,12 +582,12 @@ bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) { // What should happen when the pos backtracks or skips a value? // Clearing the state mid-batch would require special-casing which isn't done. LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n", - __func__, last_pos, cell.pos, ubatch.seq_id[s][0], n_seq_tokens); + __func__, last_pos, cell.pos, ubatch.seq_id[i][0], n_seq_tokens); } cell.pos = last_pos; cell.seq_id.clear(); - for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) { - const llama_seq_id seq_id = ubatch.seq_id[s][j]; + for (int32_t j = 0; j < ubatch.n_seq_id[i]; ++j) { + const llama_seq_id seq_id = ubatch.seq_id[i][j]; cell.seq_id.insert(seq_id); cells[seq_id].tail = cell_id; } @@ -827,12 +833,9 @@ bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell seq_rm(dest_seq_id, -1, -1); - llama_sbatch sbatch; - llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false); + llama_batch_allocr balloc(hparams.n_pos_per_embd()); - batch.n_tokens = cell_count; - batch.n_seq_tokens = cell_count; - batch.n_seqs = 1; + llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1); for (uint32_t i = 0; i < cell_count; ++i) { llama_pos pos; @@ -846,12 +849,12 @@ bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell return false; } - batch.pos[i] = pos; + ubatch.pos[i] = pos; } - batch.n_seq_id[0] = 1; - batch.seq_id[0] = &dest_seq_id; + ubatch.n_seq_id[0] = 1; + ubatch.seq_id[0] = &dest_seq_id; - if (!find_slot(batch)) { + if (!find_slot(ubatch)) { LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__); return false; } @@ -859,8 +862,8 @@ bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values) // Assume that this is one contiguous block of cells GGML_ASSERT(head + cell_count <= size); - GGML_ASSERT(cells[head].pos == batch.pos[0]); - GGML_ASSERT(cells[head + cell_count - 1].pos == batch.pos[cell_count - 1]); + GGML_ASSERT(cells[head].pos == ubatch.pos[0]); + GGML_ASSERT(cells[head + cell_count - 1].pos == ubatch.pos[cell_count - 1]); GGML_ASSERT(cells[head].has_seq_id(dest_seq_id)); GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id)); } else { @@ -1048,8 +1051,7 @@ llama_memory_recurrent_state::llama_memory_recurrent_state( llama_memory_recurrent_state::llama_memory_recurrent_state( llama_memory_recurrent * mem, - llama_sbatch sbatch, - std::vector ubatches) : 
status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), sbatch(std::move(sbatch)), ubatches(std::move(ubatches)) {} + std::vector ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), ubatches(std::move(ubatches)) {} llama_memory_recurrent_state::~llama_memory_recurrent_state() = default; @@ -1071,12 +1073,6 @@ bool llama_memory_recurrent_state::apply() { return true; } -std::vector & llama_memory_recurrent_state::out_ids() { - assert(status == LLAMA_MEMORY_STATUS_SUCCESS); - - return sbatch.out_ids; -} - llama_memory_status llama_memory_recurrent_state::get_status() const { return status; } diff --git a/src/llama-memory-recurrent.h b/src/llama-memory-recurrent.h index 290cc84ab3fbc..be58dae7cfe33 100644 --- a/src/llama-memory-recurrent.h +++ b/src/llama-memory-recurrent.h @@ -35,7 +35,7 @@ class llama_memory_recurrent : public llama_memory_i { // llama_memory_state_ptr init_batch( - const llama_batch & batch, + llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) override; @@ -137,7 +137,6 @@ class llama_memory_recurrent_state : public llama_memory_state_i { // used to create a state from a batch llama_memory_recurrent_state( llama_memory_recurrent * mem, - llama_sbatch sbatch, std::vector ubatches); virtual ~llama_memory_recurrent_state(); @@ -149,8 +148,6 @@ class llama_memory_recurrent_state : public llama_memory_state_i { bool next() override; bool apply() override; - std::vector & out_ids() override; - llama_memory_status get_status() const override; const llama_ubatch & get_ubatch() const override; @@ -173,8 +170,6 @@ class llama_memory_recurrent_state : public llama_memory_state_i { llama_memory_recurrent * mem; - llama_sbatch sbatch; - size_t i_next = 0; std::vector ubatches; diff --git a/src/llama-memory.h b/src/llama-memory.h index 24668f861b976..d2ef0c2a3b4aa 100644 --- a/src/llama-memory.h +++ b/src/llama-memory.h @@ -7,6 +7,8 @@ struct llama_ubatch; +class llama_batch_allocr; + class llama_io_write_i; class llama_io_read_i; @@ -50,9 +52,6 @@ struct llama_memory_state_i { // return false on failure virtual bool apply() = 0; - // TODO: this might get reworked in the future when refactoring llama_batch - virtual std::vector & out_ids() = 0; - // get the current ubatch virtual const llama_ubatch & get_ubatch() const = 0; @@ -71,7 +70,7 @@ struct llama_memory_i { // return a state object containing the ubatches and KV cache state required to process them // check the llama_memory_state_i::get_status() for the result virtual llama_memory_state_ptr init_batch( - const llama_batch & batch, + llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) = 0; diff --git a/tools/server/server.cpp b/tools/server/server.cpp index 9d55b3338bcfe..aa18513e393b4 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -3385,38 +3385,6 @@ struct server_context { llama_set_embeddings(ctx, slot_batched->need_embd()); } - // pad the batch so that batch.n_tokens >= n_slots - // TODO: temporary workaround for https://github.com/ggml-org/llama.cpp/issues/13689 - if (slot_batched->need_embd()) { - const int n_slots = slots.size(); - - if (batch.n_tokens < n_slots) { - std::set seq_ids; - for (int j = 0; j < batch.n_tokens; ++j) { - seq_ids.insert(batch.seq_id[j][0]); - } - - // find unused sequence id - llama_seq_id seq_id = -1; - for (int i = 0; i < n_slots; ++i) { - if (seq_ids.find(i) == seq_ids.end()) { - seq_id = i; - } - } - - const int n_add = n_slots - batch.n_tokens; - - SRV_WRN("adding %d dummy tokens to the batch, seq_id = %d\n", n_add, seq_id); - - for (int j = 0; j < 
n_add; ++j) { - common_batch_add(batch, 0, j, { seq_id }, true); - } - - slots[seq_id].cache_tokens.clear(); - llama_memory_seq_rm(llama_get_memory(ctx), seq_id, -1, -1); - } - } - int32_t i_next = 0; // process the created batch of tokens From f1e9fd2d425ccfe93efae71124e6bbd8e204d0a9 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 20 Jun 2025 10:50:27 +0300 Subject: [PATCH 108/192] model : more uniform output id handling (#14275) * model : more uniform output id handling ggml-ci * cont : revert n_outputs < n_tokens optimization ggml-ci * cont : fix out_ids initialization ggml-ci --- src/llama-graph.cpp | 54 +-- src/llama-model.cpp | 839 ++++++++++++++++++++++---------------------- 2 files changed, 455 insertions(+), 438 deletions(-) diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 083366fd68d07..7e162c5552204 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -92,36 +92,28 @@ void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) { } void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) { - if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) { - //GGML_ASSERT(out_ids && "every model that can must skip unused outputs"); + GGML_ASSERT(out_ids); - if (!out_ids) { - LLAMA_LOG_WARN("%s: 'out_ids' is not created\n", __func__); - } else { - const int64_t n_tokens = ubatch->n_tokens; + const int64_t n_tokens = ubatch->n_tokens; - GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer)); - int32_t * data = (int32_t *) out_ids->data; + GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer)); + int32_t * data = (int32_t *) out_ids->data; - if (n_outputs == n_tokens) { - for (int i = 0; i < n_tokens; ++i) { - data[i] = i; - } - } else if (ubatch->output) { - int32_t n_outputs = 0; - for (int i = 0; i < n_tokens; ++i) { - if (ubatch->output[i]) { - data[n_outputs++] = i; - } - } - // the graph needs to have been passed the correct number of outputs - GGML_ASSERT(n_outputs == n_outputs); - } else if (n_outputs == 1) { - // only keep last output - data[0] = n_tokens - 1; - } else { - GGML_ASSERT(n_outputs == 0); - } + if (n_outputs == n_tokens) { + for (int i = 0; i < n_tokens; ++i) { + data[i] = i; + } + + return; + } + + GGML_ASSERT(ubatch->output); + + int n_outputs = 0; + + for (int i = 0; i < n_tokens; ++i) { + if (ubatch->output[i]) { + data[n_outputs++] = i; } } } @@ -874,6 +866,14 @@ ggml_tensor * llm_graph_context::build_inp_attn_scale() const { } ggml_tensor * llm_graph_context::build_inp_out_ids() const { + // note: when all tokens are output, we could skip this optimization to spare the ggml_get_rows() calls, + // but this would make the graph topology depend on the number of output tokens, which can interere with + // features that require constant topology such as pipline parallelism + // ref: https://github.com/ggml-org/llama.cpp/pull/14275#issuecomment-2987424471 + //if (n_outputs < n_tokens) { + // return nullptr; + //} + auto inp = std::make_unique(hparams, cparams, n_outputs); auto & cur = inp->out_ids; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index a5853f8b12dc0..e2c82017f6890 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -4707,6 +4707,8 @@ struct llm_build_llama : public llm_graph_context { const float kq_scale = hparams.f_attention_scale == 0.0f ? 
1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -4769,9 +4771,7 @@ struct llm_build_llama : public llm_graph_context { cb(cur, "attn_out", il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -4867,6 +4867,8 @@ struct llm_build_llama_iswa : public llm_graph_context { const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -4943,9 +4945,7 @@ struct llm_build_llama_iswa : public llm_graph_context { cb(cur, "attn_out", il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -5045,6 +5045,9 @@ struct llm_build_deci : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; const int64_t n_head_kv = hparams.n_head_kv(il); @@ -5118,9 +5121,7 @@ struct llm_build_deci : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -5199,6 +5200,8 @@ struct llm_build_baichuan : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -5250,9 +5253,7 @@ struct llm_build_baichuan : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -5321,6 +5322,8 @@ struct llm_build_xverse : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -5365,9 +5368,7 @@ struct llm_build_xverse : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -5435,6 +5436,8 @@ struct llm_build_falcon : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { 
ggml_tensor * attn_norm; @@ -5490,9 +5493,7 @@ struct llm_build_falcon : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); attn_norm = ggml_get_rows(ctx0, attn_norm, inp_out_ids); @@ -5561,6 +5562,8 @@ struct llm_build_grok : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -5620,9 +5623,7 @@ struct llm_build_grok : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -5721,6 +5722,8 @@ struct llm_build_dbrx : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -5771,9 +5774,7 @@ struct llm_build_dbrx : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -5853,6 +5854,8 @@ struct llm_build_starcoder : public llm_graph_context { inpL = ggml_add(ctx0, inpL, pos); cb(inpL, "inpL", -1); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, model.layers[il].attn_norm, @@ -5885,9 +5888,7 @@ struct llm_build_starcoder : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -5952,6 +5953,8 @@ struct llm_build_refact : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -5984,9 +5987,7 @@ struct llm_build_refact : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -6072,78 +6073,79 @@ struct llm_build_bert : public llm_graph_context { auto * inp_attn = build_attn_inp_no_cache(); - // iterate layers + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * cur = inpL; - ggml_tensor * Qcur; - ggml_tensor * Kcur; - ggml_tensor * Vcur; + { + ggml_tensor * Qcur; + ggml_tensor * Kcur; + ggml_tensor * Vcur; - // self-attention - if (model.layers[il].wqkv) { - cur 
= build_lora_mm(model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); + // self-attention + if (model.layers[il].wqkv) { + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); - if (model.layers[il].bqkv) { - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - } + if (model.layers[il].bqkv) { + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + } - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - } else { - Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq); - Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk); - Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv); - } + Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + } else { + Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq); + Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk); + Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv); + } - if (model.layers[il].attn_q_norm) { - Qcur = build_norm(Qcur, - model.layers[il].attn_q_norm, - model.layers[il].attn_q_norm_b, - LLM_NORM, il); - } + if (model.layers[il].attn_q_norm) { + Qcur = build_norm(Qcur, + model.layers[il].attn_q_norm, + model.layers[il].attn_q_norm_b, + LLM_NORM, il); + } - if (model.layers[il].attn_k_norm) { - Kcur = build_norm(Kcur, - model.layers[il].attn_k_norm, - model.layers[il].attn_k_norm_b, - LLM_NORM, il); - } + if (model.layers[il].attn_k_norm) { + Kcur = build_norm(Kcur, + model.layers[il].attn_k_norm, + model.layers[il].attn_k_norm_b, + LLM_NORM, il); + } - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - // RoPE - if (model.arch == LLM_ARCH_NOMIC_BERT || model.arch == LLM_ARCH_NOMIC_BERT_MOE) { - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); + // RoPE + if (model.arch == LLM_ARCH_NOMIC_BERT || model.arch == LLM_ARCH_NOMIC_BERT_MOE) { + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - } + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + } - cb(Qcur, "Qcur", il); 
- cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, - model.layers[il].wo, model.layers[il].bo, - Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); - cb(cur, "kqv_out", il); + cur = build_attn(inp_attn, gf, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + cb(cur, "kqv_out", il); + } - if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -6240,56 +6242,57 @@ struct llm_build_neo_bert : public llm_graph_context { auto * inp_attn = build_attn_inp_no_cache(); - // iterate layers + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * cur = inpL; - ggml_tensor * Qcur; - ggml_tensor * Kcur; - ggml_tensor * Vcur; - // pre-norm cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); - // self-attention - cur = build_lora_mm(model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - - // RoPE - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); + { + ggml_tensor * Qcur; + ggml_tensor * Kcur; + ggml_tensor * Vcur; - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); + // self-attention + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + // RoPE + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, - model.layers[il].wo, nullptr, - Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); - cb(cur, 
"kqv_out", il); + cur = build_attn(inp_attn, gf, + model.layers[il].wo, nullptr, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + cb(cur, "kqv_out", il); + } - if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -6354,6 +6357,8 @@ struct llm_build_bloom : public llm_graph_context { LLM_NORM, -1); cb(inpL, "inp_norm", -1); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, model.layers[il].attn_norm, @@ -6386,9 +6391,7 @@ struct llm_build_bloom : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -6465,6 +6468,8 @@ struct llm_build_mpt : public llm_graph_context { cb(inpL, "inpL", -1); } + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * attn_norm; @@ -6527,9 +6532,7 @@ struct llm_build_mpt : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -6598,6 +6601,8 @@ struct llm_build_stablelm : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { // norm cur = build_norm(inpL, @@ -6673,9 +6678,7 @@ struct llm_build_stablelm : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); @@ -6750,6 +6753,8 @@ struct llm_build_qwen : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -6796,9 +6801,7 @@ struct llm_build_qwen : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -6867,6 +6870,8 @@ struct llm_build_qwen2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -6916,9 +6921,7 @@ struct llm_build_qwen2 : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip 
computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -6988,6 +6991,8 @@ struct llm_build_qwen2vl : public llm_graph_context { int sections[4]; std::copy(std::begin(hparams.rope_sections), std::begin(hparams.rope_sections) + 4, sections); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -7037,9 +7042,7 @@ struct llm_build_qwen2vl : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -7106,6 +7109,8 @@ struct llm_build_qwen2moe : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -7164,9 +7169,7 @@ struct llm_build_qwen2moe : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -7265,6 +7268,8 @@ struct llm_build_qwen3 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -7317,9 +7322,7 @@ struct llm_build_qwen3 : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -7386,6 +7389,8 @@ struct llm_build_qwen3moe : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -7438,9 +7443,7 @@ struct llm_build_qwen3moe : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -7516,6 +7519,8 @@ struct llm_build_phi2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { attn_norm_output = build_norm(inpL, model.layers[il].attn_norm, @@ -7578,9 +7583,7 @@ struct llm_build_phi2 : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, 
inp_out_ids); attn_norm_output = ggml_get_rows(ctx0, attn_norm_output, inp_out_ids); @@ -7652,6 +7655,8 @@ struct llm_build_phi3 : public llm_graph_context { inp_attn = build_attn_inp_kv_unified(); } + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { auto * residual = inpL; @@ -7715,9 +7720,7 @@ struct llm_build_phi3 : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor* inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); residual = ggml_get_rows(ctx0, residual, inp_out_ids); } @@ -7803,15 +7806,16 @@ struct llm_build_plamo : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); - for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { // norm cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); cb(cur, "attn_norm", il); - ggml_tensor * attention_norm = cur; + ggml_tensor * sa_inp = cur; // self-attention { @@ -7849,18 +7853,17 @@ struct llm_build_plamo : public llm_graph_context { model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - ggml_tensor * sa_out = cur; - - cur = attention_norm; - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); - sa_out = ggml_get_rows(ctx0, sa_out, inp_out_ids); + sa_inp = ggml_get_rows(ctx0, sa_inp, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } + ggml_tensor * sa_out = cur; + + cur = sa_inp; + // feed-forward network { cur = build_ffn(cur, @@ -7925,6 +7928,8 @@ struct llm_build_gpt2 : public llm_graph_context { inpL = ggml_add(ctx0, inpL, pos); cb(inpL, "inpL", -1); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, model.layers[il].attn_norm, @@ -7957,9 +7962,7 @@ struct llm_build_gpt2 : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -8029,6 +8032,8 @@ struct llm_build_codeshell : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, model.layers[il].attn_norm, @@ -8073,9 +8078,7 @@ struct llm_build_codeshell : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -8129,128 +8132,128 @@ struct llm_build_codeshell : public llm_graph_context { struct llm_build_orion : public llm_graph_context { llm_build_orion(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { - const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_head = 
hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); - ggml_tensor * cur; - ggml_tensor * inpL; + ggml_tensor * cur; + ggml_tensor * inpL; - inpL = build_inp_embd(model.tok_embd); + inpL = build_inp_embd(model.tok_embd); - // inp_pos - contains the positions - ggml_tensor * inp_pos = build_inp_pos(); + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(); + auto * inp_attn = build_attn_inp_kv_unified(); - for (int il = 0; il < n_layer; ++il) { - ggml_tensor * inpSA = inpL; + ggml_tensor * inp_out_ids = build_inp_out_ids(); - // norm - cur = build_norm(inpL, - model.layers[il].attn_norm, model.layers[il].attn_norm_b, - LLM_NORM, il); - cb(cur, "attn_norm", il); + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; - // self-attention - { - // compute Q and K and RoPE them - ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - // if (model.layers[il].bq) { - // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - // cb(Qcur, "Qcur", il); - // } - - ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - // if (model.layers[il].bk) { - // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - // cb(Kcur, "Kcur", il); - // } - - ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - // if (model.layers[il].bv) { - // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - // cb(Vcur, "Vcur", il); - // } - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(cur, "attn_norm", il); - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + // if (model.layers[il].bq) { + // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + // cb(Qcur, "Qcur", il); + // } - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + // if (model.layers[il].bk) { + // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + // cb(Kcur, "Kcur", il); + // } - cur = build_attn(inp_attn, gf, - model.layers[il].wo, NULL, - Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); - } + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + // if (model.layers[il].bv) { + // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + // cb(Vcur, "Vcur", il); + // } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur 
= ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); - ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); - // feed-forward network - cur = build_norm(ffn_inp, - model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, - LLM_NORM, il); - cb(cur, "ffn_norm", il); + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } - cur = build_ffn(cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, il); - cb(cur, "ffn_out", il); + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } - cur = ggml_add(ctx0, cur, ffn_inp); + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); - cur = build_cvec(cur, il); - cb(cur, "l_out", il); + // feed-forward network + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); - // input for next layer - inpL = cur; - } + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); - cur = inpL; + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; - cur = build_norm(cur, - model.output_norm, model.output_norm_b, - LLM_NORM, -1); + cur = build_norm(cur, + model.output_norm, model.output_norm_b, + LLM_NORM, -1); - cb(cur, "result_norm", -1); - res->t_embd = cur; + cb(cur, "result_norm", -1); + res->t_embd = cur; - // lm_head - cur = build_lora_mm(model.output, cur); + // lm_head + cur = build_lora_mm(model.output, cur); - cb(cur, "result_output", -1); - res->t_logits = cur; + cb(cur, "result_output", -1); + res->t_logits = cur; - ggml_build_forward_expand(gf, cur); + ggml_build_forward_expand(gf, cur); } }; @@ -8271,6 +8274,8 @@ struct llm_build_internlm2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -8329,9 +8334,7 @@ struct llm_build_internlm2 : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -8407,6 +8410,8 @@ struct llm_build_minicpm3 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -8526,15 +8531,13 @@ struct 
llm_build_minicpm3 : public llm_graph_context { q_states, k_states, v_states, nullptr, nullptr, kq_scale, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } // scale_res - scale the hidden states for residual connection - const float scale_res = scale_depth/sqrtf(float(n_layer)); + const float scale_res = scale_depth/sqrtf(float(n_layer)); // TODO: is this correct? cur = ggml_scale(ctx0, cur, scale_res); cb(cur, "hidden_scaled", il); @@ -8611,6 +8614,8 @@ struct llm_build_gemma : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { // norm cur = build_norm(inpL, @@ -8656,9 +8661,7 @@ struct llm_build_gemma : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -8727,6 +8730,8 @@ struct llm_build_gemma2_iswa : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified_iswa(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { // norm cur = build_norm(inpL, @@ -8771,18 +8776,16 @@ struct llm_build_gemma2_iswa : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); } + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + cur = build_norm(cur, model.layers[il].attn_post_norm, NULL, LLM_NORM_RMS, il); cb(cur, "attn_post_norm", il); - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); cb(sa_out, "sa_out", il); @@ -8861,6 +8864,8 @@ struct llm_build_gemma3_iswa : public llm_graph_context { // TODO: is causal == true correct? 
might need some changes auto * inp_attn = build_attn_inp_kv_unified_iswa(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { const float freq_base_l = model.get_rope_freq_base (cparams, il); const float freq_scale_l = model.get_rope_freq_scale(cparams, il); @@ -8913,18 +8918,16 @@ struct llm_build_gemma3_iswa : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); } + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + cur = build_norm(cur, model.layers[il].attn_post_norm, NULL, LLM_NORM_RMS, il); cb(cur, "attn_post_norm", il); - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); cb(sa_out, "sa_out", il); @@ -8995,6 +8998,8 @@ struct llm_build_starcoder2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -9053,9 +9058,7 @@ struct llm_build_starcoder2 : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -9118,6 +9121,8 @@ struct llm_build_mamba : public llm_graph_context { auto * rs_inp = build_rs_inp(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { // norm cur = build_norm(inpL, @@ -9127,9 +9132,7 @@ struct llm_build_mamba : public llm_graph_context { cur = build_mamba_layer(rs_inp, gf, cur, ubatch, il); - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -9311,13 +9314,15 @@ struct llm_build_command_r : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); - for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { // norm cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM, il); cb(cur, "attn_norm", il); + ggml_tensor * ffn_inp = cur; // self-attention @@ -9385,9 +9390,7 @@ struct llm_build_command_r : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); @@ -9458,6 +9461,8 @@ struct llm_build_cohere2_iswa : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified_iswa(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { const bool is_swa = hparams.is_swa(il); @@ -9520,9 +9525,7 @@ struct llm_build_cohere2_iswa : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 
1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); @@ -9593,6 +9596,8 @@ struct llm_build_olmo : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -9651,9 +9656,7 @@ struct llm_build_olmo : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -9721,6 +9724,8 @@ struct llm_build_olmo2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -9771,18 +9776,16 @@ struct llm_build_olmo2 : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + cur = build_norm(cur, model.layers[il].attn_post_norm, NULL, LLM_NORM_RMS, il); cb(cur, "attn_post_norm", il); - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); cb(ffn_inp, "ffn_inp", il); @@ -9850,6 +9853,8 @@ struct llm_build_olmoe : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -9904,9 +9909,7 @@ struct llm_build_olmoe : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -9976,6 +9979,8 @@ struct llm_build_openelm : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { const int64_t n_head = hparams.n_head(il); const int64_t n_head_kv = hparams.n_head_kv(il); @@ -10037,11 +10042,9 @@ struct llm_build_openelm : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { residual = ggml_get_rows(ctx0, residual, inp_out_ids); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); } ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur); @@ -10107,6 +10110,8 @@ struct llm_build_gptneox : public llm_graph_context { auto * inp_attn = 
build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, model.layers[il].attn_norm, @@ -10151,9 +10156,7 @@ struct llm_build_gptneox : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -10255,6 +10258,8 @@ struct llm_build_arctic : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -10301,9 +10306,7 @@ struct llm_build_arctic : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -10395,6 +10398,8 @@ struct llm_build_deepseek : public llm_graph_context { const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -10456,14 +10461,11 @@ struct llm_build_deepseek : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } - ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); cb(ffn_inp, "ffn_inp", il); @@ -10571,6 +10573,8 @@ struct llm_build_deepseek2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -10720,9 +10724,7 @@ struct llm_build_deepseek2 : public llm_graph_context { } } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -10818,6 +10820,8 @@ struct llm_build_bitnet : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -10900,9 +10904,7 @@ struct llm_build_bitnet : public llm_graph_context { cb(cur, "attn_o_out", il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -10977,6 +10979,8 @@ struct llm_build_t5_enc : public llm_graph_context { auto * inp_attn = build_attn_inp_no_cache(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -11010,9 +11014,7 @@ struct llm_build_t5_enc : public 
llm_graph_context { cb(cur, "kqv_out", il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -11083,6 +11085,8 @@ struct llm_build_t5_dec : public llm_graph_context { auto * inp_attn_self = build_attn_inp_kv_unified(); auto * inp_attn_cross = build_attn_inp_cross(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -11174,11 +11178,8 @@ struct llm_build_t5_dec : public llm_graph_context { //cb(cur, "kqv_out", il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids); } @@ -11248,6 +11249,8 @@ struct llm_build_jais : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, model.layers[il].attn_norm, @@ -11280,9 +11283,7 @@ struct llm_build_jais : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/float(n_embd_head), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -11346,6 +11347,8 @@ struct llm_build_chatglm : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -11412,9 +11415,7 @@ struct llm_build_chatglm : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -11479,6 +11480,8 @@ struct llm_build_glm4 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -11545,9 +11548,7 @@ struct llm_build_glm4 : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -11630,6 +11631,8 @@ struct llm_build_nemotron : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -11689,9 +11692,7 @@ struct llm_build_nemotron : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && 
inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -11759,6 +11760,8 @@ struct llm_build_exaone : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -11820,9 +11823,7 @@ struct llm_build_exaone : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -12098,6 +12099,8 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { const auto n_seq_tokens = ubatch.n_seq_tokens; const auto n_seqs = ubatch.n_seqs; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { const llama_layer * layer = &model.layers[il]; inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); @@ -12139,13 +12142,16 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { ); ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids); - ffn_norm = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens), inp_out_ids); - x_prev = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens), inp_out_ids); - cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids); + ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens); + ffn_norm = ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens); + x_prev = ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens); + cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + + if (il == n_layer - 1 && inp_out_ids) { + ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); + ffn_norm = ggml_get_rows(ctx0, ffn_norm, inp_out_ids); + x_prev = ggml_get_rows(ctx0, x_prev, inp_out_ids); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); } cur = build_rwkv6_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV6); @@ -12193,6 +12199,8 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { const auto n_seq_tokens = ubatch.n_seq_tokens; const auto n_seqs = ubatch.n_seqs; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { const llama_layer * layer = &model.layers[il]; inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); @@ -12217,11 +12225,12 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); cb(ffn_inp, "ffn_inp", il); - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids); - ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids); + cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens); + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); } // feed-forward network @@ -12447,6 +12456,8 @@ struct 
llm_build_rwkv7 : public llm_build_rwkv7_base { const auto n_seq_tokens = ubatch.n_seq_tokens; const auto n_seqs = ubatch.n_seqs; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { const llama_layer * layer = &model.layers[il]; inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); @@ -12488,12 +12499,14 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base { ); ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids); - ffn_norm = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens), inp_out_ids); - x_prev = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens), inp_out_ids); + ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens); + ffn_norm = ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens); + x_prev = ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens); + + if (il == n_layer - 1 && inp_out_ids) { + ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); + ffn_norm = ggml_get_rows(ctx0, ffn_norm, inp_out_ids); + x_prev = ggml_get_rows(ctx0, x_prev, inp_out_ids); } cur = build_rwkv7_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV7); @@ -12538,6 +12551,8 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { const auto n_seq_tokens = ubatch.n_seq_tokens; const auto n_seqs = ubatch.n_seqs; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { const llama_layer * layer = &model.layers[il]; inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); @@ -12562,11 +12577,12 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); cb(ffn_inp, "ffn_inp", il); - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids); - ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids); + cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens); + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); } // feed-forward network @@ -12635,6 +12651,9 @@ struct llm_build_granite : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); const float kq_scale = hparams.f_attention_scale == 0.0f ? 
1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -12697,9 +12716,7 @@ struct llm_build_granite : public llm_graph_context { cb(cur, "attn_out", il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -12818,6 +12835,8 @@ struct llm_build_chameleon : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -12894,21 +12913,19 @@ struct llm_build_chameleon : public llm_graph_context { cur = build_attn(inp_attn, gf, model.layers[il].wo, nullptr, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); - - if (hparams.swin_norm) { - cur = build_norm(cur, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, il); - } } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } + if (hparams.swin_norm) { + cur = build_norm(cur, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + } + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); cb(ffn_inp, "ffn_inp", il); @@ -13149,6 +13166,8 @@ struct llm_build_plm : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -13252,9 +13271,7 @@ struct llm_build_plm : public llm_graph_context { q_states, k_states, v_states, nullptr, nullptr, kq_scale, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -13314,6 +13331,8 @@ struct llm_build_bailingmoe : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -13375,9 +13394,7 @@ struct llm_build_bailingmoe : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_rot)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -13463,6 +13480,8 @@ struct llm_build_dots1 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -13515,9 +13534,7 @@ struct llm_build_dots1 : public llm_graph_context { Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -13615,6 
+13632,8 @@ struct llm_build_arcee : public llm_graph_context { const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -13677,9 +13696,7 @@ struct llm_build_arcee : public llm_graph_context { cb(cur, "attn_out", il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } From 2a5a3e374c96e28849f37ed5c53354b9d61d68c3 Mon Sep 17 00:00:00 2001 From: Charles Xu Date: Fri, 20 Jun 2025 09:51:01 +0200 Subject: [PATCH 109/192] ggml: Update KleidiAI to v1.9.0 (#14277) --- ggml/src/ggml-cpu/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index df00340570baa..52cae778cac18 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -465,9 +465,9 @@ function(ggml_add_cpu_backend_variant_impl tag_name) # Fetch KleidiAI sources: include(FetchContent) - set(KLEIDIAI_COMMIT_TAG "v1.6.0") + set(KLEIDIAI_COMMIT_TAG "v1.9.0") set(KLEIDIAI_DOWNLOAD_URL "https://github.com/ARM-software/kleidiai/archive/refs/tags/${KLEIDIAI_COMMIT_TAG}.tar.gz") - set(KLEIDIAI_ARCHIVE_MD5 "75b4ad68f25ab673dcc01065e5a0b05f") + set(KLEIDIAI_ARCHIVE_MD5 "2a8e1bb55d201557553545536489a017") if (POLICY CMP0135) cmake_policy(SET CMP0135 NEW) From 65bdc3835374d37c92d8cca2ac993399aa96684b Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 20 Jun 2025 11:19:15 +0300 Subject: [PATCH 110/192] ggml : fix repack work size for mul_mat_id (#14292) ggml-ci --- ggml/src/ggml-cpu/repack.cpp | 38 ++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp index 5c6715d5c01ea..2907192904a72 100644 --- a/ggml/src/ggml-cpu/repack.cpp +++ b/ggml/src/ggml-cpu/repack.cpp @@ -1163,13 +1163,24 @@ template op) { case GGML_OP_MUL_MAT: - size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); - return true; + { + size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); + return true; + } case GGML_OP_MUL_MAT_ID: - size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); - size = GGML_PAD(size, sizeof(int64_t)); // + padding for next bloc. - size += sizeof(int64_t) * (1+op->src[0]->ne[2]) * op->src[1]->ne[2]; - return true; + { + size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); + size = GGML_PAD(size, sizeof(int64_t)); // + padding for next bloc. 
+ + const int64_t ne02 = op->src[0]->ne[2]; // n_as, n_expert + const int64_t ne12 = op->src[1]->ne[2]; // n_tokens + + const size_t sizeof_mmid_row_mapping = sizeof(int64_t); + + size += sizeof_mmid_row_mapping*ne02*(ne12 + 1); + + return true; + } default: // GGML_ABORT("fatal error"); break; @@ -1305,14 +1316,17 @@ template wsize >= (GGML_PAD(nbw3, sizeof(int64_t)) + n_as * sizeof(int64_t) + - n_as * ne12 * sizeof(mmid_row_mapping))); + GGML_ASSERT(params->wsize >= + (GGML_PAD(nbw3, sizeof(int64_t)) + + n_as*(ne12 + 1)*sizeof(mmid_row_mapping)) + ); - auto * wdata = (char *) params->wdata; - auto * wdata_src1_end = (char *) wdata + GGML_PAD(nbw3, sizeof(int64_t)); - auto * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as] + auto * wdata = (char *)params->wdata; + auto * wdata_src1_end = (char *)wdata + GGML_PAD(nbw3, sizeof(int64_t)); - struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *) (matrix_row_counts + n_as); // [n_as][ne12] + // total of [n_as][ne12 + 1] elemets of type mmid_row_mapping (2*int32_t = int64_t) + auto * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as] + struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *) (matrix_row_counts + n_as); // [n_as][ne12] // src1: float32 => param type for (int64_t i12 = 0; i12 < ne12; ++i12) { From 85b281590f9c134162b1c472729142788a484f33 Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Fri, 20 Jun 2025 04:57:36 -0700 Subject: [PATCH 111/192] cuda : synchronize graph capture and cublas handle destruction (#14288) Workarounds an issue that may cause CUDA graph capture to fail when a cuBLAS handle is destroyed in a different thread --- ggml/src/ggml-cuda/common.cuh | 18 ++------------ ggml/src/ggml-cuda/ggml-cuda.cu | 44 ++++++++++++++++++++++++++++++--- 2 files changed, 43 insertions(+), 19 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index c14a12f54a8d6..364efcaeccc07 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -19,10 +19,10 @@ #endif #include "ggml-common.h" -#include #include #include #include +#include #include #include @@ -767,21 +767,7 @@ struct ggml_backend_cuda_context { name(GGML_CUDA_NAME + std::to_string(device)) { } - ~ggml_backend_cuda_context() { - if (copy_event != nullptr) { - CUDA_CHECK(cudaEventDestroy(copy_event)); - } - for (int i = 0; i < GGML_CUDA_MAX_DEVICES; ++i) { - for (int j = 0; j < GGML_CUDA_MAX_STREAMS; ++j) { - if (streams[i][j] != nullptr) { - CUDA_CHECK(cudaStreamDestroy(streams[i][j])); - } - } - if (cublas_handles[i] != nullptr) { - CUBLAS_CHECK(cublasDestroy(cublas_handles[i])); - } - } - } + ~ggml_backend_cuda_context(); cudaStream_t stream(int device, int stream) { if (streams[device][stream] == nullptr) { diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 80fe050734dfa..530f541f97d62 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -55,9 +56,8 @@ #include #include #include -#include -#include #include +#include #include #include #include @@ -515,6 +515,33 @@ std::unique_ptr ggml_backend_cuda_context::new_pool_for_device(i return std::unique_ptr(new ggml_cuda_pool_leg(device)); } +// destroying a cuBLAS handle while a graph is being captured in a different thread can result in a CUDA error +// this lock is used to ensure that no cuBLAS handle is destroyed while a graph is being captured + +static std::mutex 
ggml_cuda_lock; +static std::condition_variable ggml_cuda_lock_cv; +static std::atomic ggml_cuda_lock_counter; + +ggml_backend_cuda_context::~ggml_backend_cuda_context() { + std::unique_lock lock(ggml_cuda_lock); + ggml_cuda_lock_cv.wait(lock, []{ return ggml_cuda_lock_counter.load(std::memory_order_relaxed) == 0; }); + + if (copy_event != nullptr) { + CUDA_CHECK(cudaEventDestroy(copy_event)); + } + for (int i = 0; i < GGML_CUDA_MAX_DEVICES; ++i) { + for (int j = 0; j < GGML_CUDA_MAX_STREAMS; ++j) { + if (streams[i][j] != nullptr) { + CUDA_CHECK(cudaStreamDestroy(streams[i][j])); + } + } + if (cublas_handles[i] != nullptr) { + CUBLAS_CHECK(cublasDestroy(cublas_handles[i])); + } + } +} + + // cuda buffer struct ggml_backend_cuda_buffer_context { @@ -2689,6 +2716,11 @@ static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx CUDA_CHECK(cudaStreamEndCapture(cuda_ctx->stream(), &cuda_ctx->cuda_graph->graph)); graph_evaluated_or_captured = true; // CUDA graph has been captured + + std::lock_guard lock(ggml_cuda_lock); + if (ggml_cuda_lock_counter.fetch_sub(1, std::memory_order_relaxed) == 1) { + ggml_cuda_lock_cv.notify_all(); + } } else { graph_evaluated_or_captured = true; // ggml graph has been directly evaluated } @@ -2764,7 +2796,13 @@ static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t backend, } } - if (use_cuda_graph && cuda_graph_update_required) { // Start CUDA graph capture + if (use_cuda_graph && cuda_graph_update_required) { + // Start CUDA graph capture + { + std::lock_guard lock(ggml_cuda_lock); + ggml_cuda_lock_counter.fetch_add(1, std::memory_order_relaxed); + } + CUDA_CHECK(cudaStreamBeginCapture(cuda_ctx->stream(), cudaStreamCaptureModeRelaxed)); } From cc7ec0cda5990642ab92399101608b3662573b36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Fri, 20 Jun 2025 14:04:09 +0200 Subject: [PATCH 112/192] llama : improve sep token handling (#14272) --- ci/run.sh | 2 +- common/arg.cpp | 7 +++ common/common.h | 1 + convert_hf_to_gguf.py | 14 ------ examples/embedding/embedding.cpp | 34 +++++++++++-- gguf-py/gguf/constants.py | 1 + gguf-py/gguf/gguf_writer.py | 3 ++ gguf-py/gguf/vocab.py | 83 ++++++++++++++++++++++++++++++-- include/llama.h | 1 + src/llama-arch.cpp | 1 + src/llama-arch.h | 1 + src/llama-model-saver.cpp | 1 + src/llama-vocab.cpp | 24 +++++++-- src/llama-vocab.h | 1 + tools/server/utils.hpp | 16 ++++-- 15 files changed, 161 insertions(+), 29 deletions(-) diff --git a/ci/run.sh b/ci/run.sh index 94005570511b6..e1b777c304eaf 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -779,7 +779,7 @@ function gg_run_rerank_tiny { model_f16="${path_models}/ggml-model-f16.gguf" # for this model, the SEP token is "
" - (time ./bin/llama-embedding --model ${model_f16} -p "what is panda?
</s></s>hi\nwhat is panda?
</s></s>it's a bear\nwhat is panda?</s></s>
The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." -ngl 99 -c 0 --pooling rank --embd-normalize -1 --verbose-prompt) 2>&1 | tee -a $OUT/${ci}-rk-f16.log + (time ./bin/llama-embedding --model ${model_f16} -p "what is panda?\thi\nwhat is panda?\tit's a bear\nwhat is panda?\tThe giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." -ngl 99 -c 0 --pooling rank --embd-normalize -1 --verbose-prompt) 2>&1 | tee -a $OUT/${ci}-rk-f16.log # sample output # rerank score 0: 0.029 diff --git a/common/arg.cpp b/common/arg.cpp index 3dfaa71eff188..c4ad85c47b61b 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -2706,6 +2706,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.embd_sep = value; } ).set_examples({LLAMA_EXAMPLE_EMBEDDING})); + add_opt(common_arg( + {"--cls-separator"}, "STRING", + "separator of classification sequences (default \\t) for example \"<#seq#>\"", + [](common_params & params, const std::string & value) { + params.cls_sep = value; + } + ).set_examples({LLAMA_EXAMPLE_EMBEDDING})); add_opt(common_arg( {"--host"}, "HOST", string_format("ip address to listen, or bind to an UNIX socket if the address ends with .sock (default: %s)", params.hostname.c_str()), diff --git a/common/common.h b/common/common.h index 5710c4e9735fd..e08a59eae7543 100644 --- a/common/common.h +++ b/common/common.h @@ -358,6 +358,7 @@ struct common_params { int32_t embd_normalize = 2; // normalisation for embeddings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm) std::string embd_out = ""; // empty = default, "array" = [[],[]...], "json" = openai style, "json+" = same "json" + cosine similarity matrix std::string embd_sep = "\n"; // separator of embeddings + std::string cls_sep = "\t"; // separator of classification sequences // server params int32_t port = 8080; // server listens on this network port diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 2e08db3457b60..2fe76589eb062 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -2145,7 +2145,6 @@ def __init__(self, *args, **kwargs): def set_vocab(self): self._set_vocab_gpt2() - self.gguf_writer.add_add_bos_token(True) def set_gguf_parameters(self): super().set_gguf_parameters() @@ -3918,9 +3917,6 @@ def _xlmroberta_set_vocab(self) -> None: special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) special_vocab.add_to_gguf(self.gguf_writer) - self.gguf_writer.add_add_bos_token(True) - self.gguf_writer.add_add_eos_token(True) - @ModelBase.register("DistilBertModel", "DistilBertForMaskedLM", "DistilBertForSequenceClassification") class DistilBertModel(BertModel): @@ -3962,8 +3958,6 @@ def set_vocab(self): bpe_tok_path = self.dir_model / "tokenizer.json" if bpe_tok_path.exists(): self._set_vocab_gpt2() - self.gguf_writer.add_add_bos_token(True) - self.gguf_writer.add_add_eos_token(True) # we need this to validate the size of the token_type embeddings # though currently we are passing all zeros to the token_type embeddings @@ -4848,8 +4842,6 @@ def set_vocab(self): self.gguf_writer.add_token_type_count(2) else: raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel') - self.gguf_writer.add_add_bos_token(True) - self.gguf_writer.add_add_eos_token(True) @ModelBase.register("OpenELMForCausalLM") @@ -5451,9 +5443,6 @@ def set_vocab(self): special_vocab = gguf.SpecialVocab(self.dir_model, 
n_vocab=len(tokens)) special_vocab.add_to_gguf(self.gguf_writer) - self.gguf_writer.add_add_bos_token(False) - self.gguf_writer.add_add_eos_token(True) - def set_gguf_parameters(self): if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None: logger.warning("Couldn't find context length in config.json, assuming default value of 512") @@ -5591,9 +5580,6 @@ def set_vocab(self): special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) special_vocab.add_to_gguf(self.gguf_writer) - self.gguf_writer.add_add_bos_token(False) - self.gguf_writer.add_add_eos_token(True) - def set_gguf_parameters(self): if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None: logger.warning("Couldn't find context length in config.json, assuming default value of 512") diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp index 681929d27d617..0ec2999a0c8e9 100644 --- a/examples/embedding/embedding.cpp +++ b/examples/embedding/embedding.cpp @@ -133,10 +133,36 @@ int main(int argc, char ** argv) { // max batch size const uint64_t n_batch = params.n_batch; + // get added sep and eos token, if any + const std::string added_sep_token = llama_vocab_get_add_sep(vocab) ? llama_vocab_get_text(vocab, llama_vocab_sep(vocab)) : ""; + const std::string added_eos_token = llama_vocab_get_add_eos(vocab) ? llama_vocab_get_text(vocab, llama_vocab_eos(vocab)) : ""; + // tokenize the prompts and trim std::vector> inputs; for (const auto & prompt : prompts) { - auto inp = common_tokenize(ctx, prompt, true, true); + std::vector inp; + + // split classification pairs and insert expected separator tokens + if (pooling_type == LLAMA_POOLING_TYPE_RANK && prompt.find(params.cls_sep) != std::string::npos) { + std::vector pairs = split_lines(prompt, params.cls_sep); + std::string final_prompt; + + for (size_t i = 0; i < pairs.size(); i++) { + final_prompt += pairs[i]; + if (i != pairs.size() - 1) { + if (!added_eos_token.empty()) { + final_prompt += added_eos_token; + } + if (!added_sep_token.empty()) { + final_prompt += added_sep_token; + } + } + } + + inp = common_tokenize(ctx, final_prompt, true, true); + } else { + inp = common_tokenize(ctx, prompt, true, true); + } if (inp.size() > n_batch) { LOG_ERR("%s: number of tokens in input line (%lld) exceeds batch size (%lld), increase batch size and re-run\n", __func__, (long long int) inp.size(), (long long int) n_batch); @@ -145,11 +171,11 @@ int main(int argc, char ** argv) { inputs.push_back(inp); } - // check if the last token is SEP + // check if the last token is SEP/EOS // it should be automatically added by the tokenizer when 'tokenizer.ggml.add_eos_token' is set to 'true' for (auto & inp : inputs) { - if (inp.empty() || inp.back() != llama_vocab_sep(vocab)) { - LOG_WRN("%s: last token in the prompt is not SEP\n", __func__); + if (inp.empty() || (inp.back() != llama_vocab_sep(vocab) && inp.back() != llama_vocab_eos(vocab))) { + LOG_WRN("%s: last token in the prompt is not SEP or EOS\n", __func__); LOG_WRN("%s: 'tokenizer.ggml.add_eos_token' should be set to 'true' in the GGUF header\n", __func__); } } diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 834a1d5e1a97e..0429b0aaf135d 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -198,6 +198,7 @@ class Tokenizer: MASK_ID = "tokenizer.ggml.mask_token_id" ADD_BOS = "tokenizer.ggml.add_bos_token" ADD_EOS = "tokenizer.ggml.add_eos_token" + ADD_SEP = "tokenizer.ggml.add_sep_token" ADD_PREFIX = "tokenizer.ggml.add_space_prefix" 
REMOVE_EXTRA_WS = "tokenizer.ggml.remove_extra_whitespaces" PRECOMPILED_CHARSMAP = "tokenizer.ggml.precompiled_charsmap" diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 54ca0c33fd336..b9b63d052624d 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -891,6 +891,9 @@ def add_add_bos_token(self, value: bool) -> None: def add_add_eos_token(self, value: bool) -> None: self.add_bool(Keys.Tokenizer.ADD_EOS, value) + def add_add_sep_token(self, value: bool) -> None: + self.add_bool(Keys.Tokenizer.ADD_SEP, value) + def add_add_space_prefix(self, value: bool) -> None: self.add_bool(Keys.Tokenizer.ADD_PREFIX, value) diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py index 44d066ee75a7e..6c4d3a422b99d 100644 --- a/gguf-py/gguf/vocab.py +++ b/gguf-py/gguf/vocab.py @@ -119,6 +119,7 @@ def _set_special_token(self, typ: str, tid: Any) -> None: logger.warning(f'Special token type {typ}, id {tid} out of range, must be under {self.n_vocab} - skipping') def _try_load_from_tokenizer_json(self, path: Path) -> bool: + tokenizer = None tokenizer_file = path / 'tokenizer.json' if tokenizer_file.is_file(): with open(tokenizer_file, encoding = 'utf-8') as f: @@ -152,11 +153,87 @@ def _try_load_from_tokenizer_json(self, path: Path) -> bool: added_tokens = tokenizer.get('added_tokens', {}) else: added_tokens = {} + tokenizer_config = None tokenizer_config_file = path / 'tokenizer_config.json' - if not tokenizer_config_file.is_file(): + if tokenizer_config_file.is_file(): + with open(tokenizer_config_file, encoding = 'utf-8') as f: + tokenizer_config = json.load(f) + if tokenizer: + special_bos = (tokenizer_config or {}).get('bos_token') + special_cls = (tokenizer_config or {}).get('cls_token') + special_eos = (tokenizer_config or {}).get('eos_token') + special_sep = (tokenizer_config or {}).get('sep_token') + if not special_bos and special_cls and tokenizer_config: + tokenizer_config['bos_token'] = special_bos = special_cls + if not special_eos and special_sep and tokenizer_config: + tokenizer_config['eos_token'] = special_eos = special_sep + post_processor = tokenizer.get('post_processor', {}) + for processor in post_processor.get('processors', [post_processor]): + if processor.get('type') == 'RobertaProcessing': + self.add_special_token['bos'] = True + self.add_special_token['eos'] = True + self.add_special_token['sep'] = True + if not special_cls and tokenizer_config: + special_cls = processor.get('cls', [special_bos])[0] + tokenizer_config['cls_token'] = special_cls + if not special_sep and tokenizer_config: + special_sep = processor.get('sep', [special_eos])[0] + tokenizer_config['sep_token'] = special_sep + continue + # Crude parsing of TemplateProcessing to determine if BOS/SEP/EOS should be added + # Only works with simple templates, **will** get it wrong on unusual sequences + if processor.get('type') == 'TemplateProcessing': + tmpl_single = processor.get('single', []) + tmpl_pair = processor.get('pair', []) + special_first = None + special_last = None + if len(tmpl_single) > 1: + if special_first := tmpl_single[0].get('SpecialToken', {}).get('id'): + if not tokenizer_config: + special_bos = special_first + self.add_special_token['bos'] = True if special_first in (special_bos, special_cls) else False + if special_first not in (special_bos, special_cls): + logger.warning(f'Unknown leading special token {special_first!r} in TemplateProcessing') + if special_last := tmpl_single[-1].get('SpecialToken', {}).get('id'): + if not tokenizer_config: + 
special_eos = special_last + self.add_special_token['eos'] = True if special_last == special_eos else False + if special_last != special_eos: + logger.warning(f'Unknown trailing special token {special_last!r} in TemplateProcessing') + if tmpl_pair: + seq_start = 1 if tmpl_pair[0].get('SpecialToken', {}).get('id') == special_first else 0 + seq_stop = -1 if tmpl_pair[-1].get('SpecialToken', {}).get('id') == special_last else None + if seq_start == 0 or seq_stop is None: + logger.warning('TemplateProcessing leading/trailing special tokens do not match TemplateProcessing') + if tmpl_pair := tmpl_pair[slice(seq_start, seq_stop)]: + tmpl_a = tmpl_pair[0].get('Sequence', {}).get('id') + tmpl_b = tmpl_pair[-1].get('Sequence', {}).get('id') + if tmpl_a != 'A' or tmpl_b != 'B': + logger.warning(f'Unknown sequence {tmpl_a}...{tmpl_b} in TemplateProcessing') + # A [sep] [eos] B + if tmpl_a == 'A' and tmpl_b == 'B' and (tmpl_pair := tmpl_pair[1:-1]): + add_sep = False + if special_entry := tmpl_pair[0].get('SpecialToken', {}).get('id'): + if special_entry in (special_sep, special_eos) and not special_last: + add_sep = True + if special_entry not in (special_sep, special_eos): + logger.warning(f'Unknown separator token {special_entry!r} in TemplateProcessing') + else: + logger.warning(f'Unknown middle sequence {tmpl_pair[0]!r} in TemplateProcessing') + if len(tmpl_pair) == 2: + if special_entry := tmpl_pair[1].get('SpecialToken', {}).get('id'): + if special_entry in (special_sep, special_eos): + add_sep = True + if special_entry not in (special_sep, special_eos): + logger.warning(f'Unknown second separator token {special_entry!r} in TemplateProcessing') + else: + logger.warning(f'Unknown second middle sequence {tmpl_pair[1]!r} in TemplateProcessing') + self.add_special_token['sep'] = add_sep + if add_sep and not special_sep and tokenizer_config: + tokenizer_config['sep_token'] = special_eos + continue + if not tokenizer_config: return True - with open(tokenizer_config_file, encoding = 'utf-8') as f: - tokenizer_config = json.load(f) chat_template_alt = None chat_template_file = path / 'chat_template.json' if chat_template_file.is_file(): diff --git a/include/llama.h b/include/llama.h index 635508b10f2ff..3475d596502c6 100644 --- a/include/llama.h +++ b/include/llama.h @@ -1044,6 +1044,7 @@ extern "C" { LLAMA_API bool llama_vocab_get_add_bos(const struct llama_vocab * vocab); LLAMA_API bool llama_vocab_get_add_eos(const struct llama_vocab * vocab); + LLAMA_API bool llama_vocab_get_add_sep(const struct llama_vocab * vocab); LLAMA_API llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab); LLAMA_API llama_token llama_vocab_fim_suf(const struct llama_vocab * vocab); diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 0bc60565df12c..8dadef204f9d7 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -198,6 +198,7 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_TOKENIZER_MASK_ID, "tokenizer.ggml.mask_token_id" }, { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" }, { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" }, + { LLM_KV_TOKENIZER_ADD_SEP, "tokenizer.ggml.add_sep_token" }, { LLM_KV_TOKENIZER_ADD_PREFIX, "tokenizer.ggml.add_space_prefix" }, { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, "tokenizer.ggml.remove_extra_whitespaces" }, { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap" }, diff --git a/src/llama-arch.h b/src/llama-arch.h index 51b242c66b824..5b0230c150678 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -194,6 +194,7 @@ enum 
llm_kv { LLM_KV_TOKENIZER_MASK_ID, LLM_KV_TOKENIZER_ADD_BOS, LLM_KV_TOKENIZER_ADD_EOS, + LLM_KV_TOKENIZER_ADD_SEP, LLM_KV_TOKENIZER_ADD_PREFIX, LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, diff --git a/src/llama-model-saver.cpp b/src/llama-model-saver.cpp index a70b9892347cb..563823dc35d8e 100644 --- a/src/llama-model-saver.cpp +++ b/src/llama-model-saver.cpp @@ -228,6 +228,7 @@ void llama_model_saver::add_kv_from_model() { // add_kv(LLM_KV_TOKENIZER_MASK_ID, ???); add_kv(LLM_KV_TOKENIZER_ADD_BOS, vocab.get_add_bos()); add_kv(LLM_KV_TOKENIZER_ADD_EOS, vocab.get_add_eos()); + add_kv(LLM_KV_TOKENIZER_ADD_SEP, vocab.get_add_sep()); add_kv(LLM_KV_TOKENIZER_ADD_PREFIX, vocab.get_add_space_prefix()); add_kv(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.get_remove_extra_whitespaces()); add_kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, vocab.get_precompiled_charsmap()); diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index d90f1d6b1ea63..4ab120d9ba818 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -1269,6 +1269,7 @@ struct llama_vocab::impl { bool add_space_prefix = false; bool add_bos = false; bool add_eos = false; + bool add_sep = false; bool ignore_merges = false; bool clean_spaces = false; // clean_up_tokenization_spaces bool remove_extra_whitespaces = false; @@ -1421,6 +1422,8 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { special_sep_id = 102; special_pad_id = 0; special_mask_id = 103; + + add_sep = true; } else if (tokenizer_model == "gpt2") { type = LLAMA_VOCAB_TYPE_BPE; @@ -1550,12 +1553,15 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { tokenizer_pre == "jina-es" || tokenizer_pre == "jina-de" || tokenizer_pre == "gigachat" || - tokenizer_pre == "jina-v1-en" || tokenizer_pre == "jina-v2-es" || - tokenizer_pre == "jina-v2-de" || + tokenizer_pre == "jina-v2-de") { + pre_type = LLAMA_VOCAB_PRE_TYPE_GPT2; + } else if ( + tokenizer_pre == "jina-v1-en" || tokenizer_pre == "jina-v2-code" || tokenizer_pre == "roberta-bpe") { pre_type = LLAMA_VOCAB_PRE_TYPE_GPT2; + add_sep = true; } else if ( tokenizer_pre == "refact") { pre_type = LLAMA_VOCAB_PRE_TYPE_REFACT; @@ -1665,6 +1671,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { clean_spaces = true; add_bos = true; add_eos = false; + add_sep = true; } else if (type == LLAMA_VOCAB_TYPE_UGM) { pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT; add_bos = false; @@ -1801,7 +1808,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { } } - // Handle add_bos and add_eos + // Handle add_bos, add_eos and add_sep { bool temp = true; @@ -1811,6 +1818,9 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) { add_eos = temp; } + if (ml.get_key(LLM_KV_TOKENIZER_ADD_SEP, temp, false)) { + add_sep = temp; + } } // auto-detect special tokens by text @@ -3000,6 +3010,10 @@ bool llama_vocab::get_add_eos() const { return pimpl->add_eos; } +bool llama_vocab::get_add_sep() const { + return pimpl->add_sep; +} + bool llama_vocab::get_ignore_merges() const { return pimpl->ignore_merges; } @@ -3191,6 +3205,10 @@ bool llama_vocab_get_add_eos(const struct llama_vocab * vocab) { return vocab->get_add_eos(); } +bool llama_vocab_get_add_sep(const struct llama_vocab * vocab) { + return vocab->get_add_sep(); +} + llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab) { return vocab->token_fim_pre(); } diff --git a/src/llama-vocab.h 
b/src/llama-vocab.h index daa6cf3082f90..40e4d1c05b18e 100644 --- a/src/llama-vocab.h +++ b/src/llama-vocab.h @@ -74,6 +74,7 @@ struct llama_vocab { bool get_add_space_prefix () const; bool get_add_bos () const; bool get_add_eos () const; + bool get_add_sep () const; bool get_ignore_merges () const; bool get_clean_spaces () const; bool get_remove_extra_whitespaces () const; diff --git a/tools/server/utils.hpp b/tools/server/utils.hpp index f3e0392a4e9d1..f8fab2c86664e 100644 --- a/tools/server/utils.hpp +++ b/tools/server/utils.hpp @@ -271,12 +271,20 @@ static llama_tokens format_rerank(const struct llama_vocab * vocab, const llama_ } result.reserve(doc.size() + query.size() + 4); - result.push_back(llama_vocab_bos(vocab)); + if (llama_vocab_get_add_bos(vocab)) { + result.push_back(llama_vocab_bos(vocab)); + } result.insert(result.end(), query.begin(), query.end()); - result.push_back(eos_token); - result.push_back(llama_vocab_sep(vocab)); + if (llama_vocab_get_add_eos(vocab)) { + result.push_back(eos_token); + } + if (llama_vocab_get_add_sep(vocab)) { + result.push_back(llama_vocab_sep(vocab)); + } result.insert(result.end(), doc.begin(), doc.end()); - result.push_back(eos_token); + if (llama_vocab_get_add_eos(vocab)) { + result.push_back(eos_token); + } return result; } From c3c4e29198ecb76c9cb00f2b0779ea2fb487cf98 Mon Sep 17 00:00:00 2001 From: Christian Kastner Date: Fri, 20 Jun 2025 12:17:32 +0000 Subject: [PATCH 113/192] Implement GGML_CPU_ALL_VARIANTS for PowerPC (#14286) * Add PowerPC feature detection and scoring * ggml-cpu: Implement GGML_CPU_ALL_VARIANTS for PowerPC * ggml-cpu: Delay some initializations until function is called When using GGML_BACKEND_DL=ON, these initializations might use instructions that are not supported by the current CPU. 
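The "delay some initializations" bullet above refers to the C++ pattern of moving namespace-scope static objects into the function that uses them: a function-local static is constructed on the first call rather than when the shared object is loaded, so a variant built with unsupported instructions can still be loaded and score-checked safely. A minimal standalone sketch of that pattern (heavy_traits and get_traits are illustrative names, not identifiers from this patch):

#include <cstdio>

struct heavy_traits {
    // the constructor stands in for code that may use ISA-specific instructions
    heavy_traits() { std::puts("constructed on first use"); }
};

const heavy_traits & get_traits() {
    // function-local static: constructed lazily on the first call,
    // i.e. only after this backend variant has been selected as supported,
    // instead of eagerly at dlopen time as a namespace-scope static would be
    static const heavy_traits traits;
    return traits;
}

int main() {
    get_traits();  // construction happens here, not at library load
    return 0;
}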
--------- Co-authored-by: Diego Devesa --- ggml/src/CMakeLists.txt | 17 ++++ ggml/src/ggml-cpu/CMakeLists.txt | 21 +++++ ggml/src/ggml-cpu/arch/powerpc/cpu-feats.cpp | 82 ++++++++++++++++++++ ggml/src/ggml-cpu/repack.cpp | 29 +++---- 4 files changed, 135 insertions(+), 14 deletions(-) create mode 100644 ggml/src/ggml-cpu/arch/powerpc/cpu-feats.cpp diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index 0c453741b5d84..9cb2c228dcfb2 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -286,6 +286,10 @@ function(ggml_add_cpu_backend_variant tag_name) foreach (feat ${ARGN}) set(GGML_INTERNAL_${feat} ON) endforeach() + elseif (GGML_SYSTEM_ARCH STREQUAL "PowerPC") + foreach (feat ${ARGN}) + set(GGML_INTERNAL_${feat} ON) + endforeach() endif() ggml_add_cpu_backend_variant_impl(${tag_name}) @@ -337,6 +341,19 @@ if (GGML_CPU_ALL_VARIANTS) else() message(FATAL_ERROR "Unsupported ARM target OS: ${CMAKE_SYSTEM_NAME}") endif() + elseif (GGML_SYSTEM_ARCH STREQUAL "PowerPC") + if (CMAKE_SYSTEM_NAME MATCHES "Linux") + ggml_add_cpu_backend_variant(power0) + ggml_add_cpu_backend_variant(power7_1 POWER7) + ggml_add_cpu_backend_variant(power7_2 POWER7 VSX) + ggml_add_cpu_backend_variant(power8_1 POWER8) + ggml_add_cpu_backend_variant(power8_2 POWER8 VSX) + ggml_add_cpu_backend_variant(power9 POWER9 VSX) + ggml_add_cpu_backend_variant(power10 POWER10 VSX) + ggml_add_cpu_backend_variant(power11 POWER11 VSX) + else() + message(FATAL_ERROR "Unsupported PowerPC target OS: ${CMAKE_SYSTEM_NAME}") + endif() else() message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS not yet supported with ${GGML_SYSTEM_ARCH} on ${CMAKE_SYSTEM_NAME}") endif() diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index 52cae778cac18..71b1d67b8d0a6 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -388,6 +388,27 @@ function(ggml_add_cpu_backend_variant_impl tag_name) else() list(APPEND ARCH_FLAGS -mcpu=native -mtune=native -mpowerpc64) endif() + elseif(GGML_CPU_ALL_VARIANTS) + # Begin with the lowest baseline + set(ARCH_DEFINITIONS "") + + # When a feature is selected, bump the MCPU to the first + # version that supported it + foreach(PVER RANGE 7 11) + if(DEFINED GGML_INTERNAL_POWER${PVER}) + set(POWERPC_MCPU "power${PVER}") + list(APPEND ARCH_DEFINITIONS GGML_USE_POWER${PVER}) + endif() + endforeach() + if (GGML_INTERNAL_VSX) + list(APPEND ARCH_DEFINITIONS GGML_USE_VSX) + list(APPEND ARCH_FLAGS -mvsx) + endif() + + if (DEFINED POWERPC_MCPU) + list(APPEND ARCH_FLAGS -mcpu=${POWERPC_MCPU}) + endif() + ggml_add_cpu_backend_features(${GGML_CPU_NAME} powerpc ${ARCH_DEFINITIONS}) else() if (GGML_CPU_POWERPC_CPUTYPE) list(APPEND ARCH_FLAGS -mcpu=${GGML_CPU_POWERPC_CPUTYPE}) diff --git a/ggml/src/ggml-cpu/arch/powerpc/cpu-feats.cpp b/ggml/src/ggml-cpu/arch/powerpc/cpu-feats.cpp new file mode 100644 index 0000000000000..fedd6430278c2 --- /dev/null +++ b/ggml/src/ggml-cpu/arch/powerpc/cpu-feats.cpp @@ -0,0 +1,82 @@ +# include "ggml-backend-impl.h" + +#if defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) + +#if defined(__linux__) +#include +#endif + +#include + +struct powerpc_features { + std::string platform = ""; + int power_version = -1; + + bool has_vsx = false; + + powerpc_features() { +#if defined(__linux__) + unsigned long auxval = getauxval(AT_PLATFORM); + if (auxval) { + platform = std::string(reinterpret_cast(auxval)); + // TBD: Do systems exist that return this in uppercase? 
+ if (platform.substr(0, 5) == "power") { + // Extractt a numeric suffix, if one exists + int vpos = -1; + for (int i = platform.length() - 1; i >= 0; i--) { + if (std::isdigit(platform[i])) { + vpos = i; + } else { + break; + } + } + if (vpos > -1) { + power_version = std::stoi(platform.substr(vpos)); + } + } + } +#endif + if (power_version >= 9) { + has_vsx = true; + } + } +}; + +static int ggml_backend_cpu_powerpc_score() { + int score = 1; + powerpc_features pf; + +// Platform scores +#if defined(GGML_USE_POWER7) + if (pf.power_version < 7) { return 0; } + score += 1<<1; +#endif +#if defined(GGML_USE_POWER8) + if (pf.power_version < 8) { return 0; } + score += 1<<2; +#endif +#if defined(GGML_USE_POWER9) + if (pf.power_version < 9) { return 0; } + score += 1<<3; +#endif +#if defined(GGML_USE_POWER10) + if (pf.power_version < 10) { return 0; } + score += 1<<4; +#endif +#if defined(GGML_USE_POWER11) + if (pf.power_version < 11) { return 0; } + score += 1<<5; +#endif + +// Feature scores +#if defined(GGML_USE_VSX) + if (!pf.has_vsx) { return 0; } + score += 1<<6; +#endif + + return score; +} + +GGML_BACKEND_DL_SCORE_IMPL(ggml_backend_cpu_powerpc_score) + +#endif // defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp index 2907192904a72..692c53e01c08e 100644 --- a/ggml/src/ggml-cpu/repack.cpp +++ b/ggml/src/ggml-cpu/repack.cpp @@ -1411,44 +1411,45 @@ template q4_0_4x4_q8_0; -static const tensor_traits q4_0_4x8_q8_0; -static const tensor_traits q4_0_8x8_q8_0; -static const tensor_traits q4_K_8x8_q8_K; - -// instance for IQ4 -static const tensor_traits iq4_nl_4x4_q8_0; - } // namespace ggml::cpu::repack static const ggml::cpu::tensor_traits * ggml_repack_get_optimal_repack_type(const struct ggml_tensor * cur) { + + // instance for Q4 + static const ggml::cpu::repack::tensor_traits q4_0_4x4_q8_0; + static const ggml::cpu::repack::tensor_traits q4_0_4x8_q8_0; + static const ggml::cpu::repack::tensor_traits q4_0_8x8_q8_0; + static const ggml::cpu::repack::tensor_traits q4_K_8x8_q8_K; + + // instance for IQ4 + static const ggml::cpu::repack::tensor_traits iq4_nl_4x4_q8_0; + if (cur->type == GGML_TYPE_Q4_0) { if (ggml_cpu_has_avx2() || (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0)) { if (cur->ne[1] % 8 == 0) { - return &ggml::cpu::repack::q4_0_8x8_q8_0; + return &q4_0_8x8_q8_0; } } if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) { if (cur->ne[1] % 4 == 0) { - return &ggml::cpu::repack::q4_0_4x8_q8_0; + return &q4_0_4x8_q8_0; } } if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { if (cur->ne[1] % 4 == 0) { - return &ggml::cpu::repack::q4_0_4x4_q8_0; + return &q4_0_4x4_q8_0; } } } else if (cur->type == GGML_TYPE_Q4_K) { if (ggml_cpu_has_avx2()) { if (cur->ne[1] % 8 == 0) { - return &ggml::cpu::repack::q4_K_8x8_q8_K; + return &q4_K_8x8_q8_K; } } } else if (cur->type == GGML_TYPE_IQ4_NL) { if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { if (cur->ne[1] % 4 == 0) { - return &ggml::cpu::repack::iq4_nl_4x4_q8_0; + return &iq4_nl_4x4_q8_0; } } } From 9b2a7742a0f8e4dac7715e9ab9643eb8f459595d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Scipione?= Date: Fri, 20 Jun 2025 15:07:21 +0200 Subject: [PATCH 114/192] sycl: add usage of enqueue_functions extension (#14244) * Add header and namespace to use enqueue_functions extension * Convert submit and parallel_for to use new extension in convert.cpp * Convert submit and parallel_for to use extension in 
ggml-sycl.cpp * Convert submit and parallel_for to use extension in gla.cpp * Convert submit and parallel_for in mmq.cpp * Convert submit and parallel_for in mmvq.cpp * Convert submit and parallel_for in remaining files * Convert all simple parallel_for to nd_launch from enqueue_functions extension * Wrapping extension in general function Create a general function that enable the enqueue_functions extension if it is enable in the compiler, otherwise call the general SYCL function to launch kernels. --------- Signed-off-by: nscipione --- ggml/src/ggml-sycl/binbcast.cpp | 11 +- ggml/src/ggml-sycl/concat.cpp | 69 +++--- ggml/src/ggml-sycl/conv.cpp | 14 +- ggml/src/ggml-sycl/convert.cpp | 265 +++++++++------------- ggml/src/ggml-sycl/cpy.cpp | 166 ++++++++------ ggml/src/ggml-sycl/dmmv.cpp | 116 ++++------ ggml/src/ggml-sycl/dpct/helper.hpp | 32 ++- ggml/src/ggml-sycl/element_wise.cpp | 258 +++++++++------------ ggml/src/ggml-sycl/getrows.cpp | 15 +- ggml/src/ggml-sycl/ggml-sycl.cpp | 93 ++++---- ggml/src/ggml-sycl/gla.cpp | 4 +- ggml/src/ggml-sycl/im2col.cpp | 2 +- ggml/src/ggml-sycl/mmq.cpp | 140 +++++------- ggml/src/ggml-sycl/mmvq.cpp | 333 +++++++++++----------------- ggml/src/ggml-sycl/norm.cpp | 129 +++++------ ggml/src/ggml-sycl/rope.cpp | 44 ++-- ggml/src/ggml-sycl/softmax.cpp | 6 +- ggml/src/ggml-sycl/tsembd.cpp | 11 +- ggml/src/ggml-sycl/wkv.cpp | 28 +-- 19 files changed, 750 insertions(+), 986 deletions(-) diff --git a/ggml/src/ggml-sycl/binbcast.cpp b/ggml/src/ggml-sycl/binbcast.cpp index 0a3883ae1eda5..741630dba342c 100644 --- a/ggml/src/ggml-sycl/binbcast.cpp +++ b/ggml/src/ggml-sycl/binbcast.cpp @@ -225,9 +225,9 @@ struct bin_bcast_sycl { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, block_num) * - sycl::range<3>(1, 1, block_size), + sycl_parallel_for( + stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, block_num) * sycl::range<3>(1, 1, block_size), sycl::range<3>(1, 1, block_size)), [=](sycl::nd_item<3> item_ct1) { k_bin_bcast_unravel( @@ -246,9 +246,8 @@ struct bin_bcast_sycl { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { k_bin_bcast(src0_dd, src1_dd, dst_dd, ne0, ne1, ne2, ne3, ne10, ne11, ne12, ne13, s1, s2, s3, s01, s02, s03, s11, s12, s13, diff --git a/ggml/src/ggml-sycl/concat.cpp b/ggml/src/ggml-sycl/concat.cpp index 7aa91c861d583..3501484a14611 100644 --- a/ggml/src/ggml-sycl/concat.cpp +++ b/ggml/src/ggml-sycl/concat.cpp @@ -89,33 +89,24 @@ static void concat_f32_sycl(const float *x, const float *y, float *dst, sycl::range<3> gridDim(ne2, ne1, num_blocks); switch (dim) { case 0: - stream->parallel_for( - sycl::nd_range<3>(gridDim * - sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - concat_f32_dim0(x, y, dst, ne0, ne00, item_ct1); - }); - break; + sycl_parallel_for(stream, + sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { concat_f32_dim0(x, y, dst, ne0, ne00, item_ct1); }); + break; case 1: - stream->parallel_for( - sycl::nd_range<3>(gridDim * - sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE), - sycl::range<3>(1, 1, 
SYCL_CONCAT_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - concat_f32_dim1(x, y, dst, ne0, ne01, item_ct1); - }); - break; + sycl_parallel_for(stream, + sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { concat_f32_dim1(x, y, dst, ne0, ne01, item_ct1); }); + break; // dim >=2 will be dispatched to the default path default: - stream->parallel_for( - sycl::nd_range<3>(gridDim * - sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - concat_f32_dim2(x, y, dst, ne0, ne02, item_ct1); - }); - break; + sycl_parallel_for(stream, + sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { concat_f32_dim2(x, y, dst, ne0, ne02, item_ct1); }); + break; } } @@ -129,33 +120,29 @@ static void concat_f32_sycl_non_cont( int64_t ne2, int64_t ne3, uint64_t nb0, uint64_t nb1, uint64_t nb2, uint64_t nb3, int32_t dim) { sycl::range<3> gridDim(ne3, ne2, ne1); - stream->parallel_for( - sycl::nd_range<3>(gridDim, sycl::range<3>(1, 1, 1)), - [=](sycl::nd_item<3> item_ct1) { - int64_t i3 = item_ct1.get_group(0); - int64_t i2 = item_ct1.get_group(1); - int64_t i1 = item_ct1.get_group(2); + sycl_parallel_for(stream, sycl::nd_range<3>(gridDim, sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { + int64_t i3 = item_ct1.get_group(0); + int64_t i2 = item_ct1.get_group(1); + int64_t i1 = item_ct1.get_group(2); - int64_t o[4] = {0, 0, 0, 0}; - o[dim] = dim == 0 ? ne00 : (dim == 1 ? ne01 : (dim == 2 ? ne02 : ne03)); + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = dim == 0 ? ne00 : (dim == 1 ? ne01 : (dim == 2 ? 
ne02 : ne03)); - const float *x; + const float * x; - for (int i0 = item_ct1.get_local_id(2); i0 < ne0; - i0 += item_ct1.get_local_range(2)) { + for (int i0 = item_ct1.get_local_id(2); i0 < ne0; i0 += item_ct1.get_local_range(2)) { if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const float *)(src0 + (i3)*nb03 + (i2)*nb02 + (i1)*nb01 + - (i0)*nb00); + x = (const float *) (src0 + (i3) *nb03 + (i2) *nb02 + (i1) *nb01 + (i0) *nb00); } else { - x = (const float *)(src1 + (i3 - o[3]) * nb13 + (i2 - o[2]) * nb12 + - (i1 - o[1]) * nb11 + (i0 - o[0]) * nb10); + x = (const float *) (src1 + (i3 - o[3]) * nb13 + (i2 - o[2]) * nb12 + (i1 - o[1]) * nb11 + + (i0 - o[0]) * nb10); } float *y = (float *)(dst + i3 * nb3 + i2 * nb2 + i1 * nb1 + i0 * nb0); *y = *x; - } - }); + } + }); } void ggml_sycl_op_concat(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { diff --git a/ggml/src/ggml-sycl/conv.cpp b/ggml/src/ggml-sycl/conv.cpp index 475bd34a25d56..c2f991e8d64a7 100644 --- a/ggml/src/ggml-sycl/conv.cpp +++ b/ggml/src/ggml-sycl/conv.cpp @@ -59,16 +59,10 @@ static void conv_transpose_1d_f32_f32_sycl( const int num_blocks = (output_size + SYCL_CONV_TRANPOSE_1D_BLOCK_SIZE - 1) / SYCL_CONV_TRANPOSE_1D_BLOCK_SIZE; const sycl::range<3> block_dims(1, 1, SYCL_CONV_TRANPOSE_1D_BLOCK_SIZE); const sycl::range<3> block_nums(1, 1, num_blocks); - stream->parallel_for( - sycl::nd_range<3>( - block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { - conv_transpose_1d_kernel( - s0, output_size, - src0_ne0, src0_ne1, src0_ne2, - src1_ne0, dst_ne0, - src0, src1, dst, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { + conv_transpose_1d_kernel(s0, output_size, src0_ne0, src0_ne1, src0_ne2, src1_ne0, dst_ne0, src0, src1, dst, + item_ct1); + }); } void ggml_sycl_op_conv_transpose_1d(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { diff --git a/ggml/src/ggml-sycl/convert.cpp b/ggml/src/ggml-sycl/convert.cpp index 96d2583b13b83..0ef567122dddb 100644 --- a/ggml/src/ggml-sycl/convert.cpp +++ b/ggml/src/ggml-sycl/convert.cpp @@ -33,14 +33,11 @@ static void dequantize_block_sycl(const void *__restrict__ vx, { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for( - sycl::nd_range<3>( - sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block(vx, y, k, item_ct1); - }); + sycl_parallel_for( + stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block(vx, y, k, item_ct1); }); } } @@ -53,24 +50,18 @@ static void dequantize_row_q2_K_sycl(const void *vx, dst_t *y, const int64_t k, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 64), - sycl::range<3>(1, 1, 64)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_q2_K(vx, y, item_ct1); - }); + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_q2_K(vx, y, item_ct1); }); } #else { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - 
stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_q2_K(vx, y, item_ct1); - }); + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_q2_K(vx, y, item_ct1); }); } #endif @@ -85,24 +76,18 @@ static void dequantize_row_q3_K_sycl(const void *vx, dst_t *y, const int64_t k, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 64), - sycl::range<3>(1, 1, 64)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_q3_K(vx, y, item_ct1); - }); + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_q3_K(vx, y, item_ct1); }); } #else { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_q3_K(vx, y, item_ct1); - }); + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_q3_K(vx, y, item_ct1); }); } #endif } @@ -116,12 +101,9 @@ static void dequantize_row_q4_0_sycl(const void *vx, dst_t *y, const int64_t k, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_q4_0(vx, y, nb32, item_ct1); - }); + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_q4_0(vx, y, nb32, item_ct1); }); } } @@ -135,13 +117,12 @@ static void dequantize_row_q4_0_sycl_reorder(const void *vx, dst_t *y, const int int constexpr WARP_K = WARP_SIZE * QK4_0; const int n_warp = (k + WARP_K - 1) / WARP_K; GGML_ASSERT(k % 2 == 0); - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, n_warp) * - sycl::range<3>(1, 1, WARP_SIZE), - sycl::range<3>(1, 1, WARP_SIZE)), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]]{ - dequantize_block_q4_0_reorder(vx, y, k, item_ct1); - }); - + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, n_warp) * sycl::range<3>(1, 1, WARP_SIZE), + sycl::range<3>(1, 1, WARP_SIZE)), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + dequantize_block_q4_0_reorder(vx, y, k, item_ct1); + }); } template @@ -153,12 +134,9 @@ static void dequantize_row_q4_1_sycl(const void *vx, dst_t *y, const int64_t k, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_q4_1(vx, y, nb32, item_ct1); - }); + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_q4_1(vx, y, nb32, item_ct1); }); } } @@ -171,14 +149,13 @@ 
static void dequantize_row_q4_K_sycl(const void *vx, dst_t *y, const int64_t k, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor scale_local_acc(sycl::range<1>(12), cgh); - cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_q4_K(vx, y, get_pointer(scale_local_acc), item_ct1); - }); + sycl_parallel_for( + cgh, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { + dequantize_block_q4_K(vx, y, get_pointer(scale_local_acc), item_ct1); + }); }); } } @@ -191,13 +168,13 @@ static void dequantize_row_q4_K_sycl_reorder(const void * vx, dst_t * y, const i dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); - stream->submit([&](sycl::handler & cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor scale_local_acc(sycl::range<1>(12), cgh); - cgh.parallel_for(sycl::nd_range<1>(sycl::range<1>(global_size), sycl::range<1>(local_size)), - [=](sycl::nd_item<1> item_ct1) { - dequantize_block_q4_K_reorder(vx, y, get_pointer(scale_local_acc), item_ct1, nb); - }); + sycl_parallel_for<1>(cgh, sycl::nd_range<1>(sycl::range<1>(global_size), sycl::range<1>(local_size)), + [=](sycl::nd_item<1> item_ct1) { + dequantize_block_q4_K_reorder(vx, y, get_pointer(scale_local_acc), item_ct1, nb); + }); }); } @@ -210,24 +187,18 @@ static void dequantize_row_q5_K_sycl(const void *vx, dst_t *y, const int64_t k, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 64), - sycl::range<3>(1, 1, 64)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_q5_K(vx, y, item_ct1); - }); + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_q5_K(vx, y, item_ct1); }); } #else { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_q5_K(vx, y, item_ct1); - }); + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_q5_K(vx, y, item_ct1); }); } #endif @@ -242,24 +213,18 @@ static void dequantize_row_q6_K_sycl(const void *vx, dst_t *y, const int64_t k, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 64), - sycl::range<3>(1, 1, 64)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_q6_K(vx, y, item_ct1); - }); + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_q6_K(vx, y, item_ct1); }); } #else { dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_q6_K(vx, y, 
item_ct1); - }); + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_q6_K(vx, y, item_ct1); }); } #endif @@ -271,9 +236,9 @@ static void dequantize_row_q6_K_sycl_reorder(const void * vx, dst_t * y, const i dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)), - [=](sycl::nd_item<3> item_ct1) { dequantize_block_q6_K_reorder(vx, y, item_ct1, nb); }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_q6_K_reorder(vx, y, item_ct1, nb); }); } template @@ -284,15 +249,10 @@ static void dequantize_row_iq1_s_sycl(const void *vx, dst_t *y, const int64_t k, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_iq1_s( - vx, y, item_ct1, iq1s_grid_gpu - ); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq1_s(vx, y, item_ct1, iq1s_grid_gpu); }); }); } } @@ -305,15 +265,10 @@ static void dequantize_row_iq1_m_sycl(const void *vx, dst_t *y, const int64_t k, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_iq1_m( - vx, y, item_ct1, iq1s_grid_gpu - ); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq1_m(vx, y, item_ct1, iq1s_grid_gpu); }); }); } } @@ -326,15 +281,12 @@ static void dequantize_row_iq2_xxs_sycl(const void *vx, dst_t *y, const int64_t dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_iq2_xxs( - vx, y, item_ct1, iq2xxs_grid, - ksigns_iq2xs, kmask_iq2xs); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { + dequantize_block_iq2_xxs(vx, y, item_ct1, iq2xxs_grid, ksigns_iq2xs, kmask_iq2xs); + }); }); } } @@ -347,15 +299,12 @@ static void dequantize_row_iq2_xs_sycl(const void *vx, dst_t *y, const int64_t k dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_iq2_xs( - vx, y, item_ct1, iq2xs_grid, - ksigns_iq2xs, kmask_iq2xs); - }); 
+ sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { + dequantize_block_iq2_xs(vx, y, item_ct1, iq2xs_grid, ksigns_iq2xs, kmask_iq2xs); + }); }); } } @@ -368,13 +317,10 @@ static void dequantize_row_iq2_s_sycl(const void *vx, dst_t *y, const int64_t k, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_iq2_s(vx, y, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq2_s(vx, y, item_ct1); }); }); } } @@ -388,15 +334,12 @@ static void dequantize_row_iq3_xxs_sycl(const void *vx, dst_t *y, const int64_t dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_iq3_xxs( - vx, y, item_ct1, iq3xxs_grid, - ksigns_iq2xs, kmask_iq2xs); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { + dequantize_block_iq3_xxs(vx, y, item_ct1, iq3xxs_grid, ksigns_iq2xs, kmask_iq2xs); + }); }); } } @@ -409,14 +352,10 @@ static void dequantize_row_iq3_s_sycl(const void *vx, dst_t *y, const int64_t k, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_iq3_s( - vx, y, item_ct1, kmask_iq2xs, iq3s_grid); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq3_s(vx, y, item_ct1, kmask_iq2xs, iq3s_grid); }); }); } } @@ -432,14 +371,11 @@ static void dequantize_row_iq4_xs_sycl(const void *vx, dst_t *y, const int64_t k dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_iq4_xs(vx, y, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for( + cgh, + sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq4_xs(vx, y, item_ct1); }); }); } #endif @@ -453,14 +389,11 @@ static void dequantize_row_iq4_nl_sycl(const void *vx, dst_t *y, const int64_t k dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * - sycl::range<3>(1, 1, 32), - sycl::range<3>(1, 1, 
32)), - [=](sycl::nd_item<3> item_ct1) { - dequantize_block_iq4_nl(vx, y, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for( + cgh, + sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)), + [=](sycl::nd_item<3> item_ct1) { dequantize_block_iq4_nl(vx, y, item_ct1); }); }); } } diff --git a/ggml/src/ggml-sycl/cpy.cpp b/ggml/src/ggml-sycl/cpy.cpp index bec1371401955..1ffd7f1226724 100644 --- a/ggml/src/ggml-sycl/cpy.cpp +++ b/ggml/src/ggml-sycl/cpy.cpp @@ -413,7 +413,8 @@ static void ggml_cpy_f16_f32_sycl(const char * cx, char * cdst, const int ne, co { dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); - stream->parallel_for( + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { @@ -431,7 +432,8 @@ static void ggml_cpy_f32_f32_sycl(const char * cx, char * cdst, const int ne, co { dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); - stream->parallel_for( + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { @@ -449,7 +451,8 @@ static void ggml_cpy_f32_f16_sycl(const char * cx, char * cdst, const int ne, co { dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); - stream->parallel_for( + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { @@ -465,11 +468,11 @@ static void ggml_cpy_f32_q8_0_sycl(const char * cx, char * cdst, const int ne, c const int nb12, const int nb13, queue_ptr stream) { GGML_ASSERT(ne % QK8_0 == 0); const int num_blocks = ne / QK8_0; - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), - [=](sycl::nd_item<3> item_ct1) { - cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, - ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), + [=](sycl::nd_item<3> item_ct1) { + cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, + ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); + }); } static void ggml_cpy_q8_0_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, @@ -477,11 +480,11 @@ static void ggml_cpy_q8_0_f32_sycl(const char * cx, char * cdst, const int ne, c const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ne; - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), - [=](sycl::nd_item<3> item_ct1) { - cpy_q_f32(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, - ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), + [=](sycl::nd_item<3> item_ct1) { + cpy_q_f32(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, + ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); + }); } static void ggml_cpy_f32_q4_0_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, @@ -490,11 +493,11 @@ 
static void ggml_cpy_f32_q4_0_sycl(const char * cx, char * cdst, const int ne, c const int nb12, const int nb13, queue_ptr stream) { GGML_ASSERT(ne % QK4_0 == 0); const int num_blocks = ne / QK4_0; - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), - [=](sycl::nd_item<3> item_ct1) { - cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, - ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), + [=](sycl::nd_item<3> item_ct1) { + cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, + ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); + }); } static void ggml_cpy_q4_0_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, @@ -502,8 +505,9 @@ static void ggml_cpy_q4_0_f32_sycl(const char * cx, char * cdst, const int ne, c const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ne; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), + [=](sycl::nd_item<3> item_ct1) { cpy_q_f32, QK4_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); @@ -516,11 +520,11 @@ static void ggml_cpy_f32_q4_1_sycl(const char * cx, char * cdst, const int ne, c const int nb12, const int nb13, queue_ptr stream) { GGML_ASSERT(ne % QK4_1 == 0); const int num_blocks = ne / QK4_1; - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), - [=](sycl::nd_item<3> item_ct1) { - cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, - ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), + [=](sycl::nd_item<3> item_ct1) { + cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, + ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); + }); } static void ggml_cpy_q4_1_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, @@ -528,8 +532,9 @@ static void ggml_cpy_q4_1_f32_sycl(const char * cx, char * cdst, const int ne, c const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ne; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), + [=](sycl::nd_item<3> item_ct1) { cpy_q_f32, QK4_1>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); @@ -542,11 +547,11 @@ static void ggml_cpy_f32_q5_0_sycl(const char * cx, char * cdst, const int ne, c const int nb12, const int nb13, queue_ptr stream) { GGML_ASSERT(ne % QK5_0 == 0); const int num_blocks = ne / QK5_0; - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), - [=](sycl::nd_item<3> item_ct1) { - cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, - ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); - }); + 
sycl_parallel_for(stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), + [=](sycl::nd_item<3> item_ct1) { + cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, + ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); + }); } static void ggml_cpy_q5_0_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, @@ -554,8 +559,9 @@ static void ggml_cpy_q5_0_f32_sycl(const char * cx, char * cdst, const int ne, c const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ne; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), + [=](sycl::nd_item<3> item_ct1) { cpy_q_f32, QK5_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); @@ -568,11 +574,11 @@ static void ggml_cpy_f32_q5_1_sycl(const char * cx, char * cdst, const int ne, c const int nb12, const int nb13, queue_ptr stream) { GGML_ASSERT(ne % QK5_1 == 0); const int num_blocks = ne / QK5_1; - stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), - [=](sycl::nd_item<3> item_ct1) { - cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, - ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), + [=](sycl::nd_item<3> item_ct1) { + cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, + ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); + }); } static void ggml_cpy_q5_1_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, @@ -580,8 +586,9 @@ static void ggml_cpy_q5_1_f32_sycl(const char * cx, char * cdst, const int ne, c const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ne; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), + [=](sycl::nd_item<3> item_ct1) { cpy_q_f32, QK5_1>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); @@ -594,11 +601,11 @@ static void ggml_cpy_f32_iq4_nl_sycl(const char * cx, char * cdst, const int ne, const int nb12, const int nb13, queue_ptr stream) { GGML_ASSERT(ne % QK4_NL == 0); const int num_blocks = ne / QK4_NL; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) { - cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, - ne12, nb10, nb11, nb12, nb13, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), + [=](sycl::nd_item<3> item_ct1) { + cpy_f32_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, + ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); + }); } static void ggml_cpy_f16_f16_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, @@ -609,7 +616,8 @@ static void ggml_cpy_f16_f16_sycl(const char * cx, char * cdst, const int ne, 
co { dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); - stream->parallel_for( + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { @@ -628,7 +636,8 @@ static void ggml_cpy_i16_i16_sycl(const char * cx, char * cdst, const int ne, co // dpct::has_capability_or_fail(stream->get_device(), // {sycl::aspect::fp16}); - stream->parallel_for( + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { @@ -647,7 +656,8 @@ static void ggml_cpy_i32_i32_sycl(const char * cx, char * cdst, const int ne, co // dpct::has_capability_or_fail(stream->get_device(), // {sycl::aspect::fp16}); - stream->parallel_for( + sycl_parallel_for( + stream, sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { @@ -662,11 +672,13 @@ static void ggml_cpy_q8_0_q8_0(const char * cx, char * cdst, const int ne, const const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { - cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { + cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, + ne12, nb10, nb11, nb12, nb13, item_ct1); + }); } @@ -675,11 +687,13 @@ static void ggml_cpy_q5_0_q5_0(const char * cx, char * cdst, const int ne, const const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { - cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { + cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, + ne12, nb10, nb11, nb12, nb13, item_ct1); + }); } @@ -689,11 +703,13 @@ static void ggml_cpy_q5_1_q5_1(const char * cx, char * cdst, const int ne, const const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { - cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, 
nb13, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { + cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, + ne12, nb10, nb11, nb12, nb13, item_ct1); + }); } @@ -702,10 +718,13 @@ static void ggml_cpy_q4_0_q4_0(const char * cx, char * cdst, const int ne, const const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { - cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { + cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, + ne12, nb10, nb11, nb12, nb13, item_ct1); + }); } @@ -715,10 +734,13 @@ static void ggml_cpy_q4_1_q4_1(const char * cx, char * cdst, const int ne, const const int nb12, const int nb13, queue_ptr stream) { const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) { - cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { + cpy_q_q(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, + ne12, nb10, nb11, nb12, nb13, item_ct1); + }); } void ggml_sycl_cpy(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1) try { diff --git a/ggml/src/ggml-sycl/dmmv.cpp b/ggml/src/ggml-sycl/dmmv.cpp index 4f2760110c212..70579c0c3be11 100644 --- a/ggml/src/ggml-sycl/dmmv.cpp +++ b/ggml/src/ggml-sycl/dmmv.cpp @@ -208,12 +208,10 @@ static void convert_mul_mat_vec_f16_sycl(const void *vx, const dfloat *y, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - dequantize_mul_mat_vec<1, 1, convert_f16>(vx, y, dst, ncols, - nrows, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + dequantize_mul_mat_vec<1, 1, convert_f16>(vx, y, dst, ncols, nrows, item_ct1); + }); } } @@ -877,12 +875,11 @@ static void dequantize_mul_mat_vec_q4_0_sycl_reorder(const void *vx, const dfloa dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - dequantize_mul_mat_vec_reorder( - vx, y, dst, ncols, nrows, item_ct1); - 
}); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + dequantize_mul_mat_vec_reorder(vx, y, dst, ncols, + nrows, item_ct1); + }); } } @@ -900,12 +897,10 @@ static void dequantize_mul_mat_vec_q4_0_sycl(const void *vx, const dfloat *y, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - dequantize_mul_mat_vec( - vx, y, dst, ncols, nrows, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + dequantize_mul_mat_vec(vx, y, dst, ncols, nrows, item_ct1); + }); } } @@ -921,12 +916,10 @@ static void dequantize_mul_mat_vec_q4_1_sycl(const void *vx, const dfloat *y, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - dequantize_mul_mat_vec( - vx, y, dst, ncols, nrows, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + dequantize_mul_mat_vec(vx, y, dst, ncols, nrows, item_ct1); + }); } } @@ -942,12 +935,10 @@ static void dequantize_mul_mat_vec_q5_0_sycl(const void *vx, const dfloat *y, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - dequantize_mul_mat_vec( - vx, y, dst, ncols, nrows, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + dequantize_mul_mat_vec(vx, y, dst, ncols, nrows, item_ct1); + }); } } @@ -963,12 +954,10 @@ static void dequantize_mul_mat_vec_q5_1_sycl(const void *vx, const dfloat *y, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - dequantize_mul_mat_vec( - vx, y, dst, ncols, nrows, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + dequantize_mul_mat_vec(vx, y, dst, ncols, nrows, item_ct1); + }); } } @@ -984,12 +973,10 @@ static void dequantize_mul_mat_vec_q8_0_sycl(const void *vx, const dfloat *y, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - dequantize_mul_mat_vec( - vx, y, dst, ncols, nrows, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + dequantize_mul_mat_vec(vx, y, dst, ncols, nrows, item_ct1); + }); } } @@ -1002,11 +989,10 @@ static void dequantize_mul_mat_vec_q2_K_sycl(const void *vx, const float *y, const int block_num_y = (nrows + 
ny - 1) / ny; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { - dequantize_mul_mat_vec_q2_k(vx, y, dst, ncols, nrows, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { + dequantize_mul_mat_vec_q2_k(vx, y, dst, ncols, nrows, item_ct1); + }); } static void dequantize_mul_mat_vec_q3_K_sycl(const void *vx, const float *y, @@ -1018,11 +1004,10 @@ static void dequantize_mul_mat_vec_q3_K_sycl(const void *vx, const float *y, const int block_num_y = (nrows + ny - 1) / ny; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { - dequantize_mul_mat_vec_q3_k(vx, y, dst, ncols, nrows, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { + dequantize_mul_mat_vec_q3_k(vx, y, dst, ncols, nrows, item_ct1); + }); } static void dequantize_mul_mat_vec_q4_K_sycl(const void *vx, const float *y, @@ -1034,11 +1019,10 @@ static void dequantize_mul_mat_vec_q4_K_sycl(const void *vx, const float *y, const int block_num_y = (nrows + ny - 1) / ny; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { - dequantize_mul_mat_vec_q4_k(vx, y, dst, ncols, nrows, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { + dequantize_mul_mat_vec_q4_k(vx, y, dst, ncols, nrows, item_ct1); + }); } static void dequantize_mul_mat_vec_q5_K_sycl(const void *vx, const float *y, @@ -1047,11 +1031,10 @@ static void dequantize_mul_mat_vec_q5_K_sycl(const void *vx, const float *y, dpct::queue_ptr stream) { GGML_ASSERT(ncols % QK_K == 0); const sycl::range<3> block_dims(1, 1, QK_WARP_SIZE); - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { - dequantize_mul_mat_vec_q5_k(vx, y, dst, ncols, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { + dequantize_mul_mat_vec_q5_k(vx, y, dst, ncols, item_ct1); + }); } static void dequantize_mul_mat_vec_q6_K_sycl(const void *vx, const float *y, @@ -1063,11 +1046,10 @@ static void dequantize_mul_mat_vec_q6_K_sycl(const void *vx, const float *y, const int block_num_y = (nrows + ny - 1) / ny; const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] { - dequantize_mul_mat_vec_q6_k(vx, y, dst, ncols, nrows, item_ct1); - }); + 
sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims),
+                      [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] {
+                          dequantize_mul_mat_vec_q6_k(vx, y, dst, ncols, nrows, item_ct1);
+                      });
 }
 
 void ggml_sycl_op_dequantize_mul_mat_vec(
diff --git a/ggml/src/ggml-sycl/dpct/helper.hpp b/ggml/src/ggml-sycl/dpct/helper.hpp
index d538965b096bf..27c7278607832 100644
--- a/ggml/src/ggml-sycl/dpct/helper.hpp
+++ b/ggml/src/ggml-sycl/dpct/helper.hpp
@@ -13,10 +13,10 @@
 #ifndef GGML_SYCL_DPCT_HELPER_HPP
 #define GGML_SYCL_DPCT_HELPER_HPP
 
+#include
 #include
 #include
 #include
-#include
 
 #ifdef GGML_SYCL_USE_INTEL_ONEMKL
 #include
@@ -118,6 +118,36 @@ inline auto get_onemath_backend(sycl::queue& queue)
 #endif
 }
 
+#ifdef SYCL_EXT_ONEAPI_ENQUEUE_FUNCTIONS
+    namespace syclex = sycl::ext::oneapi::experimental;
+#endif
+
+template
+__dpct_inline__ void sycl_parallel_for(sycl::handler & cgh, sycl::nd_range nd_range, Func && func) {
+#ifdef SYCL_EXT_ONEAPI_ENQUEUE_FUNCTIONS
+    syclex::nd_launch(cgh, nd_range, func);
+#else
+    cgh.parallel_for(nd_range, func);
+#endif
+}
+
+template
+__dpct_inline__ void sycl_parallel_for(sycl::queue * q, sycl::nd_range nd_range, Func && func) {
+#ifdef SYCL_EXT_ONEAPI_ENQUEUE_FUNCTIONS
+    syclex::nd_launch(*q, nd_range, func);
+#else
+    q->parallel_for(nd_range, func);
+#endif
+}
+
+template __dpct_inline__ void sycl_launch(sycl::queue * stream, Func && func) {
+#ifdef SYCL_EXT_ONEAPI_ENQUEUE_FUNCTIONS
+    syclex::submit(*stream, func);
+#else
+    stream->submit(func);
+#endif
+}
+
 namespace dpct
 {
     typedef sycl::queue *queue_ptr;
diff --git a/ggml/src/ggml-sycl/element_wise.cpp b/ggml/src/ggml-sycl/element_wise.cpp
index 5b7c4f0b4f003..c56924ce8322f 100644
--- a/ggml/src/ggml-sycl/element_wise.cpp
+++ b/ggml/src/ggml-sycl/element_wise.cpp
@@ -329,60 +329,51 @@ static void acc_f32_sycl(const float *x, const float *y, float *dst,
                          const int ne12, const int nb1, const int nb2,
                          const int offset, queue_ptr stream) {
     int num_blocks = (n_elements + SYCL_ACC_BLOCK_SIZE - 1) / SYCL_ACC_BLOCK_SIZE;
-    stream->parallel_for(
-        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
-                              sycl::range<3>(1, 1, SYCL_ACC_BLOCK_SIZE),
-                          sycl::range<3>(1, 1, SYCL_ACC_BLOCK_SIZE)),
-        [=](sycl::nd_item<3> item_ct1) {
-            acc_f32(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset,
-                    item_ct1);
-        });
+    sycl_parallel_for(stream,
+                      sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_ACC_BLOCK_SIZE),
+                                        sycl::range<3>(1, 1, SYCL_ACC_BLOCK_SIZE)),
+                      [=](sycl::nd_item<3> item_ct1) {
+                          acc_f32(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset, item_ct1);
+                      });
 }
 
 template
 static void gelu_sycl(const T *x, T *dst, const int k, queue_ptr stream) {
     const int num_blocks = (k + SYCL_GELU_BLOCK_SIZE - 1) / SYCL_GELU_BLOCK_SIZE;
-    stream->parallel_for(
-        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
-                              sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE),
-                          sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)),
-        [=](sycl::nd_item<3> item_ct1) {
-            gelu(x, dst, k, item_ct1);
-        });
+    sycl_parallel_for(stream,
+                      sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE),
+                                        sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)),
+                      [=](sycl::nd_item<3> item_ct1) { gelu(x, dst, k, item_ct1); });
 }
 
 template
 static void silu_sycl(const T *x, T *dst, const int k, queue_ptr stream) {
     const int num_blocks = (k + SYCL_SILU_BLOCK_SIZE - 1) / SYCL_SILU_BLOCK_SIZE;
-    stream->parallel_for(
-        sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
-
sycl::range<3>(1, 1, SYCL_SILU_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_SILU_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - silu(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SILU_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_SILU_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { silu(x, dst, k, item_ct1); }); } template static void sgn_sycl(const T * x, T * dst, const int k, queue_ptr stream) { // hard code for now const int num_blocks = ceil_div(k, 256); - stream->parallel_for( - sycl::nd_range<3>((sycl::range<3>(1, 1, num_blocks) * sycl::range(1, 1, 256)), sycl::range(1, 1, 256)), [=](sycl::nd_item<3> item_ct1) { - sgn(x, dst, k, item_ct1); - }); + sycl_parallel_for( + stream, sycl::nd_range<3>((sycl::range<3>(1, 1, num_blocks) * sycl::range(1, 1, 256)), sycl::range(1, 1, 256)), + [=](sycl::nd_item<3> item_ct1) { sgn(x, dst, k, item_ct1); }); } template static void abs_sycl(const T * x, T * dst, const int k, queue_ptr stream) { // hard code for now const int num_blocks = ceil_div(k, 256); - stream->parallel_for( - sycl::nd_range<3>((sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, 256)), sycl::range<3>(1, 1, 256)), [=](sycl::nd_item<3> item_ct1) { - abs_op(x, dst, k, item_ct1); - }); + sycl_parallel_for( + stream, + sycl::nd_range<3>((sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, 256)), sycl::range<3>(1, 1, 256)), + [=](sycl::nd_item<3> item_ct1) { abs_op(x, dst, k, item_ct1); }); } @@ -390,23 +381,20 @@ template static void elu_sycl(const T * x, T * dst, const int k, queue_ptr stream) { // hard code for now const int num_blocks = ceil_div(k, 256); - stream->parallel_for( - sycl::nd_range<3>((sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, 256)), sycl::range<3>(1, 1, 256)), [=](sycl::nd_item<3> item_ct1) { - elu_op(x, dst, k, item_ct1); - }); + sycl_parallel_for( + stream, + sycl::nd_range<3>((sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, 256)), sycl::range<3>(1, 1, 256)), + [=](sycl::nd_item<3> item_ct1) { elu_op(x, dst, k, item_ct1); }); } template static void gelu_quick_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_GELU_BLOCK_SIZE - 1) / SYCL_GELU_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - gelu_quick(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { gelu_quick(x, dst, k, item_ct1); }); } @@ -414,169 +402,133 @@ template static void gelu_erf_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = ceil_div(k, SYCL_GELU_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - gelu_erf(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { gelu_erf(x, dst, k, item_ct1); }); } template static void tanh_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + 
SYCL_TANH_BLOCK_SIZE - 1) / SYCL_TANH_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_TANH_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_TANH_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - tanh(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_TANH_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_TANH_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { tanh(x, dst, k, item_ct1); }); } template static void relu_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_RELU_BLOCK_SIZE - 1) / SYCL_RELU_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - relu(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { relu(x, dst, k, item_ct1); }); } template static void hardsigmoid_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_HARDSIGMOID_BLOCK_SIZE - 1) / SYCL_HARDSIGMOID_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_HARDSIGMOID_BLOCK_SIZE), + sycl_parallel_for( + stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_HARDSIGMOID_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_HARDSIGMOID_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - hardsigmoid(x, dst, k, item_ct1); - }); + [=](sycl::nd_item<3> item_ct1) { hardsigmoid(x, dst, k, item_ct1); }); } template static void hardswish_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_HARDSWISH_BLOCK_SIZE - 1) / SYCL_HARDSWISH_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_HARDSWISH_BLOCK_SIZE), + sycl_parallel_for( + stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_HARDSWISH_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_HARDSWISH_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - hardswish(x, dst, k, item_ct1); - }); + [=](sycl::nd_item<3> item_ct1) { hardswish(x, dst, k, item_ct1); }); } template static void exp_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_EXP_BLOCK_SIZE - 1) / SYCL_EXP_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - exp(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { exp(x, dst, k, item_ct1); }); } template static void log_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_EXP_BLOCK_SIZE - 1) / SYCL_EXP_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - log(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + 
sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { log(x, dst, k, item_ct1); }); } template static void neg_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_NEG_BLOCK_SIZE - 1) / SYCL_NEG_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - neg(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { neg(x, dst, k, item_ct1); }); } template static void step_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_NEG_BLOCK_SIZE - 1) / SYCL_NEG_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - step(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { step(x, dst, k, item_ct1); }); } template static void sigmoid_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_SIGMOID_BLOCK_SIZE - 1) / SYCL_SIGMOID_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_SIGMOID_BLOCK_SIZE), + sycl_parallel_for( + stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SIGMOID_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_SIGMOID_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - sigmoid(x, dst, k, item_ct1); - }); + [=](sycl::nd_item<3> item_ct1) { sigmoid(x, dst, k, item_ct1); }); } template static void sqrt_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_SQRT_BLOCK_SIZE - 1) / SYCL_SQRT_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_SQRT_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_SQRT_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - sqrt(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SQRT_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_SQRT_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { sqrt(x, dst, k, item_ct1); }); } template static void sin_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_SIN_BLOCK_SIZE - 1) / SYCL_SIN_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - sin(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { sin(x, dst, k, item_ct1); }); } template static void cos_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_SIN_BLOCK_SIZE - 1) / SYCL_SIN_BLOCK_SIZE; - 
stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - cos(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { cos(x, dst, k, item_ct1); }); } template @@ -584,26 +536,20 @@ static void leaky_relu_sycl(const T *x, T *dst, const int k, const float negative_slope, queue_ptr stream) { const int num_blocks = (k + SYCL_RELU_BLOCK_SIZE - 1) / SYCL_RELU_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - leaky_relu(x, dst, k, negative_slope, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { leaky_relu(x, dst, k, negative_slope, item_ct1); }); } template static void sqr_sycl(const T *x, T *dst, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_SQR_BLOCK_SIZE - 1) / SYCL_SQR_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 1, SYCL_SQR_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_SQR_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - sqr(x, dst, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SQR_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_SQR_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { sqr(x, dst, k, item_ct1); }); } template @@ -614,9 +560,8 @@ static void upscale_sycl(const T *x, T *dst, const int nb00, const int nb01, int dst_size = ne10 * ne11 * ne12 * ne13; int num_blocks = (dst_size + SYCL_UPSCALE_BLOCK_SIZE - 1) / SYCL_UPSCALE_BLOCK_SIZE; sycl::range<1> gridDim(num_blocks * SYCL_UPSCALE_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<1>(gridDim, sycl::range<1>(SYCL_UPSCALE_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { + sycl_parallel_for<1>( + stream, sycl::nd_range<1>(gridDim, sycl::range<1>(SYCL_UPSCALE_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { upscale(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3, item_ct1); }); } @@ -627,12 +572,10 @@ static void pad_sycl(const T *x, T *dst, const int ne00, const int ne1, const int ne2, queue_ptr stream) { int num_blocks = (ne0 + SYCL_PAD_BLOCK_SIZE - 1) / SYCL_PAD_BLOCK_SIZE; sycl::range<3> gridDim(ne2, ne1, num_blocks); - stream->parallel_for( - sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - pad(x, dst, ne0, ne00, ne01, ne02, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { pad(x, dst, ne0, ne00, ne01, ne02, item_ct1); }); } template @@ -640,13 +583,10 @@ static void clamp_sycl(const T *x, T *dst, const float min, const float max, const int k, queue_ptr stream) { const int num_blocks = (k + SYCL_CLAMP_BLOCK_SIZE - 1) / SYCL_CLAMP_BLOCK_SIZE; - stream->parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * - sycl::range<3>(1, 
1, SYCL_CLAMP_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_CLAMP_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - clamp(x, dst, min, max, k, item_ct1); - }); + sycl_parallel_for(stream, + sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CLAMP_BLOCK_SIZE), + sycl::range<3>(1, 1, SYCL_CLAMP_BLOCK_SIZE)), + [=](sycl::nd_item<3> item_ct1) { clamp(x, dst, min, max, k, item_ct1); }); } inline void ggml_sycl_op_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { diff --git a/ggml/src/ggml-sycl/getrows.cpp b/ggml/src/ggml-sycl/getrows.cpp index 03f8dd907485e..9c76ffeb9508a 100644 --- a/ggml/src/ggml-sycl/getrows.cpp +++ b/ggml/src/ggml-sycl/getrows.cpp @@ -118,12 +118,10 @@ static void get_rows_sycl(ggml_backend_sycl_context & ctx, const ggml_tensor *sr GGML_ASSERT(ne00 % 2 == 0); - stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { - k_get_rows( - src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2, - s3, nb01, nb02, nb03, s10, s11, s12, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { + k_get_rows(src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2, s3, nb01, nb02, nb03, s10, s11, s12, + item_ct1); + }); GGML_UNUSED(dst); GGML_UNUSED(ctx); @@ -156,9 +154,8 @@ static void get_rows_sycl_float(ggml_backend_sycl_context & ctx, const ggml_tens dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { k_get_rows_float(src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2, s3, nb01, nb02, nb03, s10, s11, s12, item_ct1); }); diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 4b7610362b608..f25a96a625c51 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -1887,13 +1887,12 @@ static void argsort_f32_i32_sycl(const float *x, int *dst, const int ncols, const size_t shared_mem = ncols_pad * sizeof(int); if (order == GGML_SORT_ORDER_ASC) { - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor dpct_local_acc_ct1( sycl::range<1>(shared_mem), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { k_argsort_f32_i32( x, dst, ncols, ncols_pad, item_ct1, dpct_local_acc_ct1.get_multi_ptr() @@ -1901,13 +1900,12 @@ static void argsort_f32_i32_sycl(const float *x, int *dst, const int ncols, }); }); } else if (order == GGML_SORT_ORDER_DESC) { - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor dpct_local_acc_ct1( sycl::range<1>(shared_mem), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { k_argsort_f32_i32( x, dst, ncols, ncols_pad, item_ct1, dpct_local_acc_ct1.get_multi_ptr() @@ -1925,50 +1923,47 @@ static void argmax_f32_i32_sycl(const float *x, int *dst, const int ncols, const sycl::range<3> block_nums(1, nrows, 1); const size_t shared_mem = 256 * sizeof(float); - 
stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor shared_data( sycl::range<1>(shared_mem/sizeof(float)), cgh); sycl::local_accessor shared_indices( sycl::range<1>(shared_mem/sizeof(float)), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { - const int tid = item_ct1.get_local_id(2); - const int row = item_ct1.get_global_id(1); - - float max_val = -INFINITY; - int max_idx = -1; - - for (int col = tid; col < ncols; col += 256) { - float val = x[row * ncols + col]; - if (val > max_val) { - max_val = val; - max_idx = col; - } - } + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { + const int tid = item_ct1.get_local_id(2); + const int row = item_ct1.get_global_id(1); - shared_data[tid] = max_val; - shared_indices[tid] = max_idx; - item_ct1.barrier(sycl::access::fence_space::local_space); + float max_val = -INFINITY; + int max_idx = -1; - for (int stride = 256/2; stride > 0; stride >>= 1) { - if (tid < stride) { - float val1 = shared_data[tid]; - float val2 = shared_data[tid + stride]; - if (val2 > val1) { - shared_data[tid] = val2; - shared_indices[tid] = shared_indices[tid + stride]; - } - } - item_ct1.barrier(sycl::access::fence_space::local_space); + for (int col = tid; col < ncols; col += 256) { + float val = x[row * ncols + col]; + if (val > max_val) { + max_val = val; + max_idx = col; } + } + shared_data[tid] = max_val; + shared_indices[tid] = max_idx; + item_ct1.barrier(sycl::access::fence_space::local_space); - if (tid == 0) { - dst[row] = shared_indices[0]; + for (int stride = 256 / 2; stride > 0; stride >>= 1) { + if (tid < stride) { + float val1 = shared_data[tid]; + float val2 = shared_data[tid + stride]; + if (val2 > val1) { + shared_data[tid] = val2; + shared_indices[tid] = shared_indices[tid + stride]; + } } - }); + item_ct1.barrier(sycl::access::fence_space::local_space); + } + + if (tid == 0) { + dst[row] = shared_indices[0]; + } + }); }); } static void diag_mask_inf_f32_sycl(const float *x, float *dst, @@ -2952,7 +2947,7 @@ static void ggml_sycl_mul_mat_batched_sycl(ggml_backend_sycl_context & ctx, cons void ** ptrs_dst_get = ptrs_dst.get(); size_t nb12_scaled = src1->type == GGML_TYPE_F16 ? nb12 : s12 * sizeof(sycl::half); size_t nb13_scaled = src1->type == GGML_TYPE_F16 ? 
nb13 : s13 * sizeof(sycl::half); - cgh.parallel_for(sycl::nd_range<3>(block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { k_compute_batched_ptrs(src0_f16, src1_f16, dst_ddf, ptrs_src_get, ptrs_dst_get, ne12, ne13, ne23, nb02, nb03, nb12_scaled, nb13_scaled, nbd2, nbd3, r2, r3, item_ct1); }); @@ -3456,7 +3451,7 @@ static void ggml_sycl_mul_mat_id(ggml_backend_sycl_context & ctx, { sycl::range<3> block_dims(1, 1, std::min((unsigned int)ne10, 768u)); sycl::range<3> grid_dims(1, n_ids, ids->ne[1]); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor src1_row_acc(cgh); char *__restrict src1_contiguous_get = @@ -3468,9 +3463,8 @@ static void ggml_sycl_mul_mat_id(ggml_backend_sycl_context & ctx, size_t ids_nb_ct6 = ids->nb[1]; size_t ids_nb_ct7 = ids->nb[0]; - cgh.parallel_for( - sycl::nd_range<3>(grid_dims * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(grid_dims * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { k_copy_src1_to_contiguous( src1_original, src1_contiguous_get, dev_cur_src1_row_get, @@ -3501,15 +3495,14 @@ static void ggml_sycl_mul_mat_id(ggml_backend_sycl_context & ctx, { sycl::range<3> block_dims(1, 1, std::min((unsigned int)ne0, 768u)); sycl::range<3> grid_dims(1, 1, num_src1_rows); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { const char *__restrict dst_contiguous_get = dst_contiguous.get(); const mmid_row_mapping *__restrict dev_row_mapping_get = dev_row_mapping.get(); - cgh.parallel_for( - sycl::nd_range<3>(grid_dims * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(grid_dims * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { k_copy_dst_from_contiguous(dst_original, dst_contiguous_get, dev_row_mapping_get, diff --git a/ggml/src/ggml-sycl/gla.cpp b/ggml/src/ggml-sycl/gla.cpp index 879184fdd3111..b40cbf1f14fb2 100644 --- a/ggml/src/ggml-sycl/gla.cpp +++ b/ggml/src/ggml-sycl/gla.cpp @@ -11,13 +11,13 @@ static void gated_linear_attn_f32_kernel(const dpct::queue_ptr stream, u_int B, const u_int n_seq_tokens = T / B; sycl::range<1> block_dims((C / H)); sycl::range<1> grid_dims((B * H)); - stream->submit([&](sycl::handler & cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { /* local memory accessors*/ auto _k = sycl::local_accessor(sycl::range<1>(head_size), cgh); auto _r = sycl::local_accessor(sycl::range<1>(head_size), cgh); auto _td = sycl::local_accessor(sycl::range<1>(head_size), cgh); - cgh.parallel_for(sycl::nd_range<1>(grid_dims * block_dims, block_dims), [=](sycl::nd_item<1> item) { + sycl_parallel_for<1>(cgh, sycl::nd_range<1>(grid_dims * block_dims, block_dims), [=](sycl::nd_item<1> item) { u_int tid = item.get_local_id(0); u_int bid = item.get_group(0); diff --git a/ggml/src/ggml-sycl/im2col.cpp b/ggml/src/ggml-sycl/im2col.cpp index aa19c2527dc41..52737cc746dfa 100644 --- a/ggml/src/ggml-sycl/im2col.cpp +++ b/ggml/src/ggml-sycl/im2col.cpp @@ -70,7 +70,7 @@ static void im2col_sycl_internal(const float * x, T * dst, int64_t IW, int64_t I const int64_t CHW = IC * KH * KW; - stream->parallel_for(sycl::nd_range<3>(block_nums * local_range, local_range), [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * local_range, local_range), [=](sycl::nd_item<3> item_ct1) { im2col_kernel(x, dst, 
batch_offset, offset_delta, IC, IW, IH, OH, OW, KW, KH, parallel_elements, CHW, s0, s1, p0, p1, d0, d1, item_ct1); }); diff --git a/ggml/src/ggml-sycl/mmq.cpp b/ggml/src/ggml-sycl/mmq.cpp index ffb272aa28378..c72fcd38ebeff 100644 --- a/ggml/src/ggml-sycl/mmq.cpp +++ b/ggml/src/ggml-sycl/mmq.cpp @@ -1818,7 +1818,7 @@ static void ggml_mul_mat_q4_0_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_qs_q4_0_acc_ct1( sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_d_q4_0_acc_ct1( @@ -1829,9 +1829,8 @@ static void ggml_mul_mat_q4_0_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q4_0( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -1853,7 +1852,7 @@ static void ggml_mul_mat_q4_0_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_qs_q4_0_acc_ct1( sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_d_q4_0_acc_ct1( @@ -1864,9 +1863,8 @@ static void ggml_mul_mat_q4_0_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q4_0( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -1933,7 +1931,7 @@ static void ggml_mul_mat_q4_1_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_qs_q4_1_acc_ct1( sycl::range<1>(mmq_y * (WARP_SIZE) + +mmq_y), cgh); sycl::local_accessor tile_x_dm_q4_1_acc_ct1( @@ -1944,9 +1942,8 @@ static void ggml_mul_mat_q4_1_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q4_1( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -1968,7 +1965,7 @@ static void ggml_mul_mat_q4_1_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_qs_q4_1_acc_ct1( sycl::range<1>(mmq_y * (WARP_SIZE) + +mmq_y), cgh); sycl::local_accessor tile_x_dm_q4_1_acc_ct1( @@ -1979,9 +1976,8 @@ static void ggml_mul_mat_q4_1_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * 
WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q4_1( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2048,7 +2044,7 @@ static void ggml_mul_mat_q5_0_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_q5_0_acc_ct1( sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_d_q5_0_acc_ct1( @@ -2059,9 +2055,8 @@ static void ggml_mul_mat_q5_0_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q5_0( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2083,7 +2078,7 @@ static void ggml_mul_mat_q5_0_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_q5_0_acc_ct1( sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_d_q5_0_acc_ct1( @@ -2094,9 +2089,8 @@ static void ggml_mul_mat_q5_0_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q5_0( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2163,7 +2157,7 @@ static void ggml_mul_mat_q5_1_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_q5_1_acc_ct1( sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_dm_q5_1_acc_ct1( @@ -2174,9 +2168,8 @@ static void ggml_mul_mat_q5_1_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q5_1( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2198,7 +2191,7 @@ static void ggml_mul_mat_q5_1_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_q5_1_acc_ct1( sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_dm_q5_1_acc_ct1( @@ -2209,9 +2202,8 @@ static void ggml_mul_mat_q5_1_q8_1_sycl(const void *vx, const void *vy, 
sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q5_1( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2278,7 +2270,7 @@ static void ggml_mul_mat_q8_0_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_qs_q8_0_acc_ct1( sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_d_q8_0_acc_ct1( @@ -2289,9 +2281,8 @@ static void ggml_mul_mat_q8_0_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q8_0( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2313,7 +2304,7 @@ static void ggml_mul_mat_q8_0_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_qs_q8_0_acc_ct1( sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_d_q8_0_acc_ct1( @@ -2324,9 +2315,8 @@ static void ggml_mul_mat_q8_0_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q8_0( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2393,7 +2383,7 @@ static void ggml_mul_mat_q2_K_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_q2_K_acc_ct1( sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_dm_q2_K_acc_ct1( @@ -2406,9 +2396,8 @@ static void ggml_mul_mat_q2_K_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q2_K( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2431,7 +2420,7 @@ static void ggml_mul_mat_q2_K_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_q2_K_acc_ct1( sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_dm_q2_K_acc_ct1( @@ -2444,9 +2433,8 @@ static void 
ggml_mul_mat_q2_K_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q2_K( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2516,7 +2504,7 @@ static void ggml_mul_mat_q3_K_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_q3_K_acc_ct1( sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_dm_q3_K_acc_ct1( @@ -2531,9 +2519,8 @@ static void ggml_mul_mat_q3_K_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q3_K( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2557,7 +2544,7 @@ static void ggml_mul_mat_q3_K_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_q3_K_acc_ct1( sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_dm_q3_K_acc_ct1( @@ -2572,9 +2559,8 @@ static void ggml_mul_mat_q3_K_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q3_K( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2644,7 +2630,7 @@ static void ggml_mul_mat_q4_K_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_q4_K_acc_ct1( sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_dm_q4_K_acc_ct1( @@ -2657,9 +2643,8 @@ static void ggml_mul_mat_q4_K_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q4_K( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2682,7 +2667,7 @@ static void ggml_mul_mat_q4_K_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_q4_K_acc_ct1( sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh); sycl::local_accessor 
tile_x_dm_q4_K_acc_ct1( @@ -2695,9 +2680,8 @@ static void ggml_mul_mat_q4_K_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q4_K( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2765,7 +2749,7 @@ static void ggml_mul_mat_q5_K_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_q5_K_acc_ct1( sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_dm_q5_K_acc_ct1( @@ -2778,9 +2762,8 @@ static void ggml_mul_mat_q5_K_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q5_K( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2803,7 +2786,7 @@ static void ggml_mul_mat_q5_K_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_q5_K_acc_ct1( sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_dm_q5_K_acc_ct1( @@ -2816,9 +2799,8 @@ static void ggml_mul_mat_q5_K_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q5_K( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2886,7 +2868,7 @@ static void ggml_mul_mat_q6_K_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_acc_ct1( sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_dm_acc_ct1( @@ -2899,9 +2881,8 @@ static void ggml_mul_mat_q6_K_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q6_K( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, @@ -2924,7 +2905,7 @@ static void ggml_mul_mat_q6_K_q8_1_sycl(const void *vx, const void *vy, dpct::has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16}); - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor tile_x_ql_acc_ct1( sycl::range<1>(mmq_y * (2 * 
WARP_SIZE) + mmq_y), cgh); sycl::local_accessor tile_x_dm_acc_ct1( @@ -2937,9 +2918,8 @@ static void ggml_mul_mat_q6_K_q8_1_sycl(const void *vx, const void *vy, sycl::local_accessor tile_y_ds_acc_ct1( sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { mul_mat_q6_K( vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, item_ct1, diff --git a/ggml/src/ggml-sycl/mmvq.cpp b/ggml/src/ggml-sycl/mmvq.cpp index 5b7f064074937..c21929d51e94c 100644 --- a/ggml/src/ggml-sycl/mmvq.cpp +++ b/ggml/src/ggml-sycl/mmvq.cpp @@ -544,12 +544,12 @@ static void reorder_mul_mat_vec_q4_0_q8_1_sycl(const void * vx, const void * vy, const sycl::range<3> global_size(1, GGML_SYCL_MMV_Y, (block_num_y * WARP_SIZE)); const sycl::range<3> workgroup_size(1, GGML_SYCL_MMV_Y, num_subgroups * WARP_SIZE); - stream->submit([&](sycl::handler & cgh) { - cgh.parallel_for(sycl::nd_range<3>(global_size, workgroup_size), - [=](sycl::nd_item<3> nd_item) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q_reorder>(vx, vy, dst, ncols, nrows, - nd_item); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(global_size, workgroup_size), + [=](sycl::nd_item<3> nd_item) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_reorder>(vx, vy, dst, ncols, nrows, + nd_item); + }); }); } @@ -561,12 +561,12 @@ static void mul_mat_vec_q4_0_q8_1_sycl(const void * vx, const void * vy, float * const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - stream->submit([&](sycl::handler & cgh) { - cgh.parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q( + vx, vy, dst, ncols, nrows, item_ct1); + }); }); } } @@ -580,17 +580,12 @@ static void mul_mat_vec_q4_1_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q( + vx, vy, dst, ncols, nrows, item_ct1); + }); }); } } @@ -604,17 +599,12 @@ static void mul_mat_vec_q5_0_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, 
sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q( + vx, vy, dst, ncols, nrows, item_ct1); + }); }); } } @@ -628,17 +618,12 @@ static void mul_mat_vec_q5_1_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q( + vx, vy, dst, ncols, nrows, item_ct1); + }); }); } } @@ -652,17 +637,12 @@ static void mul_mat_vec_q8_0_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q( + vx, vy, dst, ncols, nrows, item_ct1); + }); }); } } @@ -676,17 +656,12 @@ static void mul_mat_vec_q2_K_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q( + vx, vy, dst, ncols, nrows, item_ct1); + }); }); } } @@ -700,17 +675,12 @@ static void mul_mat_vec_q3_K_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q( + vx, vy, dst, ncols, nrows, item_ct1); + }); }); } } @@ -724,17 +694,12 @@ static void mul_mat_vec_q4_K_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), 
- [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q( + vx, vy, dst, ncols, nrows, item_ct1); + }); }); } } @@ -750,12 +715,12 @@ static void reorder_mul_mat_vec_q4_k_q8_1_sycl(const void * vx, const void * vy, const sycl::range<3> global_size(1, GGML_SYCL_MMV_Y, block_num_y * WARP_SIZE); const sycl::range<3> workgroup_size(1, GGML_SYCL_MMV_Y, num_subgroups * WARP_SIZE); - stream->submit([&](sycl::handler & cgh) { - cgh.parallel_for(sycl::nd_range<3>(global_size, workgroup_size), - [=](sycl::nd_item<3> nd_item) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q_reorder>(vx, vy, dst, ncols, - nrows, nd_item); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(global_size, workgroup_size), + [=](sycl::nd_item<3> nd_item) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_reorder>(vx, vy, dst, ncols, nrows, + nd_item); + }); }); } @@ -769,17 +734,12 @@ static void mul_mat_vec_q5_K_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q( + vx, vy, dst, ncols, nrows, item_ct1); + }); }); } } @@ -794,12 +754,12 @@ static void reorder_mul_mat_vec_q6_k_q8_1_sycl(const void * vx, const void * vy, const sycl::range<3> global_size(1, GGML_SYCL_MMV_Y, block_num_y * WARP_SIZE); const sycl::range<3> workgroup_size(1, GGML_SYCL_MMV_Y, num_subgroups * WARP_SIZE); - stream->submit([&](sycl::handler & cgh) { - cgh.parallel_for(sycl::nd_range<3>(global_size, workgroup_size), - [=](sycl::nd_item<3> nd_item) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q_reorder>(vx, vy, dst, ncols, nrows, - nd_item); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(global_size, workgroup_size), + [=](sycl::nd_item<3> nd_item) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_reorder>(vx, vy, dst, ncols, nrows, + nd_item); + }); }); } static void mul_mat_vec_q6_K_q8_1_sycl(const void *vx, const void *vy, @@ -811,17 +771,12 @@ static void mul_mat_vec_q6_K_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q( + vx, vy, dst, ncols, 
nrows, item_ct1); + }); }); } } @@ -836,14 +791,12 @@ static void mul_mat_vec_iq2_xxs_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q_iq2_xxs_q8_1( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_iq2_xxs_q8_1(vx, vy, dst, ncols, + nrows, item_ct1); + }); }); } } @@ -857,14 +810,12 @@ static void mul_mat_vec_iq2_xs_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - stream->submit([&](sycl::handler & cgh) { - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q_iq2_xs_q8_1( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_iq2_xs_q8_1(vx, vy, dst, ncols, + nrows, item_ct1); + }); }); } } @@ -878,15 +829,12 @@ static void mul_mat_vec_iq2_s_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q_iq2_s_q8_1( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_iq2_s_q8_1(vx, vy, dst, ncols, nrows, + item_ct1); + }); }); } } @@ -900,15 +848,12 @@ static void mul_mat_vec_iq3_xxs_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q_iq3_xxs_q8_1( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_iq3_xxs_q8_1(vx, vy, dst, ncols, + nrows, item_ct1); + }); }); } } @@ -922,15 +867,12 @@ static void mul_mat_vec_iq3_s_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - 
mul_mat_vec_q_iq3_s_q8_1( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_iq3_s_q8_1(vx, vy, dst, ncols, nrows, + item_ct1); + }); }); } } @@ -944,15 +886,12 @@ static void mul_mat_vec_iq1_s_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q_iq1_s_q8_1( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_iq1_s_q8_1(vx, vy, dst, ncols, nrows, + item_ct1); + }); }); } } @@ -966,14 +905,12 @@ static void mul_mat_vec_iq1_m_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q_iq1_m_q8_1( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_iq1_m_q8_1(vx, vy, dst, ncols, nrows, + item_ct1); + }); }); } } @@ -987,15 +924,12 @@ static void mul_mat_vec_iq4_nl_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q_iq4_nl_q8_1( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_iq4_nl_q8_1(vx, vy, dst, ncols, nrows, + item_ct1); + }); }); } } @@ -1009,15 +943,12 @@ static void mul_mat_vec_iq4_xs_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - mul_mat_vec_q_iq4_xs_q8_1( - vx, vy, dst, ncols, nrows, item_ct1); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + mul_mat_vec_q_iq4_xs_q8_1(vx, vy, dst, ncols, + nrows, item_ct1); + }); }); } } diff --git a/ggml/src/ggml-sycl/norm.cpp b/ggml/src/ggml-sycl/norm.cpp index 
4ec1416849c7e..79d846b41a15d 100644 --- a/ggml/src/ggml-sycl/norm.cpp +++ b/ggml/src/ggml-sycl/norm.cpp @@ -254,14 +254,13 @@ static void norm_f32_sycl(const float * x, float * dst, const int ncols, const i GGML_ASSERT(ncols % WARP_SIZE == 0); if (ncols < 1024) { const sycl::range<3> block_dims(1, 1, WARP_SIZE); - stream->submit([&](sycl::handler& cgh) { - cgh.parallel_for( - sycl::nd_range<3>(global_dims * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - norm_f32(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, item_ct1, nullptr, WARP_SIZE); - }); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(global_dims * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + norm_f32(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, item_ct1, + nullptr, WARP_SIZE); + }); + }); } else { const int work_group_size = ggml_sycl_info().max_work_group_sizes[device]; @@ -272,16 +271,15 @@ static void norm_f32_sycl(const float * x, float * dst, const int ncols, const i the limit. To get the device limit, query info::device::max_work_group_size. Adjust the work-group size if needed. */ - stream->submit([&](sycl::handler& cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor s_sum_acc_ct1( sycl::range<1>(work_group_size / WARP_SIZE), cgh); - cgh.parallel_for( - sycl::nd_range<3>(global_dims * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - norm_f32(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, item_ct1, get_pointer(s_sum_acc_ct1), work_group_size); - }); - }); + sycl_parallel_for(cgh, sycl::nd_range<3>(global_dims * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + norm_f32(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, item_ct1, + get_pointer(s_sum_acc_ct1), work_group_size); + }); + }); } } @@ -290,18 +288,14 @@ static void group_norm_f32_sycl(const float* x, float* dst, const int ne_elements, queue_ptr stream, int device) { if (group_size < 1024) { const sycl::range<3> block_dims(1, 1, WARP_SIZE); - stream->submit([&](sycl::handler& cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { const float eps_ct4 = eps; - cgh.parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims, - block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - group_norm_f32( - x, dst, group_size, ne_elements, eps_ct4, item_ct1, - nullptr, WARP_SIZE); - }); - }); + sycl_parallel_for(cgh, sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + group_norm_f32(x, dst, group_size, ne_elements, eps_ct4, item_ct1, nullptr, + WARP_SIZE); + }); + }); } else { const int work_group_size = ggml_sycl_info().max_work_group_sizes[device]; @@ -313,22 +307,18 @@ static void group_norm_f32_sycl(const float* x, float* dst, info::device::max_work_group_size. Adjust the work-group size if needed. 
*/ - stream->submit([&](sycl::handler& cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor s_sum_acc_ct1(sycl::range<1>(work_group_size / WARP_SIZE), cgh); const float eps_ct4 = eps; - cgh.parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims, - block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - group_norm_f32(x, dst, group_size, ne_elements, - eps_ct4, item_ct1, - get_pointer(s_sum_acc_ct1), work_group_size); - }); - }); + sycl_parallel_for(cgh, sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + group_norm_f32(x, dst, group_size, ne_elements, eps_ct4, item_ct1, + get_pointer(s_sum_acc_ct1), work_group_size); + }); + }); } } @@ -340,14 +330,13 @@ static void rms_norm_f32_sycl(const float* x, float* dst, const int ncols, const const sycl::range<3> global_dims(nsamples, nchannels, nrows); if (ncols < 1024) { const sycl::range<3> block_dims(1, 1, WARP_SIZE); - stream->submit([&](sycl::handler& cgh) { - cgh.parallel_for( - sycl::nd_range<3>(global_dims * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - rms_norm_f32(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, item_ct1, nullptr, WARP_SIZE); - }); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(global_dims * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + rms_norm_f32(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, item_ct1, + nullptr, WARP_SIZE); + }); + }); } else { const int work_group_size = ggml_sycl_info().max_work_group_sizes[device]; @@ -358,16 +347,15 @@ static void rms_norm_f32_sycl(const float* x, float* dst, const int ncols, const the limit. To get the device limit, query info::device::max_work_group_size. Adjust the work-group size if needed. 
*/ - stream->submit([&](sycl::handler& cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor s_sum_acc_ct1(sycl::range<1>(work_group_size / WARP_SIZE), cgh); - cgh.parallel_for( - sycl::nd_range<3>(global_dims * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - rms_norm_f32(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, item_ct1, get_pointer(s_sum_acc_ct1), work_group_size); - }); - }); + sycl_parallel_for(cgh, sycl::nd_range<3>(global_dims * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + rms_norm_f32(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, item_ct1, + get_pointer(s_sum_acc_ct1), work_group_size); + }); + }); } } @@ -378,16 +366,12 @@ static void l2_norm_f32_sycl(const float* x, float* dst, const int ncols, // printf("%s ncols=%d, nrows=%d, WARP_SIZE=%d\n", __func__, ncols, nrows, WARP_SIZE); if (ncols < 1024) { const sycl::range<3> block_dims(1, 1, WARP_SIZE); - stream->submit([&](sycl::handler& cgh) { - cgh.parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, - block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - l2_norm_f32(x, dst, ncols, eps, item_ct1, - nullptr, WARP_SIZE); - }); - }); + sycl_launch(stream, [&](sycl::handler & cgh) { + sycl_parallel_for(cgh, sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + l2_norm_f32(x, dst, ncols, eps, item_ct1, nullptr, WARP_SIZE); + }); + }); } else { const int work_group_size = ggml_sycl_info().max_work_group_sizes[device]; @@ -398,18 +382,15 @@ static void l2_norm_f32_sycl(const float* x, float* dst, const int ncols, the limit. To get the device limit, query info::device::max_work_group_size. Adjust the work-group size if needed. */ - stream->submit([&](sycl::handler& cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor s_sum_acc_ct1(sycl::range<1>(work_group_size / WARP_SIZE), cgh); - cgh.parallel_for( - sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, - block_dims), - [=](sycl::nd_item<3> item_ct1) - [[sycl::reqd_sub_group_size(WARP_SIZE)]] { - l2_norm_f32(x, dst, ncols, eps, item_ct1, - get_pointer(s_sum_acc_ct1), work_group_size); - }); - }); + sycl_parallel_for(cgh, sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { + l2_norm_f32(x, dst, ncols, eps, item_ct1, get_pointer(s_sum_acc_ct1), + work_group_size); + }); + }); } } diff --git a/ggml/src/ggml-sycl/rope.cpp b/ggml/src/ggml-sycl/rope.cpp index 44473e1e5580c..e44c6b6ef8f42 100644 --- a/ggml/src/ggml-sycl/rope.cpp +++ b/ggml/src/ggml-sycl/rope.cpp @@ -235,20 +235,22 @@ static void rope_norm_sycl(const T * x, T * dst, const int ne0, const int ne1, c the limit. To get the device limit, query info::device::max_work_group_size. Adjust the work-group size if needed. 
*/ - stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { - rope_norm(x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, - theta_scale, freq_factors, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) { + rope_norm(x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, + attn_factor, corr_dims, theta_scale, freq_factors, item_ct1); + }); } else { /* DPCT1049:41: The work-group size passed to the SYCL kernel may exceed the limit. To get the device limit, query info::device::max_work_group_size. Adjust the work-group size if needed. */ - stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { - rope_norm(x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, - theta_scale, freq_factors, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) { + rope_norm(x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, + attn_factor, corr_dims, theta_scale, freq_factors, item_ct1); + }); } } @@ -267,15 +269,17 @@ static void rope_neox_sycl(const T * x, T * dst, const int ne0, const int ne1, c dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); if (freq_factors == nullptr) { - stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { - rope_neox(x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, - theta_scale, freq_factors, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) { + rope_neox(x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, + attn_factor, corr_dims, theta_scale, freq_factors, item_ct1); + }); } else { - stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { - rope_neox(x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, - theta_scale, freq_factors, item_ct1); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(block_nums * block_dims, block_dims), + [=](sycl::nd_item<3> item_ct1) { + rope_neox(x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, + attn_factor, corr_dims, theta_scale, freq_factors, item_ct1); + }); } } @@ -298,12 +302,12 @@ static void rope_multi_sycl(const T * x, T * dst, const int ne0, const int ne1, } // launch kernel if (freq_factors == nullptr) { - stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for(stream, nd_range, [=](sycl::nd_item<3> item_ct1) { rope_multi(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, freq_factors, sections, item_ct1); }); } else { - stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for(stream, nd_range, [=](sycl::nd_item<3> item_ct1) { rope_multi(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, freq_factors, sections, item_ct1); }); @@ -333,12 +337,12 @@ static void rope_vision_sycl(const T * x, T * dst, const int ne0, const int ne1, } // launch kernel if (freq_factors == nullptr) { - stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for(stream, nd_range, [=](sycl::nd_item<3> item_ct1) { 
rope_vision(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, freq_factors, sections, item_ct1); }); } else { - stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for(stream, nd_range, [=](sycl::nd_item<3> item_ct1) { rope_vision(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, freq_factors, sections, item_ct1); }); diff --git a/ggml/src/ggml-sycl/softmax.cpp b/ggml/src/ggml-sycl/softmax.cpp index 52fcf4b3dbd24..7b60c292e0c92 100644 --- a/ggml/src/ggml-sycl/softmax.cpp +++ b/ggml/src/ggml-sycl/softmax.cpp @@ -127,11 +127,11 @@ static void soft_max_f32_submitter(const float * x, const T * mask, float * dst, const int nrows_y, const float scale, const float max_bias, const float m0, const float m1, uint32_t n_head_log2, sycl::range<3> block_nums, sycl::range<3> block_dims, const size_t n_local_scratch, queue_ptr stream) { - stream->submit([&](sycl::handler &cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor local_buf_acc(n_local_scratch, cgh); - cgh.parallel_for( - sycl::nd_range<3>(block_nums * block_dims, block_dims), + sycl_parallel_for( + cgh, sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] { soft_max_f32(x, mask, dst, ncols_par, nrows_y, scale, max_bias, m0, diff --git a/ggml/src/ggml-sycl/tsembd.cpp b/ggml/src/ggml-sycl/tsembd.cpp index f6ca626ea7a53..721c8fa6fa27e 100644 --- a/ggml/src/ggml-sycl/tsembd.cpp +++ b/ggml/src/ggml-sycl/tsembd.cpp @@ -45,14 +45,9 @@ static void timestep_embedding_f32_sycl( int num_blocks = (half_ceil + SYCL_TIMESTEP_EMBEDDING_BLOCK_SIZE - 1) / SYCL_TIMESTEP_EMBEDDING_BLOCK_SIZE; sycl::range<3> block_dims(1, 1, SYCL_TIMESTEP_EMBEDDING_BLOCK_SIZE); sycl::range<3> gridDim(1, ne00, num_blocks); - stream->parallel_for( - sycl::nd_range<3>( - gridDim * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { - timestep_embedding_f32( - x, dst, nb1, dim, max_period, item_ct1 - ); - }); + sycl_parallel_for(stream, sycl::nd_range<3>(gridDim * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { + timestep_embedding_f32(x, dst, nb1, dim, max_period, item_ct1); + }); } void ggml_sycl_op_timestep_embedding(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { diff --git a/ggml/src/ggml-sycl/wkv.cpp b/ggml/src/ggml-sycl/wkv.cpp index c10e2f7645e89..3ed5bbf355ad9 100644 --- a/ggml/src/ggml-sycl/wkv.cpp +++ b/ggml/src/ggml-sycl/wkv.cpp @@ -207,12 +207,11 @@ void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { // Submit kernel if (C / H == WKV_BLOCK_SIZE) { - stream->submit([&](sycl::handler& cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor shared_mem_acc(shared_mem_size, cgh); - cgh.parallel_for( - sycl::nd_range<3>(grid_dims * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(grid_dims * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { rwkv_wkv6_f32_kernel( B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d, item_ct1, (float*)shared_mem_acc.get_multi_ptr().get() @@ -220,12 +219,11 @@ void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { }); }); } else { - stream->submit([&](sycl::handler& cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor shared_mem_acc(shared_mem_size, cgh); - cgh.parallel_for( - sycl::nd_range<3>(grid_dims * block_dims, block_dims), - 
[=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(grid_dims * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { rwkv_wkv6_f32_kernel( B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d, item_ct1, (float*)shared_mem_acc.get_multi_ptr().get() @@ -264,12 +262,11 @@ void ggml_sycl_op_rwkv_wkv7(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { // Submit kernel if (C / H == WKV_BLOCK_SIZE) { - stream->submit([&](sycl::handler& cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor shared_mem_acc(shared_mem_size, cgh); - cgh.parallel_for( - sycl::nd_range<3>(grid_dims * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(grid_dims * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { rwkv_wkv7_f32_kernel( B, T, C, H, r_d, w_d, k_d, v_d, a_d, b_d, s_d, dst_d, item_ct1, (float*)shared_mem_acc.get_multi_ptr().get() @@ -277,12 +274,11 @@ void ggml_sycl_op_rwkv_wkv7(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { }); }); } else { - stream->submit([&](sycl::handler& cgh) { + sycl_launch(stream, [&](sycl::handler & cgh) { sycl::local_accessor shared_mem_acc(shared_mem_size, cgh); - cgh.parallel_for( - sycl::nd_range<3>(grid_dims * block_dims, block_dims), - [=](sycl::nd_item<3> item_ct1) { + sycl_parallel_for( + cgh, sycl::nd_range<3>(grid_dims * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) { rwkv_wkv7_f32_kernel( B, T, C, H, r_d, w_d, k_d, v_d, a_d, b_d, s_d, dst_d, item_ct1, (float*)shared_mem_acc.get_multi_ptr().get() From 4bf6bc5988364ecd0ea86372d1f546487ff1773a Mon Sep 17 00:00:00 2001 From: Ruikai Peng Date: Fri, 20 Jun 2025 22:13:06 +0800 Subject: [PATCH 115/192] vocab : prevent tokenizer overflow (#14301) * vocab : prevent stack overflow in tokenize * vocab : return error instead of aborting on oversized token count * vocab : INT32_MIN from llama_tokenize on overflow --- common/common.cpp | 3 +++ include/llama.h | 1 + src/llama-vocab.cpp | 5 +++++ 3 files changed, 9 insertions(+) diff --git a/common/common.cpp b/common/common.cpp index c2c94e7ae6c08..e4e71ad13fb59 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1290,6 +1290,9 @@ std::vector common_tokenize( int n_tokens = text.length() + 2 * add_special; std::vector result(n_tokens); n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); + if (n_tokens == std::numeric_limits::min()) { + throw std::runtime_error("Tokenization failed: input text too large, tokenization result exceeds int32_t limit"); + } if (n_tokens < 0) { result.resize(-n_tokens); int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); diff --git a/include/llama.h b/include/llama.h index 3475d596502c6..b04720bee59ef 100644 --- a/include/llama.h +++ b/include/llama.h @@ -1088,6 +1088,7 @@ extern "C" { /// @param tokens The tokens pointer must be large enough to hold the resulting tokens. /// @return Returns the number of tokens on success, no more than n_tokens_max /// @return Returns a negative number on failure - the number of tokens that would have been returned + /// @return Returns INT32_MIN on overflow (e.g., tokenization result size exceeds int32_t limit) /// @param add_special Allow to add BOS and EOS tokens if model is configured to do so. /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated /// as plaintext. 
Does not insert a leading space. diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index 4ab120d9ba818..4aaf4c8250ce5 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -3074,6 +3074,11 @@ int32_t llama_vocab::tokenize( bool add_special, bool parse_special) const { auto res = tokenize(std::string(text, text_len), add_special, parse_special); + if (res.size() >= static_cast(std::numeric_limits::max())) { + LLAMA_LOG_ERROR("%s: tokenization result size %zu exceeds int32_t limit\n", __func__, res.size()); + return std::numeric_limits::min(); + } + if (n_tokens_max < (int) res.size()) { // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__); return -((int) res.size()); From e6d6a55904b32301404793405d12592b066ec771 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Fri, 20 Jun 2025 16:37:44 +0200 Subject: [PATCH 116/192] lint : remove trailing whitepace (#14304) --- src/llama-vocab.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index 4aaf4c8250ce5..5c9eb87566dde 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -3078,7 +3078,7 @@ int32_t llama_vocab::tokenize( LLAMA_LOG_ERROR("%s: tokenization result size %zu exceeds int32_t limit\n", __func__, res.size()); return std::numeric_limits::min(); } - + if (n_tokens_max < (int) res.size()) { // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__); return -((int) res.size()); From 7cc2c0b773f7cb950aad3ffebb41310c136f1bcb Mon Sep 17 00:00:00 2001 From: Aman Gupta Date: Fri, 20 Jun 2025 22:48:24 +0800 Subject: [PATCH 117/192] CUDA: add conv_2d_transpose (#14287) * CUDA: add conv_2d_transpose * remove direct include of cuda_fp16 * Review: add brackets for readability, remove ggml_set_param and add asserts --- ggml/src/ggml-cuda/conv2d-transpose.cu | 91 +++++++++++++++++++++++++ ggml/src/ggml-cuda/conv2d-transpose.cuh | 4 ++ ggml/src/ggml-cuda/ggml-cuda.cu | 5 ++ tests/test-backend-ops.cpp | 34 +++++++++ 4 files changed, 134 insertions(+) create mode 100644 ggml/src/ggml-cuda/conv2d-transpose.cu create mode 100644 ggml/src/ggml-cuda/conv2d-transpose.cuh diff --git a/ggml/src/ggml-cuda/conv2d-transpose.cu b/ggml/src/ggml-cuda/conv2d-transpose.cu new file mode 100644 index 0000000000000..03224e404d32d --- /dev/null +++ b/ggml/src/ggml-cuda/conv2d-transpose.cu @@ -0,0 +1,91 @@ +#include + +#include "conv2d-transpose.cuh" +#include "ggml.h" + +__global__ void conv2d_transpose_kernel(const float * __restrict__ input, const half * __restrict__ kernel, + float * __restrict__ output, const int in_w, const int in_h, const int out_w, + const int out_h, const int kernel_w, const int kernel_h, const int stride, + const int c_in, const int c_out, const int batches) { + const int global_idx = blockIdx.x * blockDim.x + threadIdx.x; + + const int total_elements = out_w * out_h * c_out * batches; + + if (global_idx >= total_elements) { + return; + } + + const int out_x_idx = global_idx % out_w; + const int out_y_idx = (global_idx / out_w) % out_h; + const int c_idx = (global_idx / (out_w * out_h)) % c_out; + const int n_idx = global_idx / (out_w * out_h * c_out); + + float accumulator = 0; + // For each output idx, find the inputs that contribute to it by checking stride alignment and bounds + + for (int c_in_idx = 0; c_in_idx < c_in; c_in_idx++) { + for (int kh = 0; kh < kernel_h; ++kh) { + int in_y = out_y_idx - kh; + if (in_y < 0 || in_y % stride) continue; + in_y /= stride; + if (in_y >= in_h) continue; + + for (int kw = 0; kw < kernel_w; ++kw) { + int 
in_x = out_x_idx - kw; + if (in_x < 0 || in_x % stride) continue; + in_x /= stride; + if (in_x >= in_w) continue; + + const int input_idx = (in_w * in_h * c_in) * n_idx + (in_w * in_h) * c_in_idx + (in_w) *in_y + in_x; + const int kernel_idx = + (kernel_h * kernel_w * c_out) * c_in_idx + (kernel_h * kernel_w) * c_idx + (kernel_w) *kh + kw; + + float input_val = input[input_idx]; + half kern_val = kernel[kernel_idx]; + + accumulator += input_val * (float) kern_val; + } + } + } + + output[(out_w * out_h * c_out) * n_idx + (out_w * out_h) * c_idx + (out_w) *out_y_idx + out_x_idx] = accumulator; +} + +//input is (W, H, C_in, N), Kernel is (W, H, C_out, C_in) +void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * kernel = dst->src[0]; + const ggml_tensor * input = dst->src[1]; + + GGML_ASSERT(kernel->type == GGML_TYPE_F16 && input->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32); + + const float * input_data = (const float *) input->data; + float * output_data = (float *) dst->data; + const half * kernel_data = (const half *) kernel->data; + + const int input_w = input->ne[0]; + const int input_h = input->ne[1]; + const int output_w = dst->ne[0]; + const int output_h = dst->ne[1]; + const int channels_in = input->ne[2]; + const int channels_out = kernel->ne[2]; + const int kernel_w = kernel->ne[0]; + const int kernel_h = kernel->ne[1]; + const int stride = dst->op_params[0]; + const int batches = input->ne[3]; + + GGML_ASSERT(channels_in == kernel->ne[3]); + GGML_ASSERT(stride > 0); + + cudaStream_t st = ctx.stream(); + + GGML_ASSERT(ggml_is_contiguous(input)); + GGML_ASSERT(ggml_is_contiguous(kernel)); + GGML_ASSERT(ggml_is_contiguous(dst)); + + const int total = (output_w * output_h * channels_out * batches); + const int blocks = (total + CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE - 1) / CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE; + + conv2d_transpose_kernel<<>>( + input_data, kernel_data, output_data, input_w, input_h, output_w, output_h, kernel_w, kernel_h, stride, + channels_in, channels_out, batches); +} diff --git a/ggml/src/ggml-cuda/conv2d-transpose.cuh b/ggml/src/ggml-cuda/conv2d-transpose.cuh new file mode 100644 index 0000000000000..c9430b2485021 --- /dev/null +++ b/ggml/src/ggml-cuda/conv2d-transpose.cuh @@ -0,0 +1,4 @@ +#include "common.cuh" + +#define CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE 256 +void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 530f541f97d62..5bab92e347a7e 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -12,6 +12,7 @@ #include "ggml-cuda/concat.cuh" #include "ggml-cuda/conv-transpose-1d.cuh" #include "ggml-cuda/conv2d-dw.cuh" +#include "ggml-cuda/conv2d-transpose.cuh" #include "ggml-cuda/convert.cuh" #include "ggml-cuda/count-equal.cuh" #include "ggml-cuda/cpy.cuh" @@ -2341,6 +2342,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_CONV_2D_DW: ggml_cuda_op_conv2d_dw(ctx, dst); break; + case GGML_OP_CONV_TRANSPOSE_2D: + ggml_cuda_conv_2d_transpose_p0(ctx, dst); + break; case GGML_OP_CONV_TRANSPOSE_1D: ggml_cuda_op_conv_transpose_1d(ctx,dst); break; @@ -3252,6 +3256,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g } case GGML_OP_IM2COL: case GGML_OP_CONV_2D_DW: + case GGML_OP_CONV_TRANSPOSE_2D: case GGML_OP_POOL_2D: case GGML_OP_SUM: case GGML_OP_SUM_ROWS: diff --git 
a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 509a4b35f57cb..772bee346f000 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -2725,6 +2725,35 @@ struct test_conv_transpose_1d : public test_case { } }; +// GGML_OP_CONV_TRANSPOSE_2D +struct test_conv_transpose_2d : public test_case { + const std::array ne_input; + const std::array ne_kernel; + const int stride; + + std::string vars() override { + return VARS_TO_STR3(ne_input, ne_kernel, stride); + } + + test_conv_transpose_2d(std::array ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1] + std::array ne_kernel = {3, 3, 3, 1}, // [kernel_width, kernel_height, input_channels, 1] + int stride = 1) + : ne_input(ne_input), ne_kernel(ne_kernel), stride(stride){} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); + ggml_set_name(input, "input"); + + ggml_tensor * kernel = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne_kernel.data()); + ggml_set_name(kernel, "kernel"); + + ggml_tensor * out = ggml_conv_transpose_2d_p0(ctx, kernel, input, stride); + ggml_set_name(out, "out"); + + return out; + } +}; + // GGML_OP_IM2COL struct test_im2col : public test_case { const ggml_type type_input; @@ -4050,6 +4079,9 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,1,2,1}, 1, 0, 1)); test_cases.emplace_back(new test_conv_transpose_1d({2,1,1,1}, {3,1,1,1}, 1, 0, 1)); + test_cases.emplace_back(new test_conv_transpose_2d({3, 2, 3, 1}, {2, 2, 1, 3}, 1)); + test_cases.emplace_back(new test_conv_transpose_2d({10, 10, 9, 1}, {3, 3, 1, 9}, 2)); + test_cases.emplace_back(new test_count_equal(GGML_TYPE_F32, {4, 500, 1, 1})); test_cases.emplace_back(new test_count_equal(GGML_TYPE_F32, {4, 5000, 1, 1})); @@ -4618,6 +4650,8 @@ static std::vector> make_test_cases_perf() { test_cases.emplace_back(new test_conv_2d_dw({512, 512, 256, 1}, {3, 3, 1, 256}, 1, 1, 1, false)); test_cases.emplace_back(new test_conv_2d_dw({512, 512, 256, 1}, {3, 3, 1, 256}, 1, 1, 1, true)); + test_cases.emplace_back(new test_conv_transpose_2d({256, 256, 256, 1}, {3, 3, 16, 256}, 1)); + return test_cases; } From cf7d3eaa2d50ac5b5ee6b5363e9243a523b3f509 Mon Sep 17 00:00:00 2001 From: David Chiu Date: Sat, 21 Jun 2025 01:43:35 +0800 Subject: [PATCH 118/192] docs : fix the link to llama.h (#14293) --- docs/build.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/build.md b/docs/build.md index 680b0d8398741..20a6f606eb779 100644 --- a/docs/build.md +++ b/docs/build.md @@ -1,6 +1,6 @@ # Build llama.cpp locally -The main product of this project is the `llama` library. Its C-style interface can be found in [include/llama.h](include/llama.h). +The main product of this project is the `llama` library. Its C-style interface can be found in [include/llama.h](../include/llama.h). The project also includes many example programs and tools using the `llama` library. The examples range from simple, minimal code snippets to sophisticated sub-projects such as an OpenAI-compatible HTTP server. 
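For reference alongside the CUDA conv_2d_transpose patch above: a minimal host-side sketch of how a transposed-convolution node is built through the public ggml API. The ggml_conv_transpose_2d_p0 call and the (input, kernel) tensor layouts are taken from the new {3,2,3,1} x {2,2,1,3} test case; the context size, the main() harness, and the expected-shape comment ((in - 1) * stride + kernel per spatial dimension, output channels taken from kernel->ne[2]) are illustrative assumptions inferred from the kernel's indexing, not part of the patch itself.

// sketch only, not part of the patch series
#include <cstdio>
#include "ggml.h"

int main() {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16u * 1024 * 1024,   // assumed scratch size for this toy graph
        /*.mem_buffer =*/ nullptr,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // input is (W, H, C_in, N), kernel is (W, H, C_out, C_in), mirroring the first new test case
    struct ggml_tensor * input  = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 3, 2, 3, 1);
    struct ggml_tensor * kernel = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, 2, 2, 1, 3);

    const int stride = 1;
    struct ggml_tensor * out = ggml_conv_transpose_2d_p0(ctx, kernel, input, stride);

    // the output shape is fixed at graph-build time; with stride 1 this is expected to print 4 x 3 x 1 x 1
    printf("out: %lld x %lld x %lld x %lld\n",
           (long long) out->ne[0], (long long) out->ne[1],
           (long long) out->ne[2], (long long) out->ne[3]);

    ggml_free(ctx);
    return 0;
}

Actually evaluating the node would then go through the usual ggml_build_forward_expand / graph-compute path for whichever backend is in use, which is where the new CUDA kernel is dispatched.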
From bbbf0609e807e9cfa5b31a06c0ee96d2e918d16c Mon Sep 17 00:00:00 2001 From: Acly Date: Wed, 18 Jun 2025 13:34:50 +0200 Subject: [PATCH 119/192] Add `ggml_roll` (ggml/1274) * ggml : add ggml_roll * use set/get_op_params & std::min --- ggml/include/ggml.h | 12 +++++++ ggml/src/ggml-cpu/ggml-cpu.c | 5 +++ ggml/src/ggml-cpu/ops.cpp | 67 ++++++++++++++++++++++++++++++++++++ ggml/src/ggml-cpu/ops.h | 1 + ggml/src/ggml.c | 34 ++++++++++++++++-- 5 files changed, 117 insertions(+), 2 deletions(-) diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 1a57f1cd75a31..9c4e24023b5ad 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -489,6 +489,7 @@ extern "C" { GGML_OP_UPSCALE, // nearest interpolate GGML_OP_PAD, GGML_OP_PAD_REFLECT_1D, + GGML_OP_ROLL, GGML_OP_ARANGE, GGML_OP_TIMESTEP_EMBEDDING, GGML_OP_ARGSORT, @@ -1801,6 +1802,17 @@ extern "C" { int p0, int p1); + // Move tensor elements by an offset given for each dimension. Elements that + // are shifted beyond the last position are wrapped around to the beginning. + GGML_API struct ggml_tensor * ggml_roll( + struct ggml_context * ctx, + struct ggml_tensor * a, + int shift0, + int shift1, + int shift2, + int shift3); + + // Ref: https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/util.py#L151 // timesteps: [N,] // return: [N, dim] diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index 1bb9c4e367f0f..1d3cd009affc6 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -1890,6 +1890,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_pad_reflect_1d(params, tensor); } break; + case GGML_OP_ROLL: + { + ggml_compute_forward_roll(params, tensor); + } break; case GGML_OP_ARANGE: { ggml_compute_forward_arange(params, tensor); @@ -2214,6 +2218,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { case GGML_OP_UPSCALE: case GGML_OP_PAD: case GGML_OP_PAD_REFLECT_1D: + case GGML_OP_ROLL: case GGML_OP_ARANGE: case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_ARGSORT: diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 08facb6d03d5e..eff4a53e3442b 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -6793,6 +6793,73 @@ void ggml_compute_forward_pad_reflect_1d( } } +// ggml_compute_forward_roll + +static int64_t ggml_wrap_index(int64_t i, int64_t ne) { + if (i < 0) { + return i + ne; + } else if (i >= ne) { + return i - ne; + } + return i; +} + +static void ggml_compute_forward_roll_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const float * src_data = (const float *) src0->data; + float * dst_data = (float *) dst->data; + + GGML_TENSOR_UNARY_OP_LOCALS + + const int s0 = ggml_get_op_params_i32(dst, 0); + const int s1 = ggml_get_op_params_i32(dst, 1); + const int s2 = ggml_get_op_params_i32(dst, 2); + const int s3 = ggml_get_op_params_i32(dst, 3); + + const int64_t total = ne1 * ne2 * ne3; + const int64_t per_thread = (total + params->nth) / params->nth; + const int64_t start = params->ith * per_thread; + const int64_t end = std::min(start + per_thread, total); + + for (int64_t i = start; i < end; ++i) { + const int64_t i1 = i % ne1; + const int64_t i2 = (i / ne1) % ne2; + const int64_t i3 = i / (ne2 * ne1); + float * dst_row = dst_data + (i3*nb3 + i2*nb2 + i1*nb1) / sizeof(float); + + const int64_t i01 = ggml_wrap_index(i1 - s1, ne01); + const int64_t i02 = ggml_wrap_index(i2 - s2, 
ne02); + const int64_t i03 = ggml_wrap_index(i3 - s3, ne03); + const float * src_row = src_data + (i03*nb03 + i02*nb02 + i01*nb01) / sizeof(float); + + const int64_t s = ggml_wrap_index(-s0, ne00); + const int64_t n = ne00 - s; + ggml_vec_cpy_f32(n, dst_row, src_row + s); + ggml_vec_cpy_f32(s, dst_row + n, src_row); + } +} + +void ggml_compute_forward_roll( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_roll_f32(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + // ggml_compute_forward_arange static void ggml_compute_forward_arange_f32( diff --git a/ggml/src/ggml-cpu/ops.h b/ggml/src/ggml-cpu/ops.h index dc081b9e66397..2d8544d7d3d43 100644 --- a/ggml/src/ggml-cpu/ops.h +++ b/ggml/src/ggml-cpu/ops.h @@ -72,6 +72,7 @@ void ggml_compute_forward_pool_2d_back(const struct ggml_compute_params * params void ggml_compute_forward_upscale(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_pad(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_pad_reflect_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst); +void ggml_compute_forward_roll(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_arange(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_timestep_embedding(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_argsort(const struct ggml_compute_params * params, struct ggml_tensor * dst); diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index a8edad3778aa9..f8e7c595bce15 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -955,6 +955,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "UPSCALE", "PAD", "PAD_REFLECT_1D", + "ROLL", "ARANGE", "TIMESTEP_EMBEDDING", "ARGSORT", @@ -985,7 +986,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "OPT_STEP_ADAMW", }; -static_assert(GGML_OP_COUNT == 82, "GGML_OP_COUNT != 82"); +static_assert(GGML_OP_COUNT == 83, "GGML_OP_COUNT != 83"); static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "none", @@ -1050,6 +1051,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "upscale(x)", "pad(x)", "pad_reflect_1d(x)", + "roll(x)", "arange(start, stop, step)", "timestep_embedding(timesteps, dim, max_period)", "argsort(x)", @@ -1080,7 +1082,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "adamw(x)", }; -static_assert(GGML_OP_COUNT == 82, "GGML_OP_COUNT != 82"); +static_assert(GGML_OP_COUNT == 83, "GGML_OP_COUNT != 83"); static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2"); @@ -4341,6 +4343,34 @@ struct ggml_tensor * ggml_pad_reflect_1d( return result; } +// ggml_roll + +struct ggml_tensor * ggml_roll( + struct ggml_context * ctx, + struct ggml_tensor * a, + int shift0, + int shift1, + int shift2, + int shift3) { + GGML_ASSERT(a->nb[0] == ggml_type_size(a->type)); + GGML_ASSERT(abs(shift0) < a->ne[0]); + GGML_ASSERT(abs(shift1) < a->ne[1]); + GGML_ASSERT(abs(shift2) < a->ne[2]); + GGML_ASSERT(abs(shift3) < a->ne[3]); + + struct ggml_tensor * result = ggml_dup_tensor(ctx, a); + + ggml_set_op_params_i32(result, 0, shift0); + ggml_set_op_params_i32(result, 1, shift1); + ggml_set_op_params_i32(result, 2, shift2); + ggml_set_op_params_i32(result, 3, shift3); + + result->op = GGML_OP_ROLL; + result->src[0] = a; + + return result; +} + 
// ggml_arange struct ggml_tensor * ggml_arange( From 107ea62047b780225f5036f3ecf4e1764c9a1f9f Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 20 Jun 2025 20:50:24 +0300 Subject: [PATCH 120/192] sync : ggml ggml-ci --- scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index bb5d56a0e0c92..bd1e04ed07434 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -8cda0a3c19f2c7dc493887353c42f6956bc268b1 +9e4bee1c5afc2d677a5b32ecb90cbdb483e81fff From ffdd7a08c26ee1c24df23612ebc272124ea2ee26 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Jun 2025 21:32:01 -0700 Subject: [PATCH 121/192] convert : fix Llama 4 conversion (#14311) --- convert_hf_to_gguf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 2fe76589eb062..bbf8b30ff5324 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -2193,7 +2193,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter name += ".weight" if "multi_modal_projector.linear_1" in name: # despite the name with number postfix, this is a single fully connected layer - return [(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC], data_torch)] + return [(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC] + '.weight', data_torch)] return [(self.map_tensor_name(name), data_torch)] return [] From 3de2fe47376e858a33eac3ee0f172325df42bce3 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 21 Jun 2025 08:03:46 +0300 Subject: [PATCH 122/192] memory : rename interface to llama_memory_context_i (#14296) * memory : rename interface to llama_memory_context_i ggml-ci * cont : fix comments * cont : use "mctx" for referencing a memory context ggml-ci --- src/llama-context.cpp | 78 ++++++++++----------- src/llama-context.h | 24 +++---- src/llama-graph.cpp | 104 ++++++++++++++-------------- src/llama-graph.h | 53 +++++++------- src/llama-kv-cache-unified-iswa.cpp | 70 +++++++++---------- src/llama-kv-cache-unified-iswa.h | 36 +++++----- src/llama-kv-cache-unified.cpp | 50 ++++++------- src/llama-kv-cache-unified.h | 32 ++++----- src/llama-memory-hybrid.cpp | 70 +++++++++---------- src/llama-memory-hybrid.h | 28 ++++---- src/llama-memory-recurrent.cpp | 46 ++++++------ src/llama-memory-recurrent.h | 28 ++++---- src/llama-memory.h | 33 ++++----- src/llama-model.cpp | 28 ++++---- 14 files changed, 339 insertions(+), 341 deletions(-) diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 5a18a4fb3939a..e352d81e4ed7c 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -280,8 +280,8 @@ llama_context::llama_context( // simulate full KV cache - const auto mstate = memory->init_full(); - if (!mstate) { + const auto mctx = memory->init_full(); + if (!mctx) { throw std::runtime_error("failed to initialize KV cache"); } @@ -289,7 +289,7 @@ llama_context::llama_context( // reserve pp graph first so that buffers are only allocated once { - auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mstate.get()); + auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get()); if (!gf) { throw std::runtime_error("failed to allocate compute pp buffers"); } @@ -300,7 +300,7 @@ llama_context::llama_context( // reserve with tg graph to get the number of splits and nodes { - auto * gf = graph_reserve(1, 1, 1, mstate.get()); + auto * gf = graph_reserve(1, 1, 1, mctx.get()); if (!gf) { throw std::runtime_error("failed to allocate compute tg buffers"); } 
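For orientation while reading the rename below, here is a rough sketch of the consumer-side lifecycle of the renamed interface, mirroring the decode loop further down in this patch. The helper function name, the embd_all value, and the includes are illustrative assumptions, not code from the commit, and the snippet is only meaningful inside the llama.cpp source tree.

// Sketch only: driving a llama_memory_context_ptr the way llama_context::decode does.
#include "llama-memory.h"  // llama_memory_i, llama_memory_context_ptr
#include "llama-batch.h"   // llama_batch_allocr, llama_ubatch

static bool process_batch(llama_memory_i & memory, llama_batch_allocr & balloc, uint32_t n_ubatch) {
    // split the batch into ubatches and reserve space for them in the memory module
    llama_memory_context_ptr mctx = memory.init_batch(balloc, n_ubatch, /*embd_all =*/ false);
    if (!mctx || mctx->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
        return false;
    }

    do {
        const llama_ubatch & ubatch = mctx->get_ubatch();

        // apply() commits the current ubatch to the underlying cache(s)
        // before the graph for it is built and computed
        if (!mctx->apply()) {
            return false;
        }

        // ... build and compute the graph for `ubatch` here ...
        (void) ubatch;
    } while (mctx->next());

    return true;
}

The same get_ubatch()/apply()/next() sequence appears in llama_context::decode and llama_context::opt_epoch_iter in the hunks that follow.
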
@@ -311,7 +311,7 @@ llama_context::llama_context( // reserve again with pp graph to avoid ggml-alloc reallocations during inference { - auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mstate.get()); + auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get()); if (!gf) { throw std::runtime_error("failed to allocate compute pp buffers"); } @@ -444,8 +444,8 @@ bool llama_context::kv_self_update(bool optimize) { optimize |= memory_force_optimize; memory_force_optimize = false; - const auto mstate = memory->init_update(this, optimize); - switch (mstate->get_status()) { + const auto mctx = memory->init_update(this, optimize); + switch (mctx->get_status()) { case LLAMA_MEMORY_STATUS_SUCCESS: { // noop @@ -463,22 +463,22 @@ bool llama_context::kv_self_update(bool optimize) { } } - if (!mstate->apply()) { + if (!mctx->apply()) { LLAMA_LOG_ERROR("%s: failed to apply memory update\n", __func__); } } // if the memory module did any computation, we have to reserve a new worst-case graph { - const auto mstate = memory->init_full(); - if (!mstate) { - throw std::runtime_error("failed to initialize memory state"); + const auto mctx = memory->init_full(); + if (!mctx) { + throw std::runtime_error("failed to initialize memory context"); } const uint32_t n_seqs = cparams.n_seq_max; const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch); - auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mstate.get()); + auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get()); if (!gf) { LLAMA_LOG_ERROR("%s: failed to reserve graph after the memory update\n", __func__); } @@ -678,9 +678,9 @@ bool llama_context::apply_adapter_cvec( return cvec.apply(model, data, len, n_embd, il_start, il_end); } -llm_graph_result_ptr llama_context::process_ubatch(const llama_ubatch & ubatch, llm_graph_type gtype, llama_memory_state_i * mstate, ggml_status & ret) { - if (mstate && !mstate->apply()) { - LLAMA_LOG_ERROR("%s: failed to apply memory state\n", __func__); +llm_graph_result_ptr llama_context::process_ubatch(const llama_ubatch & ubatch, llm_graph_type gtype, llama_memory_context_i * mctx, ggml_status & ret) { + if (mctx && !mctx->apply()) { + LLAMA_LOG_ERROR("%s: failed to apply memory context\n", __func__); ret = GGML_STATUS_FAILED; return nullptr; } @@ -692,7 +692,7 @@ llm_graph_result_ptr llama_context::process_ubatch(const llama_ubatch & ubatch, return nullptr; } - auto res = graph_build(ctx_compute.get(), gf, ubatch, gtype, mstate); + auto res = graph_build(ctx_compute.get(), gf, ubatch, gtype, mctx); if (!res) { LLAMA_LOG_ERROR("%s: failed to build graph\n", __func__); ret = GGML_STATUS_FAILED; @@ -933,21 +933,21 @@ int llama_context::decode(const llama_batch & batch_inp) { // handle any pending defrags/shifts kv_self_update(false); - llama_memory_state_ptr mstate; + llama_memory_context_ptr mctx; while (true) { - mstate = memory->init_batch(*balloc, cparams.n_ubatch, output_all); - if (!mstate) { + mctx = memory->init_batch(*balloc, cparams.n_ubatch, output_all); + if (!mctx) { return -2; } - switch (mstate->get_status()) { + switch (mctx->get_status()) { case LLAMA_MEMORY_STATUS_SUCCESS: { } break; case LLAMA_MEMORY_STATUS_NO_UPDATE: { - LLAMA_LOG_ERROR("%s: unexpected memory state status: %d\n", __func__, mstate->get_status()); + LLAMA_LOG_ERROR("%s: unexpected memory context status: %d\n", __func__, mctx->get_status()); return -2; } @@ -987,7 +987,7 @@ int llama_context::decode(const llama_batch & batch_inp) { int64_t n_outputs_prev = 0; do { - const auto & ubatch = 
mstate->get_ubatch(); + const auto & ubatch = mctx->get_ubatch(); // count the outputs in this ubatch { @@ -1009,7 +1009,7 @@ int llama_context::decode(const llama_batch & batch_inp) { ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data); ggml_status status; - const auto res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, mstate.get(), status); + const auto res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, mctx.get(), status); if (!res) { // the last ubatch failed or was aborted -> remove all positions of that ubatch from the KV cache @@ -1126,7 +1126,7 @@ int llama_context::decode(const llama_batch & batch_inp) { } n_outputs_prev += n_outputs; - } while (mstate->next()); + } while (mctx->next()); // set to total number of outputs in the batch, for use in llama_get_logits_ith n_outputs = n_outputs_all; @@ -1292,7 +1292,7 @@ ggml_cgraph * llama_context::graph_init() { return ggml_new_graph_custom(ctx_compute.get(), graph_max_nodes(), false); } -ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_state_i * mstate) { +ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx) { LLAMA_LOG_DEBUG("%s: reserving a graph for ubatch with n_tokens = %4u, n_seqs = %2u, n_outputs = %4u\n", __func__, n_tokens, n_seqs, n_outputs); if (n_tokens % n_seqs != 0) { @@ -1312,7 +1312,7 @@ ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, u llama_ubatch ubatch = balloc.ubatch_reserve(n_tokens/n_seqs, n_seqs); auto * gf = graph_init(); - auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, mstate); + auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, mctx); this->n_outputs = save_n_outputs; @@ -1333,11 +1333,11 @@ ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, u } llm_graph_result_ptr llama_context::graph_build( - ggml_context * ctx, - ggml_cgraph * gf, - const llama_ubatch & ubatch, - llm_graph_type gtype, - const llama_memory_state_i * mstate) { + ggml_context * ctx, + ggml_cgraph * gf, + const llama_ubatch & ubatch, + llm_graph_type gtype, + const llama_memory_context_i * mctx) { return model.build_graph( { /*.ctx =*/ ctx, @@ -1349,7 +1349,7 @@ llm_graph_result_ptr llama_context::graph_build( /*.backend_cpu =*/ backend_cpu, /*.cvec =*/ &cvec, /*.loras =*/ &loras, - /*.mstate =*/ mstate, + /*.mctx =*/ mctx, /*.cross =*/ &cross, /*.n_outputs =*/ n_outputs, /*.cb =*/ graph_get_cb(), @@ -2042,8 +2042,8 @@ void llama_context::opt_epoch_iter( uint32_t n_outputs_all = n_tokens_all; - auto mstate = memory->init_batch(*balloc, cparams.n_ubatch, true); - if (!mstate || mstate->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) { + auto mctx = memory->init_batch(*balloc, cparams.n_ubatch, true); + if (!mctx || mctx->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) { LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__); break; } @@ -2056,17 +2056,17 @@ void llama_context::opt_epoch_iter( uint32_t pos_batch = 0; do { - const auto & ubatch = mstate->get_ubatch(); + const auto & ubatch = mctx->get_ubatch(); n_outputs = ubatch.n_tokens; - if (!mstate->apply()) { - LLAMA_LOG_ERROR("%s: failed to update the memory state\n", __func__); + if (!mctx->apply()) { + LLAMA_LOG_ERROR("%s: failed to update the memory context\n", __func__); break; } auto * gf = graph_init(); - auto res = graph_build(ctx_compute.get(), gf, ubatch, 
LLM_GRAPH_TYPE_DEFAULT, mstate.get()); + auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, mctx.get()); struct ggml_context * ctx_compute_opt; { @@ -2101,7 +2101,7 @@ void llama_context::opt_epoch_iter( ggml_free(ctx_compute_opt); pos_batch += ubatch.n_tokens; - } while (mstate->next()); + } while (mctx->next()); } } diff --git a/src/llama-context.h b/src/llama-context.h index 7d300c14572e9..9ce05715a8c03 100644 --- a/src/llama-context.h +++ b/src/llama-context.h @@ -18,7 +18,7 @@ class llama_io_read_i; class llama_io_write_i; struct llama_memory_i; -struct llama_memory_state_i; +struct llama_memory_context_i; struct llama_context { // init scheduler and compute buffers, reserve worst-case graphs @@ -93,14 +93,14 @@ struct llama_context { int32_t il_end); // process a single ubatch with a specific graph type - // if memory_state is provided, it will be applied first to the context's memory + // if memory_context is provided, it will be applied first to the context's memory // ret contains the status of the graph computation // returns nullptr only if ret != GGML_STATUS_SUCCESS llm_graph_result_ptr process_ubatch( - const llama_ubatch & ubatch, - llm_graph_type gtype, - llama_memory_state_i * mstate, - ggml_status & ret); + const llama_ubatch & ubatch, + llm_graph_type gtype, + llama_memory_context_i * mctx, + ggml_status & ret); int encode(const llama_batch & batch_inp); int decode(const llama_batch & batch_inp); @@ -197,15 +197,15 @@ struct llama_context { ggml_status graph_compute(ggml_cgraph * gf, bool batched); // reserve a graph with a dummy ubatch of the specified size - ggml_cgraph * graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_state_i * mstate); + ggml_cgraph * graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx); private: llm_graph_result_ptr graph_build( - ggml_context * ctx, - ggml_cgraph * gf, - const llama_ubatch & ubatch, - llm_graph_type gtype, - const llama_memory_state_i * mstate); + ggml_context * ctx, + ggml_cgraph * gf, + const llama_ubatch & ubatch, + llm_graph_type gtype, + const llama_memory_context_i * mctx); llm_graph_cb graph_get_cb() const; diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 7e162c5552204..48589a50ab24d 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -87,7 +87,7 @@ void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) { void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) { if (pos_bucket) { - kv_state->set_input_pos_bucket(pos_bucket, ubatch); + mctx->set_input_pos_bucket(pos_bucket, ubatch); } } @@ -221,7 +221,7 @@ void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) { void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) { GGML_UNUSED(ubatch); - const int64_t n_rs = mem_state->get_n_rs(); + const int64_t n_rs = mctx->get_n_rs(); if (s_copy) { GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer)); @@ -229,7 +229,7 @@ void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) { // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n for (uint32_t i = 0; i < n_rs; ++i) { - data[i] = mem_state->s_copy(i); + data[i] = mctx->s_copy(i); } } } @@ -282,17 +282,17 @@ void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) { void llm_graph_input_attn_kv_unified::set_input(const llama_ubatch * ubatch) { if (self_kq_mask) { - kv_state->set_input_kq_mask(self_kq_mask, ubatch, 
cparams.causal_attn); + mctx->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); } } void llm_graph_input_attn_kv_unified_iswa::set_input(const llama_ubatch * ubatch) { if (self_kq_mask) { - kv_state->get_base()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); + mctx->get_base()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); } if (self_kq_mask_swa) { - kv_state->get_swa()->set_input_kq_mask(self_kq_mask_swa, ubatch, cparams.causal_attn); + mctx->get_swa()->set_input_kq_mask(self_kq_mask_swa, ubatch, cparams.causal_attn); } } @@ -334,10 +334,10 @@ void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) { void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) { if (self_kq_mask) { - mem_state->get_state_attn()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); + mctx->get_attn()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); } - const int64_t n_rs = mem_state->get_state_recr()->get_n_rs(); + const int64_t n_rs = mctx->get_recr()->get_n_rs(); if (s_copy) { GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer)); @@ -345,7 +345,7 @@ void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) { // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n for (uint32_t i = 0; i < n_rs; ++i) { - data[i] = mem_state->get_state_recr()->s_copy(i); + data[i] = mctx->get_recr()->s_copy(i); } } } @@ -389,7 +389,7 @@ llm_graph_context::llm_graph_context(const llm_graph_params & params) : backend_cpu (params.backend_cpu), cvec (params.cvec), loras (params.loras), - mstate (params.mstate), + mctx (params.mctx), cross (params.cross), cb_func (params.cb), res (std::make_unique()) { @@ -950,11 +950,11 @@ ggml_tensor * llm_graph_context::build_inp_pos_bucket_enc() const { } ggml_tensor * llm_graph_context::build_inp_pos_bucket_dec() const { - const auto * kv_state = static_cast(mstate); + const auto * mctx_cur = static_cast(mctx); - auto inp = std::make_unique(hparams, kv_state); + auto inp = std::make_unique(hparams, mctx_cur); - const auto n_kv = kv_state->get_n_kv(); + const auto n_kv = mctx_cur->get_n_kv(); auto & cur = inp->pos_bucket; @@ -982,14 +982,14 @@ ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_t } llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const { - const auto * mem_state = static_cast(mstate); + const auto * mctx_cur = static_cast(mctx); - auto inp = std::make_unique(hparams, cparams, mem_state); + auto inp = std::make_unique(hparams, cparams, mctx_cur); { GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Hybrid recurrent is not supported with SWA attention layers"); - const auto n_kv = inp->mem_state->get_state_attn()->get_n_kv(); + const auto n_kv = inp->mctx->get_attn()->get_n_kv(); inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); //cb(inp->self_kq_mask, "KQ_mask", -1); @@ -999,7 +999,7 @@ llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const { } { - const auto n_rs = mem_state->get_state_recr()->get_n_rs(); + const auto n_rs = mctx_cur->get_recr()->get_n_rs(); inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs); ggml_set_input(inp->s_copy); @@ -1183,14 +1183,14 @@ ggml_tensor * llm_graph_context::build_attn( } llm_graph_input_attn_kv_unified * llm_graph_context::build_attn_inp_kv_unified() const { - const auto * kv_state = static_cast(mstate); + const auto * mctx_cur = static_cast(mctx); - auto inp = 
std::make_unique(hparams, cparams, kv_state); + auto inp = std::make_unique(hparams, cparams, mctx_cur); { GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified_iswa for SWA"); - const auto n_kv = kv_state->get_n_kv(); + const auto n_kv = mctx_cur->get_n_kv(); inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); //cb(inp->self_kq_mask, "KQ_mask", -1); @@ -1220,19 +1220,19 @@ ggml_tensor * llm_graph_context::build_attn( ggml_build_forward_expand(gf, k_cur); ggml_build_forward_expand(gf, v_cur); - const auto * kv_state = static_cast(mstate); + const auto * mctx_cur = static_cast(mctx); // store to KV cache { - ggml_build_forward_expand(gf, kv_state->cpy_k(ctx0, k_cur, il)); - ggml_build_forward_expand(gf, kv_state->cpy_v(ctx0, v_cur, il)); + ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, il)); + ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, il)); } const auto & kq_mask = inp->get_kq_mask(); ggml_tensor * q = q_cur; - ggml_tensor * k = kv_state->get_k(ctx0, il); - ggml_tensor * v = kv_state->get_v(ctx0, il); + ggml_tensor * k = mctx_cur->get_k(ctx0, il); + ggml_tensor * v = mctx_cur->get_v(ctx0, il); ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, kq_scale); cb(cur, "kqv_out", il); @@ -1270,23 +1270,23 @@ ggml_tensor * llm_graph_context::build_attn( ggml_build_forward_expand(gf, k_cur); ggml_build_forward_expand(gf, v_cur); - const auto * kv_state_iswa = static_cast(mstate); + const auto * mctx_iswa = static_cast(mctx); const bool is_swa = hparams.is_swa(il); - const auto * kv_state = is_swa ? kv_state_iswa->get_swa() : kv_state_iswa->get_base(); + const auto * mctx_cur = is_swa ? mctx_iswa->get_swa() : mctx_iswa->get_base(); // store to KV cache { - ggml_build_forward_expand(gf, kv_state->cpy_k(ctx0, k_cur, il)); - ggml_build_forward_expand(gf, kv_state->cpy_v(ctx0, v_cur, il)); + ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, il)); + ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, il)); } const auto & kq_mask = is_swa ? 
inp->get_kq_mask_swa() : inp->get_kq_mask(); ggml_tensor * q = q_cur; - ggml_tensor * k = kv_state->get_k(ctx0, il); - ggml_tensor * v = kv_state->get_v(ctx0, il); + ggml_tensor * k = mctx_cur->get_k(ctx0, il); + ggml_tensor * v = mctx_cur->get_v(ctx0, il); ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, kq_scale); cb(cur, "kqv_out", il); @@ -1379,19 +1379,19 @@ ggml_tensor * llm_graph_context::build_attn( ggml_build_forward_expand(gf, k_cur); ggml_build_forward_expand(gf, v_cur); - const auto * kv_state = static_cast(mstate)->get_state_attn(); + const auto * mctx_cur = static_cast(mctx)->get_attn(); // store to KV cache { - ggml_build_forward_expand(gf, kv_state->cpy_k(ctx0, k_cur, il)); - ggml_build_forward_expand(gf, kv_state->cpy_v(ctx0, v_cur, il)); + ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, il)); + ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, il)); } const auto & kq_mask = inp->get_kq_mask(); ggml_tensor * q = q_cur; - ggml_tensor * k = kv_state->get_k(ctx0, il); - ggml_tensor * v = kv_state->get_v(ctx0, il); + ggml_tensor * k = mctx_cur->get_k(ctx0, il); + ggml_tensor * v = mctx_cur->get_v(ctx0, il); ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, kq_scale); cb(cur, "kqv_out", il); @@ -1412,12 +1412,12 @@ ggml_tensor * llm_graph_context::build_attn( } llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const { - const auto * kv_state = static_cast(mstate); + const auto * mctx_cur = static_cast(mctx); - auto inp = std::make_unique(hparams, cparams, kv_state); + auto inp = std::make_unique(hparams, cparams, mctx_cur); { - const auto n_kv = kv_state->get_base()->get_n_kv(); + const auto n_kv = mctx_cur->get_base()->get_n_kv(); inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); //cb(inp->self_kq_mask, "KQ_mask", -1); @@ -1429,7 +1429,7 @@ llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unif { GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified for non-SWA"); - const auto n_kv = kv_state->get_swa()->get_n_kv(); + const auto n_kv = mctx_cur->get_swa()->get_n_kv(); inp->self_kq_mask_swa = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); //cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1); @@ -1485,11 +1485,11 @@ ggml_tensor * llm_graph_context::build_rs( } llm_graph_input_rs * llm_graph_context::build_rs_inp() const { - const auto * kv_state = static_cast(mstate); + const auto * mctx_cur = static_cast(mctx); - auto inp = std::make_unique(kv_state); + auto inp = std::make_unique(mctx_cur); - const auto n_rs = kv_state->get_n_rs(); + const auto n_rs = mctx_cur->get_n_rs(); inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs); ggml_set_input(inp->s_copy); @@ -1504,9 +1504,9 @@ ggml_tensor * llm_graph_context::build_rs( int32_t state_size, int32_t n_seqs, bool avoid_copies) const { - const auto * kv_state = static_cast(mstate); + const auto * mctx_cur = static_cast(mctx); - return build_rs(gf, s, inp->s_copy, state_size, n_seqs, kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(), avoid_copies); + return build_rs(gf, s, inp->s_copy, state_size, n_seqs, mctx_cur->get_n_rs(), mctx_cur->get_head(), mctx_cur->get_size(), mctx_cur->get_rs_z(), avoid_copies); } ggml_tensor * llm_graph_context::build_rs( @@ -1516,9 +1516,9 @@ ggml_tensor * llm_graph_context::build_rs( int32_t state_size, int32_t n_seqs, 
bool avoid_copies) const { - const auto * kv_state = static_cast(mstate)->get_state_recr(); + const auto * mctx_cur = static_cast(mctx)->get_recr(); - return build_rs(gf, s, inp->s_copy, state_size, n_seqs, kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(), avoid_copies); + return build_rs(gf, s, inp->s_copy, state_size, n_seqs, mctx_cur->get_n_rs(), mctx_cur->get_head(), mctx_cur->get_size(), mctx_cur->get_rs_z(), avoid_copies); } ggml_tensor * llm_graph_context::build_rwkv_token_shift_load( @@ -1526,13 +1526,13 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_load( ggml_cgraph * gf, const llama_ubatch & ubatch, int il) const { - const auto * kv_state = static_cast(mstate); + const auto * mctx_cur = static_cast(mctx); const auto token_shift_count = hparams.token_shift_count; const int64_t n_seqs = ubatch.n_seqs; - ggml_tensor * token_shift_all = kv_state->get_r_l(il); + ggml_tensor * token_shift_all = mctx_cur->get_r_l(il); ggml_tensor * token_shift = build_rs( inp, gf, token_shift_all, @@ -1547,19 +1547,19 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_store( ggml_tensor * token_shift, const llama_ubatch & ubatch, int il) const { - const auto * kv_state = static_cast(mstate); + const auto * mctx_cur = static_cast(mctx); const auto token_shift_count = hparams.token_shift_count; const auto n_embd = hparams.n_embd; const int64_t n_seqs = ubatch.n_seqs; - const auto kv_head = kv_state->get_head(); + const auto kv_head = mctx_cur->get_head(); return ggml_cpy( ctx0, ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0), - ggml_view_1d(ctx0, kv_state->get_r_l(il), hparams.n_embd_r()*n_seqs, hparams.n_embd_r()*kv_head*ggml_element_size(kv_state->get_r_l(il))) + ggml_view_1d(ctx0, mctx_cur->get_r_l(il), hparams.n_embd_r()*n_seqs, hparams.n_embd_r()*kv_head*ggml_element_size(mctx_cur->get_r_l(il))) ); } diff --git a/src/llama-graph.h b/src/llama-graph.h index 9e62fa60720d7..b433f266d1b29 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -17,12 +17,12 @@ struct ggml_tensor; struct llama_ubatch; struct llama_cparams; -struct llama_memory_state_i; +struct llama_memory_context_i; -class llama_kv_cache_unified_state; -class llama_kv_cache_unified_iswa_state; -class llama_memory_recurrent_state; -class llama_memory_hybrid_state; +class llama_kv_cache_unified_context; +class llama_kv_cache_unified_iswa_context; +class llama_memory_recurrent_context; +class llama_memory_hybrid_context; // certain models (typically multi-modal) can produce different types of graphs enum llm_graph_type { @@ -136,7 +136,7 @@ class llm_graph_input_pos_bucket_kv : public llm_graph_input_i { public: llm_graph_input_pos_bucket_kv( const llama_hparams & hparams, - const llama_kv_cache_unified_state * kv_state) : hparams(hparams), kv_state(kv_state) {} + const llama_kv_cache_unified_context * mctx) : hparams(hparams), mctx(mctx) {} virtual ~llm_graph_input_pos_bucket_kv() = default; void set_input(const llama_ubatch * ubatch) override; @@ -144,7 +144,8 @@ class llm_graph_input_pos_bucket_kv : public llm_graph_input_i { ggml_tensor * pos_bucket = nullptr; // I32 [n_kv, n_batch] const llama_hparams & hparams; - const llama_kv_cache_unified_state * kv_state; + + const llama_kv_cache_unified_context * mctx; }; class llm_graph_input_out_ids : public llm_graph_input_i { @@ -191,14 +192,14 @@ class llm_graph_input_cls : public llm_graph_input_i { class llm_graph_input_rs : public llm_graph_input_i { public: - llm_graph_input_rs(const 
llama_memory_recurrent_state * mem_state) : mem_state(mem_state) {} + llm_graph_input_rs(const llama_memory_recurrent_context * mctx) : mctx(mctx) {} virtual ~llm_graph_input_rs() = default; void set_input(const llama_ubatch * ubatch) override; ggml_tensor * s_copy; // I32 [kv_size] - const llama_memory_recurrent_state * mem_state; + const llama_memory_recurrent_context * mctx; }; class llm_graph_input_cross_embd : public llm_graph_input_i { @@ -238,10 +239,10 @@ class llm_graph_input_attn_kv_unified : public llm_graph_input_i { llm_graph_input_attn_kv_unified( const llama_hparams & hparams, const llama_cparams & cparams, - const llama_kv_cache_unified_state * kv_state) : + const llama_kv_cache_unified_context * mctx) : hparams(hparams), cparams(cparams), - kv_state(kv_state) { + mctx(mctx) { } ~llm_graph_input_attn_kv_unified() = default; @@ -255,7 +256,7 @@ class llm_graph_input_attn_kv_unified : public llm_graph_input_i { const llama_hparams & hparams; const llama_cparams & cparams; - const llama_kv_cache_unified_state * kv_state; + const llama_kv_cache_unified_context * mctx; }; class llm_graph_input_attn_kv_unified_iswa : public llm_graph_input_i { @@ -263,10 +264,10 @@ class llm_graph_input_attn_kv_unified_iswa : public llm_graph_input_i { llm_graph_input_attn_kv_unified_iswa( const llama_hparams & hparams, const llama_cparams & cparams, - const llama_kv_cache_unified_iswa_state * kv_state) : + const llama_kv_cache_unified_iswa_context * mctx) : hparams(hparams), cparams(cparams), - kv_state(kv_state) { + mctx(mctx) { } ~llm_graph_input_attn_kv_unified_iswa() = default; @@ -283,7 +284,7 @@ class llm_graph_input_attn_kv_unified_iswa : public llm_graph_input_i { const llama_hparams & hparams; const llama_cparams & cparams; - const llama_kv_cache_unified_iswa_state * kv_state; + const llama_kv_cache_unified_iswa_context * mctx; }; class llm_graph_input_attn_cross : public llm_graph_input_i { @@ -306,10 +307,10 @@ class llm_graph_input_mem_hybrid : public llm_graph_input_i { llm_graph_input_mem_hybrid( const llama_hparams & hparams, const llama_cparams & cparams, - const llama_memory_hybrid_state * mem_state) : + const llama_memory_hybrid_context * mctx) : hparams(hparams), cparams(cparams), - mem_state(mem_state) { + mctx(mctx) { } virtual ~llm_graph_input_mem_hybrid() = default; @@ -325,7 +326,7 @@ class llm_graph_input_mem_hybrid : public llm_graph_input_i { const llama_hparams & hparams; const llama_cparams & cparams; - const llama_memory_hybrid_state * mem_state; + const llama_memory_hybrid_context * mctx; }; // @@ -401,10 +402,10 @@ struct llm_graph_params { ggml_backend_sched_t sched; ggml_backend_t backend_cpu; - const llama_adapter_cvec * cvec; - const llama_adapter_loras * loras; - const llama_memory_state_i * mstate; - const llama_cross * cross; + const llama_adapter_cvec * cvec; + const llama_adapter_loras * loras; + const llama_memory_context_i * mctx; + const llama_cross * cross; uint32_t n_outputs; @@ -453,10 +454,10 @@ struct llm_graph_context { ggml_backend_t backend_cpu; // TODO: needed by build_attn_mha, figure out a way to remove? 
- const llama_adapter_cvec * cvec; - const llama_adapter_loras * loras; - const llama_memory_state_i * mstate; - const llama_cross * cross; + const llama_adapter_cvec * cvec; + const llama_adapter_loras * loras; + const llama_memory_context_i * mctx; + const llama_cross * cross; const llm_graph_cb & cb_func; diff --git a/src/llama-kv-cache-unified-iswa.cpp b/src/llama-kv-cache-unified-iswa.cpp index 0ced340dec6c5..b9169299c0760 100644 --- a/src/llama-kv-cache-unified-iswa.cpp +++ b/src/llama-kv-cache-unified-iswa.cpp @@ -95,7 +95,7 @@ llama_pos llama_kv_cache_unified_iswa::seq_pos_max(llama_seq_id seq_id) const { return kv_swa->seq_pos_max(seq_id); } -llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { +llama_memory_context_ptr llama_kv_cache_unified_iswa::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { GGML_UNUSED(embd_all); // first try simple split @@ -125,7 +125,7 @@ llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(llama_batch_alloc assert(heads_base.size() == heads_swa.size()); - return std::make_unique( + return std::make_unique( this, std::move(heads_base), std::move(heads_swa), std::move(ubatches)); } while (false); @@ -156,22 +156,22 @@ llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(llama_batch_alloc assert(heads_base.size() == heads_swa.size()); - return std::make_unique( + return std::make_unique( this, std::move(heads_base), std::move(heads_swa), std::move(ubatches)); } while (false); // TODO: if we fail again, we should attempt different splitting strategies // but to do that properly, we first have to refactor the batches to be more flexible - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); } -llama_memory_state_ptr llama_kv_cache_unified_iswa::init_full() { - return std::make_unique(this); +llama_memory_context_ptr llama_kv_cache_unified_iswa::init_full() { + return std::make_unique(this); } -llama_memory_state_ptr llama_kv_cache_unified_iswa::init_update(llama_context * lctx, bool optimize) { - return std::make_unique(this, lctx, optimize); +llama_memory_context_ptr llama_kv_cache_unified_iswa::init_update(llama_context * lctx, bool optimize) { + return std::make_unique(this, lctx, optimize); } bool llama_kv_cache_unified_iswa::get_can_shift() const { @@ -197,46 +197,46 @@ llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_swa() const { } // -// llama_kv_cache_unified_iswa_state +// llama_kv_cache_unified_iswa_context // -llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(llama_memory_status status) : status(status) {} +llama_kv_cache_unified_iswa_context::llama_kv_cache_unified_iswa_context(llama_memory_status status) : status(status) {} -llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( +llama_kv_cache_unified_iswa_context::llama_kv_cache_unified_iswa_context( llama_kv_cache_unified_iswa * kv) : - state_base(kv->get_base()->init_full()), - state_swa (kv->get_swa ()->init_full()), - status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) { + ctx_base(kv->get_base()->init_full()), + ctx_swa (kv->get_swa ()->init_full()), + status(llama_memory_status_combine(ctx_base->get_status(), ctx_swa->get_status())) { } -llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( +llama_kv_cache_unified_iswa_context::llama_kv_cache_unified_iswa_context( llama_kv_cache_unified_iswa * kv, 
llama_context * lctx, bool optimize) : - state_base(kv->get_base()->init_update(lctx, optimize)), - state_swa (kv->get_swa ()->init_update(lctx, optimize)), - status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) { + ctx_base(kv->get_base()->init_update(lctx, optimize)), + ctx_swa (kv->get_swa ()->init_update(lctx, optimize)), + status(llama_memory_status_combine(ctx_base->get_status(), ctx_swa->get_status())) { } -llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( +llama_kv_cache_unified_iswa_context::llama_kv_cache_unified_iswa_context( llama_kv_cache_unified_iswa * kv, std::vector heads_base, std::vector heads_swa, std::vector ubatches) : ubatches(std::move(ubatches)), // note: here we copy the ubatches. not sure if this is ideal - state_base(new llama_kv_cache_unified_state(kv->get_base(), std::move(heads_base), this->ubatches)), - state_swa (new llama_kv_cache_unified_state(kv->get_swa (), std::move(heads_swa), this->ubatches)), - status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) { + ctx_base(new llama_kv_cache_unified_context(kv->get_base(), std::move(heads_base), this->ubatches)), + ctx_swa (new llama_kv_cache_unified_context(kv->get_swa (), std::move(heads_swa), this->ubatches)), + status(llama_memory_status_combine(ctx_base->get_status(), ctx_swa->get_status())) { } -llama_kv_cache_unified_iswa_state:: ~llama_kv_cache_unified_iswa_state() = default; +llama_kv_cache_unified_iswa_context:: ~llama_kv_cache_unified_iswa_context() = default; -bool llama_kv_cache_unified_iswa_state::next() { +bool llama_kv_cache_unified_iswa_context::next() { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); - state_base->next(); - state_swa ->next(); + ctx_base->next(); + ctx_swa ->next(); if (++i_next >= ubatches.size()) { return false; @@ -245,35 +245,35 @@ bool llama_kv_cache_unified_iswa_state::next() { return true; } -bool llama_kv_cache_unified_iswa_state::apply() { +bool llama_kv_cache_unified_iswa_context::apply() { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); bool res = true; - res = res & state_base->apply(); - res = res & state_swa ->apply(); + res = res & ctx_base->apply(); + res = res & ctx_swa ->apply(); return res; } -llama_memory_status llama_kv_cache_unified_iswa_state::get_status() const { +llama_memory_status llama_kv_cache_unified_iswa_context::get_status() const { return status; } -const llama_ubatch & llama_kv_cache_unified_iswa_state::get_ubatch() const { +const llama_ubatch & llama_kv_cache_unified_iswa_context::get_ubatch() const { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); return ubatches[i_next]; } -const llama_kv_cache_unified_state * llama_kv_cache_unified_iswa_state::get_base() const { +const llama_kv_cache_unified_context * llama_kv_cache_unified_iswa_context::get_base() const { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); - return static_cast(state_base.get()); + return static_cast(ctx_base.get()); } -const llama_kv_cache_unified_state * llama_kv_cache_unified_iswa_state::get_swa() const { +const llama_kv_cache_unified_context * llama_kv_cache_unified_iswa_context::get_swa() const { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); - return static_cast(state_swa.get()); + return static_cast(ctx_swa.get()); } diff --git a/src/llama-kv-cache-unified-iswa.h b/src/llama-kv-cache-unified-iswa.h index 071041585db38..46c1ed614f2f0 100644 --- a/src/llama-kv-cache-unified-iswa.h +++ b/src/llama-kv-cache-unified-iswa.h @@ -31,14 +31,14 @@ class llama_kv_cache_unified_iswa : public 
llama_memory_i { // llama_memory_i // - llama_memory_state_ptr init_batch( + llama_memory_context_ptr init_batch( llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) override; - llama_memory_state_ptr init_full() override; + llama_memory_context_ptr init_full() override; - llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) override; + llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override; bool get_can_shift() const override; @@ -72,32 +72,32 @@ class llama_kv_cache_unified_iswa : public llama_memory_i { std::unique_ptr kv_swa; }; -class llama_kv_cache_unified_iswa_state : public llama_memory_state_i { +class llama_kv_cache_unified_iswa_context : public llama_memory_context_i { public: // used for errors - llama_kv_cache_unified_iswa_state(llama_memory_status status); + llama_kv_cache_unified_iswa_context(llama_memory_status status); - // used to create a full-cache state - llama_kv_cache_unified_iswa_state( + // used to create a full-cache context + llama_kv_cache_unified_iswa_context( llama_kv_cache_unified_iswa * kv); - // used to create an update state - llama_kv_cache_unified_iswa_state( + // used to create an update context + llama_kv_cache_unified_iswa_context( llama_kv_cache_unified_iswa * kv, llama_context * lctx, bool optimize); - // used to create a state from a batch - llama_kv_cache_unified_iswa_state( + // used to create a batch processing context from a batch + llama_kv_cache_unified_iswa_context( llama_kv_cache_unified_iswa * kv, std::vector heads_base, std::vector heads_swa, std::vector ubatches); - virtual ~llama_kv_cache_unified_iswa_state(); + virtual ~llama_kv_cache_unified_iswa_context(); // - // llama_memory_state_i + // llama_memory_context_i // bool next() override; @@ -107,11 +107,11 @@ class llama_kv_cache_unified_iswa_state : public llama_memory_state_i { const llama_ubatch & get_ubatch() const override; // - // llama_kv_cache_unified_iswa_state specific API + // llama_kv_cache_unified_iswa_context specific API // - const llama_kv_cache_unified_state * get_base() const; - const llama_kv_cache_unified_state * get_swa() const; + const llama_kv_cache_unified_context * get_base() const; + const llama_kv_cache_unified_context * get_swa() const; private: //llama_kv_cache_unified_iswa * kv; @@ -121,8 +121,8 @@ class llama_kv_cache_unified_iswa_state : public llama_memory_state_i { std::vector ubatches; - const llama_memory_state_ptr state_base; - const llama_memory_state_ptr state_swa; + const llama_memory_context_ptr ctx_base; + const llama_memory_context_ptr ctx_swa; const llama_memory_status status; }; diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index 6897b797153db..b506d32ed4d06 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -307,7 +307,7 @@ llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const { return cells.seq_pos_max(seq_id); } -llama_memory_state_ptr llama_kv_cache_unified::init_batch( +llama_memory_context_ptr llama_kv_cache_unified::init_batch( llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { @@ -332,18 +332,18 @@ llama_memory_state_ptr llama_kv_cache_unified::init_batch( break; } - return std::make_unique( + return std::make_unique( this, std::move(heads), std::move(ubatches)); } while (false); - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); } -llama_memory_state_ptr llama_kv_cache_unified::init_full() { - return 
std::make_unique(this); +llama_memory_context_ptr llama_kv_cache_unified::init_full() { + return std::make_unique(this); } -llama_memory_state_ptr llama_kv_cache_unified::init_update(llama_context * lctx, bool optimize) { +llama_memory_context_ptr llama_kv_cache_unified::init_update(llama_context * lctx, bool optimize) { bool do_shift = get_has_shift(); defrag_info dinfo; @@ -373,7 +373,7 @@ llama_memory_state_ptr llama_kv_cache_unified::init_update(llama_context * lctx, } } - return std::make_unique(this, lctx, do_shift, std::move(dinfo)); + return std::make_unique(this, lctx, do_shift, std::move(dinfo)); } llama_kv_cache_unified::ubatch_heads llama_kv_cache_unified::prepare(const std::vector & ubatches) { @@ -1710,18 +1710,18 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell } // -// llama_kv_cache_unified_state +// llama_kv_cache_unified_context // -llama_kv_cache_unified_state::llama_kv_cache_unified_state(llama_memory_status status) : status(status) {} +llama_kv_cache_unified_context::llama_kv_cache_unified_context(llama_memory_status status) : status(status) {} -llama_kv_cache_unified_state::llama_kv_cache_unified_state( +llama_kv_cache_unified_context::llama_kv_cache_unified_context( llama_kv_cache_unified * kv) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv) { n_kv = kv->get_size(); head = 0; } -llama_kv_cache_unified_state::llama_kv_cache_unified_state( +llama_kv_cache_unified_context::llama_kv_cache_unified_context( llama_kv_cache_unified * kv, llama_context * lctx, bool do_shift, @@ -1731,15 +1731,15 @@ llama_kv_cache_unified_state::llama_kv_cache_unified_state( } } -llama_kv_cache_unified_state::llama_kv_cache_unified_state( +llama_kv_cache_unified_context::llama_kv_cache_unified_context( llama_kv_cache_unified * kv, llama_kv_cache_unified::ubatch_heads heads, std::vector ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), heads(std::move(heads)), ubatches(std::move(ubatches)) { } -llama_kv_cache_unified_state::~llama_kv_cache_unified_state() = default; +llama_kv_cache_unified_context::~llama_kv_cache_unified_context() = default; -bool llama_kv_cache_unified_state::next() { +bool llama_kv_cache_unified_context::next() { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); if (++i_next >= ubatches.size()) { @@ -1749,7 +1749,7 @@ bool llama_kv_cache_unified_state::next() { return true; } -bool llama_kv_cache_unified_state::apply() { +bool llama_kv_cache_unified_context::apply() { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); // no ubatches -> this is a KV cache update @@ -1767,45 +1767,45 @@ bool llama_kv_cache_unified_state::apply() { return true; } -llama_memory_status llama_kv_cache_unified_state::get_status() const { +llama_memory_status llama_kv_cache_unified_context::get_status() const { return status; } -const llama_ubatch & llama_kv_cache_unified_state::get_ubatch() const { +const llama_ubatch & llama_kv_cache_unified_context::get_ubatch() const { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); return ubatches[i_next]; } -uint32_t llama_kv_cache_unified_state::get_n_kv() const { +uint32_t llama_kv_cache_unified_context::get_n_kv() const { return n_kv; } -ggml_tensor * llama_kv_cache_unified_state::get_k(ggml_context * ctx, int32_t il) const { +ggml_tensor * llama_kv_cache_unified_context::get_k(ggml_context * ctx, int32_t il) const { return kv->get_k(ctx, il, n_kv); } -ggml_tensor * llama_kv_cache_unified_state::get_v(ggml_context * ctx, int32_t il) const { +ggml_tensor * llama_kv_cache_unified_context::get_v(ggml_context * ctx, int32_t il) 
const { return kv->get_v(ctx, il, n_kv); } -ggml_tensor * llama_kv_cache_unified_state::cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il) const { +ggml_tensor * llama_kv_cache_unified_context::cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il) const { return kv->cpy_k(ctx, k_cur, il, head); } -ggml_tensor * llama_kv_cache_unified_state::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il) const { +ggml_tensor * llama_kv_cache_unified_context::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il) const { return kv->cpy_v(ctx, v_cur, il, head); } -void llama_kv_cache_unified_state::set_input_k_shift(ggml_tensor * dst) const { +void llama_kv_cache_unified_context::set_input_k_shift(ggml_tensor * dst) const { kv->set_input_k_shift(dst); } -void llama_kv_cache_unified_state::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const { +void llama_kv_cache_unified_context::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const { kv->set_input_kq_mask(dst, ubatch, causal_attn); } -void llama_kv_cache_unified_state::set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const { +void llama_kv_cache_unified_context::set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const { kv->set_input_pos_bucket(dst, ubatch); } diff --git a/src/llama-kv-cache-unified.h b/src/llama-kv-cache-unified.h index 1560640045c82..4c53f1273ab88 100644 --- a/src/llama-kv-cache-unified.h +++ b/src/llama-kv-cache-unified.h @@ -56,14 +56,14 @@ class llama_kv_cache_unified : public llama_memory_i { // llama_memory_i // - llama_memory_state_ptr init_batch( + llama_memory_context_ptr init_batch( llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) override; - llama_memory_state_ptr init_full() override; + llama_memory_context_ptr init_full() override; - llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) override; + llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override; bool get_can_shift() const override; @@ -208,36 +208,36 @@ class llama_kv_cache_unified : public llama_memory_i { bool state_read_data(llama_io_read_i & io, uint32_t cell_count); }; -class llama_kv_cache_unified_state : public llama_memory_state_i { +class llama_kv_cache_unified_context : public llama_memory_context_i { public: // some shorthands using ubatch_heads = llama_kv_cache_unified::ubatch_heads; using defrag_info = llama_kv_cache_unified::defrag_info; // used for errors - llama_kv_cache_unified_state(llama_memory_status status); + llama_kv_cache_unified_context(llama_memory_status status); - // used to create a full-cache state - llama_kv_cache_unified_state( + // used to create a full-cache context + llama_kv_cache_unified_context( llama_kv_cache_unified * kv); - // used to create an update state - llama_kv_cache_unified_state( + // used to create an update context + llama_kv_cache_unified_context( llama_kv_cache_unified * kv, llama_context * lctx, bool do_shift, defrag_info dinfo); - // used to create a decode state from a batch - llama_kv_cache_unified_state( + // used to create a batch processing context from a batch + llama_kv_cache_unified_context( llama_kv_cache_unified * kv, ubatch_heads heads, std::vector ubatches); - virtual ~llama_kv_cache_unified_state(); + virtual ~llama_kv_cache_unified_context(); // - // llama_memory_state_i + // llama_memory_context_i // bool next() override; @@ -247,7 +247,7 @@ class llama_kv_cache_unified_state : public llama_memory_state_i 
{ const llama_ubatch & get_ubatch() const override; // - // llama_kv_cache_unified_state specific API + // llama_kv_cache_unified_context specific API // uint32_t get_n_kv() const; @@ -272,7 +272,7 @@ class llama_kv_cache_unified_state : public llama_memory_state_i { llama_context * lctx; // - // update state + // update context // bool do_shift = false; @@ -280,7 +280,7 @@ class llama_kv_cache_unified_state : public llama_memory_state_i { defrag_info dinfo; // - // batch processing state + // batch processing context // // the index of the next ubatch to process diff --git a/src/llama-memory-hybrid.cpp b/src/llama-memory-hybrid.cpp index 1b16686819eff..15cde98d138a8 100644 --- a/src/llama-memory-hybrid.cpp +++ b/src/llama-memory-hybrid.cpp @@ -56,7 +56,7 @@ llama_memory_hybrid::llama_memory_hybrid( n_seq_max )) {} -llama_memory_state_ptr llama_memory_hybrid::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { +llama_memory_context_ptr llama_memory_hybrid::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { do { balloc.split_reset(); @@ -82,31 +82,31 @@ llama_memory_state_ptr llama_memory_hybrid::init_batch(llama_batch_allocr & ball // prepare the recurrent batches first if (!mem_recr->prepare(ubatches)) { - // TODO: will the recurrent cache be in an undefined state at this point? + // TODO: will the recurrent cache be in an undefined context at this point? LLAMA_LOG_ERROR("%s: failed to prepare recurrent ubatches\n", __func__); - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); } // prepare the attention cache auto heads_attn = mem_attn->prepare(ubatches); if (heads_attn.empty()) { LLAMA_LOG_ERROR("%s: failed to prepare attention ubatches\n", __func__); - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); } - return std::make_unique( + return std::make_unique( this, std::move(heads_attn), std::move(ubatches)); } while(false); - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); } -llama_memory_state_ptr llama_memory_hybrid::init_full() { - return std::make_unique(this); +llama_memory_context_ptr llama_memory_hybrid::init_full() { + return std::make_unique(this); } -llama_memory_state_ptr llama_memory_hybrid::init_update(llama_context * lctx, bool optimize) { - return std::make_unique(this, lctx, optimize); +llama_memory_context_ptr llama_memory_hybrid::init_update(llama_context * lctx, bool optimize) { + return std::make_unique(this, lctx, optimize); } bool llama_memory_hybrid::get_can_shift() const { @@ -176,39 +176,39 @@ llama_memory_recurrent * llama_memory_hybrid::get_mem_recr() const { return mem_recr.get(); } -llama_memory_hybrid_state::llama_memory_hybrid_state(llama_memory_status status) : status(status) {} +llama_memory_hybrid_context::llama_memory_hybrid_context(llama_memory_status status) : status(status) {} -llama_memory_hybrid_state::llama_memory_hybrid_state(llama_memory_hybrid * mem) : - state_attn(mem->get_mem_attn()->init_full()), - state_recr(mem->get_mem_recr()->init_full()), - status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) { +llama_memory_hybrid_context::llama_memory_hybrid_context(llama_memory_hybrid * mem) : + ctx_attn(mem->get_mem_attn()->init_full()), + ctx_recr(mem->get_mem_recr()->init_full()), + status(llama_memory_status_combine(ctx_attn->get_status(), 
ctx_recr->get_status())) { } -llama_memory_hybrid_state::llama_memory_hybrid_state( +llama_memory_hybrid_context::llama_memory_hybrid_context( llama_memory_hybrid * mem, llama_context * lctx, bool optimize) : - state_attn(mem->get_mem_attn()->init_update(lctx, optimize)), - state_recr(mem->get_mem_recr()->init_update(lctx, optimize)), - status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) { + ctx_attn(mem->get_mem_attn()->init_update(lctx, optimize)), + ctx_recr(mem->get_mem_recr()->init_update(lctx, optimize)), + status(llama_memory_status_combine(ctx_attn->get_status(), ctx_recr->get_status())) { } -llama_memory_hybrid_state::llama_memory_hybrid_state( +llama_memory_hybrid_context::llama_memory_hybrid_context( llama_memory_hybrid * mem, std::vector heads_attn, std::vector ubatches) : ubatches(std::move(ubatches)), // note: here we copy the ubatches. not sure if this is ideal - state_attn(new llama_kv_cache_unified_state(mem->get_mem_attn(), std::move(heads_attn), this->ubatches)), - state_recr(new llama_memory_recurrent_state(mem->get_mem_recr(), this->ubatches)), - status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) { + ctx_attn(new llama_kv_cache_unified_context(mem->get_mem_attn(), std::move(heads_attn), this->ubatches)), + ctx_recr(new llama_memory_recurrent_context(mem->get_mem_recr(), this->ubatches)), + status(llama_memory_status_combine(ctx_attn->get_status(), ctx_recr->get_status())) { } -bool llama_memory_hybrid_state::next() { +bool llama_memory_hybrid_context::next() { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); - state_attn->next(); - state_recr->next(); + ctx_attn->next(); + ctx_recr->next(); if (++i_next >= ubatches.size()) { return false; @@ -217,30 +217,30 @@ bool llama_memory_hybrid_state::next() { return true; } -bool llama_memory_hybrid_state::apply() { +bool llama_memory_hybrid_context::apply() { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); bool res = true; - res = res & state_attn->apply(); - res = res & state_recr->apply(); + res = res & ctx_attn->apply(); + res = res & ctx_recr->apply(); return res; } -llama_memory_status llama_memory_hybrid_state::get_status() const { +llama_memory_status llama_memory_hybrid_context::get_status() const { return status; } -const llama_ubatch & llama_memory_hybrid_state::get_ubatch() const { +const llama_ubatch & llama_memory_hybrid_context::get_ubatch() const { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); return ubatches[i_next]; } -const llama_kv_cache_unified_state * llama_memory_hybrid_state::get_state_attn() const { - return static_cast(state_attn.get()); +const llama_kv_cache_unified_context * llama_memory_hybrid_context::get_attn() const { + return static_cast(ctx_attn.get()); } -const llama_memory_recurrent_state * llama_memory_hybrid_state::get_state_recr() const { - return static_cast(state_recr.get()); +const llama_memory_recurrent_context * llama_memory_hybrid_context::get_recr() const { + return static_cast(ctx_recr.get()); } diff --git a/src/llama-memory-hybrid.h b/src/llama-memory-hybrid.h index 4d27ab896aa05..f0c2420e9a2df 100644 --- a/src/llama-memory-hybrid.h +++ b/src/llama-memory-hybrid.h @@ -49,14 +49,14 @@ class llama_memory_hybrid : public llama_memory_i { // llama_memory_i // - llama_memory_state_ptr init_batch( + llama_memory_context_ptr init_batch( llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) override; - llama_memory_state_ptr init_full() override; + llama_memory_context_ptr init_full() override; - 
llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) override; + llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override; bool get_can_shift() const override; @@ -90,27 +90,27 @@ class llama_memory_hybrid : public llama_memory_i { const std::unique_ptr mem_recr; }; -class llama_memory_hybrid_state : public llama_memory_state_i { +class llama_memory_hybrid_context : public llama_memory_context_i { public: // init failure - explicit llama_memory_hybrid_state(llama_memory_status status); + explicit llama_memory_hybrid_context(llama_memory_status status); // init full - explicit llama_memory_hybrid_state(llama_memory_hybrid * mem); + explicit llama_memory_hybrid_context(llama_memory_hybrid * mem); // init update - explicit llama_memory_hybrid_state( + explicit llama_memory_hybrid_context( llama_memory_hybrid * mem, llama_context * lctx, bool optimize); // init success - llama_memory_hybrid_state( + llama_memory_hybrid_context( llama_memory_hybrid * mem, std::vector heads_attn, std::vector ubatches); - ~llama_memory_hybrid_state() = default; + ~llama_memory_hybrid_context() = default; bool next() override; bool apply() override; @@ -119,11 +119,11 @@ class llama_memory_hybrid_state : public llama_memory_state_i { const llama_ubatch & get_ubatch() const override; // - // llama_memory_hybrid_state + // llama_memory_hybrid_context // - const llama_kv_cache_unified_state * get_state_attn() const; - const llama_memory_recurrent_state * get_state_recr() const; + const llama_kv_cache_unified_context * get_attn() const; + const llama_memory_recurrent_context * get_recr() const; private: // the index of the next ubatch to process @@ -131,8 +131,8 @@ class llama_memory_hybrid_state : public llama_memory_state_i { std::vector ubatches; - const llama_memory_state_ptr state_attn; - const llama_memory_state_ptr state_recr; + const llama_memory_context_ptr ctx_attn; + const llama_memory_context_ptr ctx_recr; const llama_memory_status status; }; diff --git a/src/llama-memory-recurrent.cpp b/src/llama-memory-recurrent.cpp index b064da0084c52..1b1e95d567a6c 100644 --- a/src/llama-memory-recurrent.cpp +++ b/src/llama-memory-recurrent.cpp @@ -362,7 +362,7 @@ llama_pos llama_memory_recurrent::seq_pos_max(llama_seq_id seq_id) const { return result; } -llama_memory_state_ptr llama_memory_recurrent::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { +llama_memory_context_ptr llama_memory_recurrent::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { std::vector ubatches; while (true) { @@ -383,21 +383,21 @@ llama_memory_state_ptr llama_memory_recurrent::init_batch(llama_batch_allocr & b } if (!prepare(ubatches)) { - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); } - return std::make_unique(this, std::move(ubatches)); + return std::make_unique(this, std::move(ubatches)); } -llama_memory_state_ptr llama_memory_recurrent::init_full() { - return std::make_unique(this); +llama_memory_context_ptr llama_memory_recurrent::init_full() { + return std::make_unique(this); } -llama_memory_state_ptr llama_memory_recurrent::init_update(llama_context * lctx, bool optimize) { +llama_memory_context_ptr llama_memory_recurrent::init_update(llama_context * lctx, bool optimize) { GGML_UNUSED(lctx); GGML_UNUSED(optimize); - return std::make_unique(LLAMA_MEMORY_STATUS_NO_UPDATE); + return std::make_unique(LLAMA_MEMORY_STATUS_NO_UPDATE); } bool 
llama_memory_recurrent::prepare(const std::vector & ubatches) { @@ -1040,22 +1040,22 @@ bool llama_memory_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell } // -// llama_memory_recurrent_state +// llama_memory_recurrent_context // -llama_memory_recurrent_state::llama_memory_recurrent_state(llama_memory_status status) : status(status) {} +llama_memory_recurrent_context::llama_memory_recurrent_context(llama_memory_status status) : status(status) {} -llama_memory_recurrent_state::llama_memory_recurrent_state( +llama_memory_recurrent_context::llama_memory_recurrent_context( llama_memory_recurrent * mem) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), is_full(true) { } -llama_memory_recurrent_state::llama_memory_recurrent_state( +llama_memory_recurrent_context::llama_memory_recurrent_context( llama_memory_recurrent * mem, std::vector ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), ubatches(std::move(ubatches)) {} -llama_memory_recurrent_state::~llama_memory_recurrent_state() = default; +llama_memory_recurrent_context::~llama_memory_recurrent_context() = default; -bool llama_memory_recurrent_state::next() { +bool llama_memory_recurrent_context::next() { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); if (++i_next >= ubatches.size()) { @@ -1065,7 +1065,7 @@ bool llama_memory_recurrent_state::next() { return true; } -bool llama_memory_recurrent_state::apply() { +bool llama_memory_recurrent_context::apply() { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); mem->find_slot(ubatches[i_next]); @@ -1073,40 +1073,40 @@ bool llama_memory_recurrent_state::apply() { return true; } -llama_memory_status llama_memory_recurrent_state::get_status() const { +llama_memory_status llama_memory_recurrent_context::get_status() const { return status; } -const llama_ubatch & llama_memory_recurrent_state::get_ubatch() const { +const llama_ubatch & llama_memory_recurrent_context::get_ubatch() const { assert(status == LLAMA_MEMORY_STATUS_SUCCESS); return ubatches[i_next]; } -uint32_t llama_memory_recurrent_state::get_n_rs() const { +uint32_t llama_memory_recurrent_context::get_n_rs() const { return is_full ? mem->size : mem->n; } -uint32_t llama_memory_recurrent_state::get_head() const { +uint32_t llama_memory_recurrent_context::get_head() const { return is_full ? 0 : mem->head; } -int32_t llama_memory_recurrent_state::get_rs_z() const { +int32_t llama_memory_recurrent_context::get_rs_z() const { return is_full ? 
0 : mem->rs_z; } -uint32_t llama_memory_recurrent_state::get_size() const { +uint32_t llama_memory_recurrent_context::get_size() const { return mem->size; } -ggml_tensor * llama_memory_recurrent_state::get_r_l(int32_t il) const { +ggml_tensor * llama_memory_recurrent_context::get_r_l(int32_t il) const { return mem->r_l[il]; } -ggml_tensor * llama_memory_recurrent_state::get_s_l(int32_t il) const { +ggml_tensor * llama_memory_recurrent_context::get_s_l(int32_t il) const { return mem->s_l[il]; } -int32_t llama_memory_recurrent_state::s_copy(int i) const { +int32_t llama_memory_recurrent_context::s_copy(int i) const { return mem->cells[i + mem->head].src0; } diff --git a/src/llama-memory-recurrent.h b/src/llama-memory-recurrent.h index be58dae7cfe33..4d094f9a05788 100644 --- a/src/llama-memory-recurrent.h +++ b/src/llama-memory-recurrent.h @@ -11,8 +11,8 @@ // llama_memory_recurrent // -// TODO: extract the cache state used for graph computation into llama_memory_recurrent_state_i -// see the implementation of llama_kv_cache_unified_state_i for an example how to do it +// TODO: extract the cache state used for graph computation into llama_memory_recurrent_context_i +// see the implementation of llama_kv_cache_unified_context_i for an example how to do it class llama_memory_recurrent : public llama_memory_i { public: @@ -34,14 +34,14 @@ class llama_memory_recurrent : public llama_memory_i { // llama_memory_i // - llama_memory_state_ptr init_batch( + llama_memory_context_ptr init_batch( llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) override; - llama_memory_state_ptr init_full() override; + llama_memory_context_ptr init_full() override; - llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) override; + llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override; void clear(bool data) override; @@ -125,24 +125,24 @@ class llama_memory_recurrent : public llama_memory_i { bool state_read_data(llama_io_read_i & io, uint32_t cell_count); }; -class llama_memory_recurrent_state : public llama_memory_state_i { +class llama_memory_recurrent_context : public llama_memory_context_i { public: // used for errors - llama_memory_recurrent_state(llama_memory_status status); + llama_memory_recurrent_context(llama_memory_status status); - // used to create a full-cache state - llama_memory_recurrent_state( + // used to create a full-cache or update context + llama_memory_recurrent_context( llama_memory_recurrent * mem); - // used to create a state from a batch - llama_memory_recurrent_state( + // used to create a batch processing context from a batch + llama_memory_recurrent_context( llama_memory_recurrent * mem, std::vector ubatches); - virtual ~llama_memory_recurrent_state(); + virtual ~llama_memory_recurrent_context(); // - // llama_memory_state_i + // llama_memory_context_i // bool next() override; @@ -152,7 +152,7 @@ class llama_memory_recurrent_state : public llama_memory_state_i { const llama_ubatch & get_ubatch() const override; // - // llama_memory_recurrent_state specific API + // llama_memory_recurrent_context specific API // uint32_t get_n_rs() const; diff --git a/src/llama-memory.h b/src/llama-memory.h index d2ef0c2a3b4aa..16b7e5ee2484a 100644 --- a/src/llama-memory.h +++ b/src/llama-memory.h @@ -3,7 +3,6 @@ #include "llama.h" #include -#include struct llama_ubatch; @@ -28,23 +27,21 @@ enum llama_memory_status { LLAMA_MEMORY_STATUS_FAILED_COMPUTE, }; -// helper function for combining the status of two memory states +// helper function for 
combining the status of two memory contexts // useful for implementing hybrid memory types (e.g. iSWA) llama_memory_status llama_memory_status_combine(llama_memory_status s0, llama_memory_status s1); -// the interface for managing the memory state during batch processing +// the interface for managing the memory context during batch processing // this interface is implemented per memory type. see: -// - llama_kv_cache_unified_state -// - llama_kv_cache_unified_iswa_state +// - llama_kv_cache_unified_context +// - llama_kv_cache_unified_iswa_context // ... // -// the only method that can mutate the memory and the memory state is llama_memory_i::apply() -// -// TODO: rename to llama_memory_context_i ? -struct llama_memory_state_i { - virtual ~llama_memory_state_i() = default; +// the only method that should mutate the memory and the memory context is llama_memory_i::apply() +struct llama_memory_context_i { + virtual ~llama_memory_context_i() = default; - // consume the current ubatch from the state and proceed to the next one + // consume the current ubatch from the context and proceed to the next one // return false if we are done virtual bool next() = 0; @@ -55,11 +52,11 @@ struct llama_memory_state_i { // get the current ubatch virtual const llama_ubatch & get_ubatch() const = 0; - // get the status of the memory state - used for error handling and checking if any updates would be applied + // get the status of the memory context - used for error handling and checking if any updates would be applied virtual llama_memory_status get_status() const = 0; }; -using llama_memory_state_ptr = std::unique_ptr; +using llama_memory_context_ptr = std::unique_ptr; // general concept of LLM memory // the KV cache is a type of LLM memory, but there can be other types @@ -67,19 +64,19 @@ struct llama_memory_i { virtual ~llama_memory_i() = default; // split the input batch into a set of ubatches and verify that they can fit into the cache - // return a state object containing the ubatches and KV cache state required to process them - // check the llama_memory_state_i::get_status() for the result - virtual llama_memory_state_ptr init_batch( + // return a context object containing the ubatches and memory state required to process them + // check the llama_memory_context_i::get_status() for the result + virtual llama_memory_context_ptr init_batch( llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) = 0; // simulate full cache, used for allocating worst-case compute buffers - virtual llama_memory_state_ptr init_full() = 0; + virtual llama_memory_context_ptr init_full() = 0; // prepare for any pending memory updates, such as shifts, defrags, etc. 
// status == LLAMA_MEMORY_STATUS_NO_UPDATE if there is nothing to update - virtual llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) = 0; + virtual llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) = 0; // getters virtual bool get_can_shift() const = 0; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index e2c82017f6890..9b19da984081e 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -9171,9 +9171,9 @@ struct llm_build_mamba : public llm_graph_context { ggml_tensor * cur, const llama_ubatch & ubatch, int il) const { - const auto * kv_state = static_cast(mstate); + const auto * mctx_cur = static_cast(mctx); - const auto kv_head = kv_state->get_head(); + const auto kv_head = mctx_cur->get_head(); const int64_t d_conv = hparams.ssm_d_conv; const int64_t d_inner = hparams.ssm_d_inner; @@ -9191,8 +9191,8 @@ struct llm_build_mamba : public llm_graph_context { GGML_ASSERT(ubatch.equal_seqs); GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs); - ggml_tensor * conv_states_all = kv_state->get_r_l(il); - ggml_tensor * ssm_states_all = kv_state->get_s_l(il); + ggml_tensor * conv_states_all = mctx_cur->get_r_l(il); + ggml_tensor * ssm_states_all = mctx_cur->get_s_l(il); // (ab)using the KV cache to store the states ggml_tensor * conv = build_rs( @@ -11916,7 +11916,7 @@ struct llm_build_rwkv6_base : public llm_graph_context { ggml_tensor * x_prev, const llama_ubatch & ubatch, int il) const { - const auto * kv_state = static_cast(mstate); + const auto * mctx_cur = static_cast(mctx); const auto n_tokens = ubatch.n_tokens; const auto n_seqs = ubatch.n_seqs; @@ -11926,7 +11926,7 @@ struct llm_build_rwkv6_base : public llm_graph_context { const auto n_head = n_embd / head_size; const auto n_head_kv = hparams.n_head_kv(il); - const auto kv_head = kv_state->get_head(); + const auto kv_head = mctx_cur->get_head(); const auto & layer = model.layers[il]; @@ -12038,7 +12038,7 @@ struct llm_build_rwkv6_base : public llm_graph_context { } ggml_tensor * wkv_state = build_rs( - inp, gf, kv_state->get_s_l(il), + inp, gf, mctx_cur->get_s_l(il), hparams.n_embd_s(), n_seqs); ggml_tensor * wkv_output; @@ -12057,9 +12057,9 @@ struct llm_build_rwkv6_base : public llm_graph_context { wkv_state, ggml_view_1d( ctx0, - kv_state->get_s_l(il), + mctx_cur->get_s_l(il), hparams.n_embd_s() * n_seqs, - hparams.n_embd_s() * kv_head * ggml_element_size(kv_state->get_s_l(il)) + hparams.n_embd_s() * kv_head * ggml_element_size(mctx_cur->get_s_l(il)) ) ) ); @@ -12313,7 +12313,7 @@ struct llm_build_rwkv7_base : public llm_graph_context { ggml_tensor *& first_layer_value, const llama_ubatch & ubatch, int il) const { - const auto * kv_state = static_cast(mstate); + const auto * mctx_cur = static_cast(mctx); const auto n_tokens = ubatch.n_tokens; const auto n_seqs = ubatch.n_seqs; @@ -12322,7 +12322,7 @@ struct llm_build_rwkv7_base : public llm_graph_context { const auto head_count = n_embd / head_size; const auto n_seq_tokens = ubatch.n_seq_tokens; - const auto kv_head = kv_state->get_head(); + const auto kv_head = mctx_cur->get_head(); const auto & layer = model.layers[il]; @@ -12393,7 +12393,7 @@ struct llm_build_rwkv7_base : public llm_graph_context { a = ggml_reshape_3d(ctx0, a, head_size, head_count, n_tokens); ggml_tensor * wkv_state = build_rs( - inp, gf, kv_state->get_s_l(il), + inp, gf, mctx_cur->get_s_l(il), hparams.n_embd_s(), n_seqs); ggml_tensor * wkv_output = ggml_rwkv_wkv7(ctx0, r, w, k, v, ggml_neg(ctx0, kk), ggml_mul(ctx0, kk, a), wkv_state); @@ -12407,9 
+12407,9 @@ struct llm_build_rwkv7_base : public llm_graph_context { wkv_state, ggml_view_1d( ctx0, - kv_state->get_s_l(il), + mctx_cur->get_s_l(il), hparams.n_embd_s() * n_seqs, - hparams.n_embd_s() * kv_head * ggml_element_size(kv_state->get_s_l(il)) + hparams.n_embd_s() * kv_head * ggml_element_size(mctx_cur->get_s_l(il)) ) ) ); From eb336c1058f93c0125a029105cf34e573ad738d3 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 21 Jun 2025 08:04:18 +0300 Subject: [PATCH 123/192] metal : fix thread-safety (#14300) ggml-ci --- ggml/src/ggml-metal/ggml-metal.m | 88 ++++++++++++++++++++++---------- 1 file changed, 60 insertions(+), 28 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 4e7f373cb435a..19f4d59e59747 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -48,22 +48,28 @@ int mtl_device_ref_count; id mtl_library; + NSLock * mtl_lock; + bool has_simdgroup_reduction; bool has_simdgroup_mm; bool has_residency_sets; bool has_bfloat; bool use_bfloat; + size_t max_size; + char name[128]; } g_ggml_ctx_dev_main = { /*.mtl_device =*/ nil, /*.mtl_device_ref_count =*/ 0, /*.mtl_library =*/ nil, + /*.mtl_lock =*/ nil, /*.has_simdgroup_reduction =*/ false, /*.has_simdgroup_mm =*/ false, /*.has_residency_sets =*/ false, /*.has_bfloat =*/ false, /*.use_bfloat =*/ false, + /*.max_size =*/ 0, /*.name =*/ "", }; @@ -71,6 +77,10 @@ static id ggml_backend_metal_device_acq(struct ggml_backend_metal_device_context * ctx) { assert(ctx != NULL); + if (ctx->mtl_lock == nil) { + ctx->mtl_lock = [[NSLock alloc] init]; + } + if (ctx->mtl_device == nil) { ctx->mtl_device = MTLCreateSystemDefaultDevice(); } @@ -94,6 +104,8 @@ ctx->use_bfloat = false; #endif + ctx->max_size = ctx->mtl_device.maxBufferLength; + strncpy(ctx->name, [[ctx->mtl_device name] UTF8String], sizeof(ctx->name) - 1); } @@ -110,6 +122,11 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte ctx->mtl_device_ref_count--; if (ctx->mtl_device_ref_count == 0) { + if (ctx->mtl_lock) { + [ctx->mtl_lock release]; + ctx->mtl_lock = nil; + } + if (ctx->mtl_library) { [ctx->mtl_library release]; ctx->mtl_library = nil; @@ -977,7 +994,7 @@ @implementation GGMLMetalClass struct ggml_backend_metal_context * ctx = calloc(1, sizeof(struct ggml_backend_metal_context)); struct ggml_backend_metal_device_context * ctx_dev = dev->context; - id device = ggml_backend_metal_device_acq(ctx_dev); + id device = ctx_dev->mtl_device; GGML_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]); @@ -991,9 +1008,16 @@ @implementation GGMLMetalClass ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT); // load library - if (ctx_dev->mtl_library == nil) { - ctx_dev->mtl_library = ggml_metal_load_library(device, ctx_dev->use_bfloat); + { + [ctx_dev->mtl_lock lock]; + + if (ctx_dev->mtl_library == nil) { + ctx_dev->mtl_library = ggml_metal_load_library(device, ctx_dev->use_bfloat); + } + + [ctx_dev->mtl_lock unlock]; } + id metal_library = ctx_dev->mtl_library; if (metal_library == nil) { GGML_LOG_ERROR("%s: error: metal library is nil\n", __func__); @@ -5284,7 +5308,6 @@ static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) } ggml_backend_metal_buffer_rset_free(ctx); - ggml_backend_metal_device_rel(buffer->buft->device->context); if (ctx->owned) { #if TARGET_OS_OSX @@ -5393,7 +5416,10 @@ static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_ba } struct 
ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)buft->device->context; - id device = ggml_backend_metal_device_acq(ctx_dev); + + GGML_ASSERT(ctx_dev->mtl_device != nil); + + id device = ctx_dev->mtl_device; ctx->all_data = ggml_metal_host_malloc(size_aligned); ctx->all_size = size_aligned; @@ -5416,14 +5442,12 @@ static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_ba if (size_aligned > 0 && (ctx->all_data == NULL || ctx->buffers[0].metal == nil)) { GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0); free(ctx); - ggml_backend_metal_device_rel(ctx_dev); return NULL; } if (!ggml_backend_metal_buffer_rset_init(ctx, ctx_dev, device)) { GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__); free(ctx); - ggml_backend_metal_device_rel(ctx_dev); return NULL; } @@ -5434,17 +5458,14 @@ static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_ba static size_t ggml_backend_metal_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return 32; + GGML_UNUSED(buft); } static size_t ggml_backend_metal_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) { - id device = ggml_backend_metal_device_acq(buft->device->context); - const size_t max_size = device.maxBufferLength; - ggml_backend_metal_device_rel(buft->device->context); + const size_t max_size = ((struct ggml_backend_metal_device_context *)buft->device->context)->max_size; return max_size; - - GGML_UNUSED(buft); } static bool ggml_backend_metal_buffer_type_is_host(ggml_backend_buffer_type_t buft) { @@ -5517,7 +5538,10 @@ ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t siz } struct ggml_backend_metal_device_context * ctx_dev = &g_ggml_ctx_dev_main; - id device = ggml_backend_metal_device_acq(ctx_dev); + + GGML_ASSERT(ctx_dev->mtl_device != nil); + + id device = ctx_dev->mtl_device; // the buffer fits into the max buffer size allowed by the device if (size_aligned <= device.maxBufferLength) { @@ -5573,7 +5597,6 @@ ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t siz if (!ggml_backend_metal_buffer_rset_init(ctx, ctx_dev, device)) { GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__); free(ctx); - ggml_backend_metal_device_rel(ctx_dev); return NULL; } @@ -5589,10 +5612,8 @@ ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t siz } static void ggml_backend_metal_free(ggml_backend_t backend) { - struct ggml_backend_metal_context * ctx = backend->context; - struct ggml_backend_metal_device_context * ctx_dev = backend->device->context; + struct ggml_backend_metal_context * ctx = backend->context; - ggml_backend_metal_device_rel(ctx_dev); ggml_metal_free(ctx); free(backend); @@ -5732,6 +5753,8 @@ bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family) { struct ggml_backend_metal_device_context * ctx_dev = backend->device->context; + GGML_ASSERT(ctx_dev->mtl_device != nil); + return [ctx_dev->mtl_device supportsFamily:(MTLGPUFamilyApple1 + family - 1)]; } @@ -5751,10 +5774,7 @@ void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) { } static const char * ggml_backend_metal_device_get_description(ggml_backend_dev_t dev) { - // acq/rel just to populate ctx->name in case it hasn't been done yet struct ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)dev->context; - 
ggml_backend_metal_device_acq(ctx_dev); - ggml_backend_metal_device_rel(ctx_dev); return ctx_dev->name; } @@ -5762,12 +5782,10 @@ void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) { static void ggml_backend_metal_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { if (@available(macOS 10.12, iOS 16.0, *)) { struct ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)dev->context; - id device = ggml_backend_metal_device_acq(ctx_dev); + id device = ctx_dev->mtl_device; *total = device.recommendedMaxWorkingSetSize; *free = *total - device.currentAllocatedSize; - - ggml_backend_metal_device_rel(ctx_dev); } else { *free = 1; *total = 1; @@ -5845,7 +5863,10 @@ static ggml_backend_buffer_t ggml_backend_metal_device_buffer_from_ptr(ggml_back } struct ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)dev->context; - id device = ggml_backend_metal_device_acq(ctx_dev); + + GGML_ASSERT(ctx_dev->mtl_device != nil); + + id device = ctx_dev->mtl_device; // the buffer fits into the max buffer size allowed by the device if (size_aligned <= device.maxBufferLength) { @@ -5901,7 +5922,6 @@ static ggml_backend_buffer_t ggml_backend_metal_device_buffer_from_ptr(ggml_back if (!ggml_backend_metal_buffer_rset_init(ctx, ctx_dev, device)) { GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__); free(ctx); - ggml_backend_metal_device_rel(ctx_dev); return NULL; } @@ -5915,8 +5935,9 @@ static bool ggml_backend_metal_device_supports_op(ggml_backend_dev_t dev, const } static bool ggml_backend_metal_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { - return buft->iface.get_name == ggml_backend_metal_buffer_type_get_name || - buft->iface.get_name == ggml_backend_metal_buffer_from_ptr_type_get_name; + return + buft->iface.get_name == ggml_backend_metal_buffer_type_get_name || + buft->iface.get_name == ggml_backend_metal_buffer_from_ptr_type_get_name; GGML_UNUSED(dev); } @@ -6001,8 +6022,19 @@ static ggml_backend_dev_t ggml_backend_metal_reg_device_get(ggml_backend_reg_t r /* .get_proc_address = */ ggml_backend_metal_get_proc_address, }; +// called upon program exit +static void ggml_metal_cleanup(void) { + ggml_backend_metal_device_rel(&g_ggml_ctx_dev_main); +} + +// TODO: make thread-safe ggml_backend_reg_t ggml_backend_metal_reg(void) { - // TODO: make this thread-safe somehow? 
+ ggml_backend_metal_device_acq(&g_ggml_ctx_dev_main); + + // register cleanup callback + // TODO: not ideal, but not sure if there is a better way to do this in Objective-C + atexit(ggml_metal_cleanup); + { g_ggml_backend_metal_reg = (struct ggml_backend_reg) { /* .api_version = */ GGML_BACKEND_API_VERSION, From 6f3eff547ef9726902165d0cc8dd602f9b621842 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Sat, 21 Jun 2025 07:33:21 +0200 Subject: [PATCH 124/192] gguf-py : fix TemplateProcessing pair when bos/eos is missing (#14312) --- gguf-py/gguf/vocab.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py index 6c4d3a422b99d..a792d56f0677d 100644 --- a/gguf-py/gguf/vocab.py +++ b/gguf-py/gguf/vocab.py @@ -201,9 +201,9 @@ def _try_load_from_tokenizer_json(self, path: Path) -> bool: if special_last != special_eos: logger.warning(f'Unknown trailing special token {special_last!r} in TemplateProcessing') if tmpl_pair: - seq_start = 1 if tmpl_pair[0].get('SpecialToken', {}).get('id') == special_first else 0 - seq_stop = -1 if tmpl_pair[-1].get('SpecialToken', {}).get('id') == special_last else None - if seq_start == 0 or seq_stop is None: + seq_start = 1 if special_first and tmpl_pair[0].get('SpecialToken', {}).get('id') == special_first else 0 + seq_stop = -1 if special_last and tmpl_pair[-1].get('SpecialToken', {}).get('id') == special_last else None + if (special_first and seq_start == 0) or (special_last and seq_stop is None): logger.warning('TemplateProcessing leading/trailing special tokens do not match TemplateProcessing') if tmpl_pair := tmpl_pair[slice(seq_start, seq_stop)]: tmpl_a = tmpl_pair[0].get('Sequence', {}).get('id') From ecdc6f46c28ed54440334cfc163a1db4d6ea5091 Mon Sep 17 00:00:00 2001 From: Markus Tavenrath Date: Sat, 21 Jun 2025 08:17:12 +0200 Subject: [PATCH 125/192] Add support for VK_EXT_debug_utils to add labels to Vulkan objects. (#13792) * Add support for VK_EXT_debug_utils to add labels to Vulkan objects. In step 1 compute pipelines are getting labeled. * remove #ifdef for debug utils and add queue marker. 
--- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 58 +++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 1375bfeb9dc50..99be5e45b2af7 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1041,6 +1041,14 @@ void vk_memory_logger::log_deallocation(vk_buffer_ref buf_ref) { struct vk_instance_t { vk::Instance instance; + bool debug_utils_support = false; // VK_EXT_debug_utils enabled + PFN_vkSetDebugUtilsObjectNameEXT pfn_vkSetDebugUtilsObjectNameEXT = {}; + PFN_vkQueueBeginDebugUtilsLabelEXT pfn_vkQueueBeginDebugUtilsLabelEXT = {}; + PFN_vkQueueEndDebugUtilsLabelEXT pfn_vkQueueEndDebugUtilsLabelEXT = {}; + PFN_vkCmdBeginDebugUtilsLabelEXT pfn_vkCmdBeginDebugUtilsLabelEXT = {}; + PFN_vkCmdEndDebugUtilsLabelEXT pfn_vkCmdEndDebugUtilsLabelEXT = {}; + PFN_vkCmdInsertDebugUtilsLabelEXT pfn_vkCmdInsertDebugUtilsLabelEXT = {}; + std::vector device_indices; vk_device devices[GGML_VK_MAX_DEVICES]; }; @@ -1180,6 +1188,14 @@ static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipelin } pipeline->compiled = true; + if (vk_instance.debug_utils_support) { + vk::DebugUtilsObjectNameInfoEXT duoni; + duoni.objectType = vk::ObjectType::ePipeline; + duoni.pObjectName = pipeline->name.c_str(); + duoni.objectHandle = reinterpret_cast(static_cast(pipeline->pipeline)); + vk_instance.pfn_vkSetDebugUtilsObjectNameEXT(device->device, &static_cast(duoni)); + } + { std::lock_guard guard(device->mutex); device->pipelines.insert({ pipeline->name, pipeline }); @@ -3561,6 +3577,8 @@ static void ggml_vk_print_gpu_info(size_t idx) { static bool ggml_vk_instance_validation_ext_available(const std::vector& instance_extensions); static bool ggml_vk_instance_portability_enumeration_ext_available(const std::vector& instance_extensions); +static bool ggml_vk_instance_debug_utils_ext_available(const std::vector & instance_extensions); + static void ggml_vk_instance_init() { if (vk_instance_initialized) { return; @@ -3581,7 +3599,7 @@ static void ggml_vk_instance_init() { #ifdef __APPLE__ const bool portability_enumeration_ext = ggml_vk_instance_portability_enumeration_ext_available(instance_extensions); #endif - + const bool debug_utils_ext = ggml_vk_instance_debug_utils_ext_available(instance_extensions) && getenv("GGML_VK_DEBUG_MARKERS") != nullptr; std::vector layers; if (validation_ext) { @@ -3596,6 +3614,9 @@ static void ggml_vk_instance_init() { extensions.push_back("VK_KHR_portability_enumeration"); } #endif + if (debug_utils_ext) { + extensions.push_back("VK_EXT_debug_utils"); + } vk::InstanceCreateInfo instance_create_info(vk::InstanceCreateFlags{}, &app_info, layers, extensions); #ifdef __APPLE__ if (portability_enumeration_ext) { @@ -3619,6 +3640,18 @@ static void ggml_vk_instance_init() { vk_instance.instance = vk::createInstance(instance_create_info); vk_instance_initialized = true; + if (debug_utils_ext) { + vk_instance.debug_utils_support = true; + vk_instance.pfn_vkSetDebugUtilsObjectNameEXT = (PFN_vkSetDebugUtilsObjectNameEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkSetDebugUtilsObjectNameEXT"); + vk_instance.pfn_vkQueueBeginDebugUtilsLabelEXT = (PFN_vkQueueBeginDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkQueueBeginDebugUtilsLabelEXT"); + vk_instance.pfn_vkQueueEndDebugUtilsLabelEXT = (PFN_vkQueueEndDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkQueueEndDebugUtilsLabelEXT"); + 
vk_instance.pfn_vkCmdBeginDebugUtilsLabelEXT = (PFN_vkCmdBeginDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkCmdBeginDebugUtilsLabelEXT"); + vk_instance.pfn_vkCmdEndDebugUtilsLabelEXT = (PFN_vkCmdEndDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkCmdEndDebugUtilsLabelEXT"); + vk_instance.pfn_vkCmdInsertDebugUtilsLabelEXT = (PFN_vkCmdInsertDebugUtilsLabelEXT) vkGetInstanceProcAddr(vk_instance.instance, "vkCmdInsertDebugUtilsLabelEXT"); + + } + + size_t num_available_devices = vk_instance.instance.enumeratePhysicalDevices().size(); vk_perf_logger_enabled = getenv("GGML_VK_PERF_LOGGER") != nullptr; // Emulate behavior of CUDA_VISIBLE_DEVICES for Vulkan @@ -9656,6 +9689,13 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg VK_LOG_DEBUG("ggml_backend_vk_graph_compute(" << cgraph->n_nodes << " nodes)"); ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context; + if (vk_instance.debug_utils_support) { + vk::DebugUtilsLabelEXT dul = {}; + dul.pLabelName = "ggml_backend_vk_graph_compute"; + dul.color = std::array{1.0f, 1.0f, 1.0f, 1.0f}; + vk_instance.pfn_vkQueueBeginDebugUtilsLabelEXT(ctx->device->compute_queue.queue, reinterpret_cast(&dul)); + } + uint64_t total_mat_mul_bytes = 0; for (int i = 0; i < cgraph->n_nodes; i++) { ggml_vk_build_graph(ctx, cgraph->nodes[i], i, nullptr, 0, true, false, false, false); @@ -10345,6 +10385,22 @@ static bool ggml_vk_instance_portability_enumeration_ext_available(const std::ve UNUSED(instance_extensions); } +// Extension availability +static bool ggml_vk_instance_debug_utils_ext_available( + const std::vector & instance_extensions) { + // Check for portability enumeration extension for MoltenVK support + for (const auto & properties : instance_extensions) { + if (strcmp("VK_EXT_debug_utils", properties.extensionName) == 0) { + return true; + } + } + + std::cerr << "ggml_vulkan: WARNING: Instance extension VK_EXT_debug_utils not found." 
<< std::endl; + return false; + + UNUSED(instance_extensions); +} + static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props, vk_device_architecture arch) { switch (props.vendorID) { case VK_VENDOR_ID_INTEL: From 034639de85941fd7a17e7b271fa0d93a899af96e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Sat, 21 Jun 2025 18:12:05 +0200 Subject: [PATCH 126/192] gguf-py : fix Qwen3-Embedding eos token (#14314) --- gguf-py/gguf/vocab.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py index a792d56f0677d..3b08f6134a67a 100644 --- a/gguf-py/gguf/vocab.py +++ b/gguf-py/gguf/vocab.py @@ -197,6 +197,16 @@ def _try_load_from_tokenizer_json(self, path: Path) -> bool: if special_last := tmpl_single[-1].get('SpecialToken', {}).get('id'): if not tokenizer_config: special_eos = special_last + elif special_last != special_eos: + if 'eot' not in self.special_token_types: + self.special_token_types = tuple(self.special_token_types) + ('eot', ) + tokenizer_config['eot_token'] = special_eos + elif 'eom' not in self.special_token_types: + self.special_token_types = tuple(self.special_token_types) + ('eom', ) + tokenizer_config['eom_token'] = special_eos + else: + logger.warning(f'Overriding EOS token {special_eos!r} with {special_last!r} without EOT/EOM fallback!') + tokenizer_config['eos_token'] = special_eos = special_last self.add_special_token['eos'] = True if special_last == special_eos else False if special_last != special_eos: logger.warning(f'Unknown trailing special token {special_last!r} in TemplateProcessing') From 0f1694d39b53861acb3b888d77ef8cb02ed3ea91 Mon Sep 17 00:00:00 2001 From: Aman Gupta Date: Sun, 22 Jun 2025 12:39:54 +0800 Subject: [PATCH 127/192] CUDA: add mean operation (#14313) * CUDA: add mean operation * add back sum_rows_f32_cuda * Review: early exit if col!=0 --- ggml/src/ggml-cuda/common.cuh | 20 ++++++++++++++++++++ ggml/src/ggml-cuda/ggml-cuda.cu | 5 +++++ ggml/src/ggml-cuda/mean.cu | 19 +++++++++++++++++++ ggml/src/ggml-cuda/mean.cuh | 3 +++ ggml/src/ggml-cuda/sumrows.cu | 23 +++++------------------ ggml/src/ggml-cuda/sumrows.cuh | 1 - tests/test-backend-ops.cpp | 2 ++ 7 files changed, 54 insertions(+), 19 deletions(-) create mode 100644 ggml/src/ggml-cuda/mean.cu create mode 100644 ggml/src/ggml-cuda/mean.cuh diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 364efcaeccc07..2f2fce0677066 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -362,6 +362,26 @@ static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) { #endif // FP16_AVAILABLE } +// Row reduction kernel template - compute sum (norm=false) or mean (norm=true) +template +static __global__ void reduce_rows_f32(const float * x, float * dst, const int ncols) { + const int row = blockIdx.x; + const int col = threadIdx.x; + + float sum = 0.0f; + for (int i = col; i < ncols; i += blockDim.x) { + sum += x[row * ncols + i]; + } + + sum = warp_reduce_sum(sum); + + if (col != 0) { + return; + } + + dst[row] = norm ? 
sum / ncols : sum; +} + template static __device__ __forceinline__ float warp_reduce_max(float x) { #pragma unroll diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 5bab92e347a7e..c6bdd4fb3021f 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -37,6 +37,7 @@ #include "ggml-cuda/ssm-scan.cuh" #include "ggml-cuda/sum.cuh" #include "ggml-cuda/sumrows.cuh" +#include "ggml-cuda/mean.cuh" #include "ggml-cuda/tsembd.cuh" #include "ggml-cuda/unary.cuh" #include "ggml-cuda/upscale.cuh" @@ -2357,6 +2358,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_SUM_ROWS: ggml_cuda_op_sum_rows(ctx, dst); break; + case GGML_OP_MEAN: + ggml_cuda_op_mean(ctx, dst); + break; case GGML_OP_SSM_CONV: ggml_cuda_op_ssm_conv(ctx, dst); break; @@ -3260,6 +3264,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_POOL_2D: case GGML_OP_SUM: case GGML_OP_SUM_ROWS: + case GGML_OP_MEAN: case GGML_OP_ARGSORT: case GGML_OP_ACC: return true; diff --git a/ggml/src/ggml-cuda/mean.cu b/ggml/src/ggml-cuda/mean.cu new file mode 100644 index 0000000000000..4b238a3998ba3 --- /dev/null +++ b/ggml/src/ggml-cuda/mean.cu @@ -0,0 +1,19 @@ +#include "mean.cuh" + +void ggml_cuda_op_mean(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *) src0->data; + float * dst_d = (float *) dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(ggml_is_contiguous(src0)); + + const int64_t ncols = src0->ne[0]; + const int64_t nrows = ggml_nrows(src0); + + const dim3 block_dims(WARP_SIZE, 1, 1); + const dim3 block_nums(nrows, 1, 1); + reduce_rows_f32<<>>(src0_d, dst_d, ncols); +} diff --git a/ggml/src/ggml-cuda/mean.cuh b/ggml/src/ggml-cuda/mean.cuh new file mode 100644 index 0000000000000..2b9b10433438e --- /dev/null +++ b/ggml/src/ggml-cuda/mean.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_op_mean(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-cuda/sumrows.cu b/ggml/src/ggml-cuda/sumrows.cu index 38dbf1b5e1fa9..2eee08fa07375 100644 --- a/ggml/src/ggml-cuda/sumrows.cu +++ b/ggml/src/ggml-cuda/sumrows.cu @@ -1,25 +1,9 @@ #include "sumrows.cuh" -static __global__ void k_sum_rows_f32(const float * x, float * dst, const int ncols) { - const int row = blockIdx.x; - const int col = threadIdx.x; - - float sum = 0.0f; - for (int i = col; i < ncols; i += blockDim.x) { - sum += x[row * ncols + i]; - } - - sum = warp_reduce_sum(sum); - - if (col == 0) { - dst[row] = sum; - } -} - void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream) { const dim3 block_dims(WARP_SIZE, 1, 1); const dim3 block_nums(nrows, 1, 1); - k_sum_rows_f32<<>>(x, dst, ncols); + reduce_rows_f32<<>>(x, dst, ncols); } void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { @@ -35,5 +19,8 @@ void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int64_t ncols = src0->ne[0]; const int64_t nrows = ggml_nrows(src0); - sum_rows_f32_cuda(src0_d, dst_d, ncols, nrows, stream); + const dim3 block_dims(WARP_SIZE, 1, 1); + const dim3 block_nums(nrows, 1, 1); + + reduce_rows_f32<<>>(src0_d, dst_d, ncols); } diff --git a/ggml/src/ggml-cuda/sumrows.cuh b/ggml/src/ggml-cuda/sumrows.cuh index 
191db1c13167e..3431c599b1b89 100644 --- a/ggml/src/ggml-cuda/sumrows.cuh +++ b/ggml/src/ggml-cuda/sumrows.cuh @@ -1,5 +1,4 @@ #include "common.cuh" void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream); - void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 772bee346f000..7be7f2205fa04 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -4652,6 +4652,8 @@ static std::vector> make_test_cases_perf() { test_cases.emplace_back(new test_conv_transpose_2d({256, 256, 256, 1}, {3, 3, 16, 256}, 1)); + test_cases.emplace_back(new test_mean(GGML_TYPE_F32, {256, 256, 3, 1})); + return test_cases; } From 72ab54381490b29ff6cabd1936a3cfbe4d239e26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Sun, 22 Jun 2025 07:37:43 +0200 Subject: [PATCH 128/192] common : use std::string_view now that we target c++17 (#14319) --- common/json-schema-to-grammar.cpp | 49 ++----------------------------- 1 file changed, 3 insertions(+), 46 deletions(-) diff --git a/common/json-schema-to-grammar.cpp b/common/json-schema-to-grammar.cpp index d38a74f95c213..637891f50699c 100644 --- a/common/json-schema-to-grammar.cpp +++ b/common/json-schema-to-grammar.cpp @@ -41,49 +41,6 @@ static std::string build_repetition(const std::string & item_rule, int min_items return result; } -/* Minimalistic replacement for std::string_view, which is only available from C++17 onwards */ -class string_view { - const std::string & _str; - const size_t _start; - const size_t _end; -public: - string_view(const std::string & str, size_t start = 0, size_t end = std::string::npos) : _str(str), _start(start), _end(end == std::string::npos ? str.length() : end) {} - - size_t size() const { - return _end - _start; - } - - size_t length() const { - return size(); - } - - operator std::string() const { - return str(); - } - - std::string str() const { - return _str.substr(_start, _end - _start); - } - - string_view substr(size_t pos, size_t len = std::string::npos) const { - return string_view(_str, _start + pos, len == std::string::npos ? 
_end : _start + pos + len); - } - - char operator[](size_t pos) const { - auto index = _start + pos; - if (index >= _end) { - throw std::out_of_range("string_view index out of range"); - } - return _str[_start + pos]; - } - - bool operator==(const string_view & other) const { - std::string this_str = *this; - std::string other_str = other; - return this_str == other_str; - } -}; - static void _build_min_max_int(int min_value, int max_value, std::stringstream & out, int decimals_left = 16, bool top_level = true) { auto has_min = min_value != std::numeric_limits::min(); auto has_max = max_value != std::numeric_limits::max(); @@ -112,14 +69,14 @@ static void _build_min_max_int(int min_value, int max_value, std::stringstream & } out << "}"; }; - std::function uniform_range = - [&](const string_view & from, const string_view & to) { + std::function uniform_range = + [&](const std::string_view & from, const std::string_view & to) { size_t i = 0; while (i < from.length() && i < to.length() && from[i] == to[i]) { i++; } if (i > 0) { - out << "\"" << from.substr(0, i).str() << "\""; + out << "\"" << from.substr(0, i) << "\""; } if (i < from.length() && i < to.length()) { if (i > 0) { From d13978e8773b7cea10aae9faf327f15ed93e47a6 Mon Sep 17 00:00:00 2001 From: yuiseki Date: Sun, 22 Jun 2025 21:44:57 +0900 Subject: [PATCH 129/192] mtmd : fix Pixtral OOM with large images by capping image_size to 1024 (#14326) Mistral Small 2506 models using Pixtral vision encoder were running out of GPU memory when processing images larger than 1024x1024 pixels due to exponential memory growth from unlimited image size. This fix applies the same 1024x1024 limit used by Qwen2VL models to prevent OOM issues while maintaining compatibility with existing models. --- tools/mtmd/clip.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp index 30283d6f1f032..a990520ed3fbb 100644 --- a/tools/mtmd/clip.cpp +++ b/tools/mtmd/clip.cpp @@ -2211,6 +2211,9 @@ struct clip_model_loader { { hparams.rope_theta = 10000.0f; hparams.warmup_image_size = hparams.patch_size * 8; + // Mistral Small 2506 needs 1024x1024 image size cap to prevent OOM + // ref: https://github.com/ggml-org/llama.cpp/issues/14310 + hparams.image_size = 1024; get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.spatial_merge_size, false); } break; case PROJECTOR_TYPE_GEMMA3: From 3e11cbb0a5be45d00c1ad9e2f1b6d19dd6ea3845 Mon Sep 17 00:00:00 2001 From: uvos Date: Sun, 22 Jun 2025 16:51:23 +0200 Subject: [PATCH 130/192] HIP: enable vec fattn on RDNA4 (#14323) --- ggml/src/ggml-cuda/common.cuh | 14 ++++++++++++-- ggml/src/ggml-cuda/ggml-cuda.cu | 7 ++----- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 2f2fce0677066..86c4d29a5d254 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -241,8 +241,18 @@ static bool fp16_mma_available(const int cc) { #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && !defined(GGML_HIP_ROCWMMA_FATTN) return false; #else - return (GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA) || - GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc); + if ((GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA) || + GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc)) { + return true; + } else if (GGML_CUDA_CC_IS_RDNA4(cc)) { +#if defined(GGML_HIP_ROCWMMA_FATTN) && 
defined(GGML_HIP_ROCWMMA_FATTN_GFX12) + return true; +#else + return false; +#endif // defined(GGML_HIP_ROCWMMA_FATTN) && defined(GGML_HIP_ROCWMMA_FATTN_GFX12) + } else { + return false; + } #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && !defined(GGML_HIP_ROCWMMA_FATTN) } diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index c6bdd4fb3021f..462db71e1a610 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -100,8 +100,7 @@ int ggml_cuda_get_device() { static cudaError_t ggml_cuda_device_malloc(void ** ptr, size_t size, int device) { ggml_cuda_set_device(device); cudaError_t err; - if (getenv("GGML_CUDA_ENABLE_UNIFIED_MEMORY") != nullptr) - { + if (getenv("GGML_CUDA_ENABLE_UNIFIED_MEMORY") != nullptr) { err = cudaMallocManaged(ptr, size); #if defined(GGML_USE_HIP) if (err == hipSuccess) { @@ -119,9 +118,7 @@ static cudaError_t ggml_cuda_device_malloc(void ** ptr, size_t size, int device) err = cudaMalloc(ptr, size); } #endif // defined(GGML_USE_HIP) - } - else - { + } else { err = cudaMalloc(ptr, size); } return err; From fa8fca69f69e9abed1880391d56651002f3ca0e7 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 22 Jun 2025 20:10:07 +0300 Subject: [PATCH 131/192] examples : fix is_first logic for tokenization (#14329) ggml-ci --- examples/simple-chat/simple-chat.cpp | 2 +- tools/run/run.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/simple-chat/simple-chat.cpp b/examples/simple-chat/simple-chat.cpp index 2aee0a919e60d..cf1178043d8d1 100644 --- a/examples/simple-chat/simple-chat.cpp +++ b/examples/simple-chat/simple-chat.cpp @@ -98,7 +98,7 @@ int main(int argc, char ** argv) { auto generate = [&](const std::string & prompt) { std::string response; - const bool is_first = llama_memory_seq_pos_max(llama_get_memory(ctx), 0) == 0; + const bool is_first = llama_memory_seq_pos_max(llama_get_memory(ctx), 0) == -1; // tokenize the prompt const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true); diff --git a/tools/run/run.cpp b/tools/run/run.cpp index c65afd61e023c..b8556515564a6 100644 --- a/tools/run/run.cpp +++ b/tools/run/run.cpp @@ -939,7 +939,7 @@ static int apply_chat_template(const struct common_chat_templates * tmpls, Llama // Function to tokenize the prompt static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt, std::vector & prompt_tokens, const LlamaData & llama_data) { - const bool is_first = llama_memory_seq_pos_max(llama_get_memory(llama_data.context.get()), 0) == 0; + const bool is_first = llama_memory_seq_pos_max(llama_get_memory(llama_data.context.get()), 0) == -1; const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true); prompt_tokens.resize(n_prompt_tokens); From 6a45b538e3d0781ea98a8550c3ab4c214f2662dc Mon Sep 17 00:00:00 2001 From: Ruikai Peng Date: Mon, 23 Jun 2025 01:28:06 +0800 Subject: [PATCH 132/192] run : avoid double tokenization (#14327) * run : avoid double tokenization by adopting common_tokenize heuristic * build : fix windows gcc and clang warnings * lint : fixed trailing whitepace * run : fix is_first flag --- tools/run/run.cpp | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/tools/run/run.cpp b/tools/run/run.cpp index b8556515564a6..6fe728c685358 100644 --- a/tools/run/run.cpp +++ b/tools/run/run.cpp @@ -9,6 +9,9 @@ #include #if defined(_WIN32) +# ifndef NOMINMAX +# 
define NOMINMAX +# endif # include # include #else @@ -940,16 +943,29 @@ static int apply_chat_template(const struct common_chat_templates * tmpls, Llama static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt, std::vector & prompt_tokens, const LlamaData & llama_data) { const bool is_first = llama_memory_seq_pos_max(llama_get_memory(llama_data.context.get()), 0) == -1; - - const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true); - prompt_tokens.resize(n_prompt_tokens); - if (llama_tokenize(vocab, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), is_first, - true) < 0) { - printe("failed to tokenize the prompt\n"); + int n_tokens = prompt.size() + 2 * is_first; + prompt_tokens.resize(n_tokens); + n_tokens = llama_tokenize(vocab, prompt.c_str(), prompt.size(), + prompt_tokens.data(), prompt_tokens.size(), + is_first, /*parse_special =*/true); + if (n_tokens == std::numeric_limits::min()) { + printe("tokenization failed: input too large\n"); return -1; } - - return n_prompt_tokens; + if (n_tokens < 0) { + prompt_tokens.resize(-n_tokens); + int check = llama_tokenize(vocab, prompt.c_str(), prompt.size(), + prompt_tokens.data(), prompt_tokens.size(), + is_first, /*parse_special =*/true); + if (check != -n_tokens) { + printe("failed to tokenize the prompt (size mismatch)\n"); + return -1; + } + n_tokens = check; + } else { + prompt_tokens.resize(n_tokens); + } + return n_tokens; } // Check if we have enough space in the context to evaluate this batch From ade7b2d28533482e3d77265fda6cdc13d910e63b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Sun, 22 Jun 2025 19:46:17 +0200 Subject: [PATCH 133/192] gguf-py : fix SpecialVocab parsing when post_processor is null (#14330) --- gguf-py/gguf/vocab.py | 144 +++++++++++++++++++++--------------------- 1 file changed, 72 insertions(+), 72 deletions(-) diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py index 3b08f6134a67a..3f541b0c02e52 100644 --- a/gguf-py/gguf/vocab.py +++ b/gguf-py/gguf/vocab.py @@ -167,81 +167,81 @@ def _try_load_from_tokenizer_json(self, path: Path) -> bool: tokenizer_config['bos_token'] = special_bos = special_cls if not special_eos and special_sep and tokenizer_config: tokenizer_config['eos_token'] = special_eos = special_sep - post_processor = tokenizer.get('post_processor', {}) - for processor in post_processor.get('processors', [post_processor]): - if processor.get('type') == 'RobertaProcessing': - self.add_special_token['bos'] = True - self.add_special_token['eos'] = True - self.add_special_token['sep'] = True - if not special_cls and tokenizer_config: - special_cls = processor.get('cls', [special_bos])[0] - tokenizer_config['cls_token'] = special_cls - if not special_sep and tokenizer_config: - special_sep = processor.get('sep', [special_eos])[0] - tokenizer_config['sep_token'] = special_sep - continue - # Crude parsing of TemplateProcessing to determine if BOS/SEP/EOS should be added - # Only works with simple templates, **will** get it wrong on unusual sequences - if processor.get('type') == 'TemplateProcessing': - tmpl_single = processor.get('single', []) - tmpl_pair = processor.get('pair', []) - special_first = None - special_last = None - if len(tmpl_single) > 1: - if special_first := tmpl_single[0].get('SpecialToken', {}).get('id'): - if not tokenizer_config: - special_bos = special_first - self.add_special_token['bos'] = True if special_first in (special_bos, special_cls) else False 
- if special_first not in (special_bos, special_cls): - logger.warning(f'Unknown leading special token {special_first!r} in TemplateProcessing') - if special_last := tmpl_single[-1].get('SpecialToken', {}).get('id'): - if not tokenizer_config: - special_eos = special_last - elif special_last != special_eos: - if 'eot' not in self.special_token_types: - self.special_token_types = tuple(self.special_token_types) + ('eot', ) - tokenizer_config['eot_token'] = special_eos - elif 'eom' not in self.special_token_types: - self.special_token_types = tuple(self.special_token_types) + ('eom', ) - tokenizer_config['eom_token'] = special_eos - else: - logger.warning(f'Overriding EOS token {special_eos!r} with {special_last!r} without EOT/EOM fallback!') - tokenizer_config['eos_token'] = special_eos = special_last - self.add_special_token['eos'] = True if special_last == special_eos else False - if special_last != special_eos: - logger.warning(f'Unknown trailing special token {special_last!r} in TemplateProcessing') - if tmpl_pair: - seq_start = 1 if special_first and tmpl_pair[0].get('SpecialToken', {}).get('id') == special_first else 0 - seq_stop = -1 if special_last and tmpl_pair[-1].get('SpecialToken', {}).get('id') == special_last else None - if (special_first and seq_start == 0) or (special_last and seq_stop is None): - logger.warning('TemplateProcessing leading/trailing special tokens do not match TemplateProcessing') - if tmpl_pair := tmpl_pair[slice(seq_start, seq_stop)]: - tmpl_a = tmpl_pair[0].get('Sequence', {}).get('id') - tmpl_b = tmpl_pair[-1].get('Sequence', {}).get('id') - if tmpl_a != 'A' or tmpl_b != 'B': - logger.warning(f'Unknown sequence {tmpl_a}...{tmpl_b} in TemplateProcessing') - # A [sep] [eos] B - if tmpl_a == 'A' and tmpl_b == 'B' and (tmpl_pair := tmpl_pair[1:-1]): - add_sep = False - if special_entry := tmpl_pair[0].get('SpecialToken', {}).get('id'): - if special_entry in (special_sep, special_eos) and not special_last: - add_sep = True - if special_entry not in (special_sep, special_eos): - logger.warning(f'Unknown separator token {special_entry!r} in TemplateProcessing') - else: - logger.warning(f'Unknown middle sequence {tmpl_pair[0]!r} in TemplateProcessing') - if len(tmpl_pair) == 2: - if special_entry := tmpl_pair[1].get('SpecialToken', {}).get('id'): - if special_entry in (special_sep, special_eos): + if post_processor := tokenizer.get('post_processor'): + for processor in post_processor.get('processors', [post_processor]): + if processor.get('type') == 'RobertaProcessing': + self.add_special_token['bos'] = True + self.add_special_token['eos'] = True + self.add_special_token['sep'] = True + if not special_cls and tokenizer_config: + special_cls = processor.get('cls', [special_bos])[0] + tokenizer_config['cls_token'] = special_cls + if not special_sep and tokenizer_config: + special_sep = processor.get('sep', [special_eos])[0] + tokenizer_config['sep_token'] = special_sep + continue + # Crude parsing of TemplateProcessing to determine if BOS/SEP/EOS should be added + # Only works with simple templates, **will** get it wrong on unusual sequences + if processor.get('type') == 'TemplateProcessing': + tmpl_single = processor.get('single', []) + tmpl_pair = processor.get('pair', []) + special_first = None + special_last = None + if len(tmpl_single) > 1: + if special_first := tmpl_single[0].get('SpecialToken', {}).get('id'): + if not tokenizer_config: + special_bos = special_first + self.add_special_token['bos'] = True if special_first in (special_bos, special_cls) else 
False + if special_first not in (special_bos, special_cls): + logger.warning(f'Unknown leading special token {special_first!r} in TemplateProcessing') + if special_last := tmpl_single[-1].get('SpecialToken', {}).get('id'): + if not tokenizer_config: + special_eos = special_last + elif special_last != special_eos: + if 'eot' not in self.special_token_types: + self.special_token_types = tuple(self.special_token_types) + ('eot', ) + tokenizer_config['eot_token'] = special_eos + elif 'eom' not in self.special_token_types: + self.special_token_types = tuple(self.special_token_types) + ('eom', ) + tokenizer_config['eom_token'] = special_eos + else: + logger.warning(f'Overriding EOS token {special_eos!r} with {special_last!r} without EOT/EOM fallback!') + tokenizer_config['eos_token'] = special_eos = special_last + self.add_special_token['eos'] = True if special_last == special_eos else False + if special_last != special_eos: + logger.warning(f'Unknown trailing special token {special_last!r} in TemplateProcessing') + if tmpl_pair: + seq_start = 1 if special_first and tmpl_pair[0].get('SpecialToken', {}).get('id') == special_first else 0 + seq_stop = -1 if special_last and tmpl_pair[-1].get('SpecialToken', {}).get('id') == special_last else None + if (special_first and seq_start == 0) or (special_last and seq_stop is None): + logger.warning('TemplateProcessing leading/trailing special tokens do not match TemplateProcessing') + if tmpl_pair := tmpl_pair[slice(seq_start, seq_stop)]: + tmpl_a = tmpl_pair[0].get('Sequence', {}).get('id') + tmpl_b = tmpl_pair[-1].get('Sequence', {}).get('id') + if tmpl_a != 'A' or tmpl_b != 'B': + logger.warning(f'Unknown sequence {tmpl_a}...{tmpl_b} in TemplateProcessing') + # A [sep] [eos] B + if tmpl_a == 'A' and tmpl_b == 'B' and (tmpl_pair := tmpl_pair[1:-1]): + add_sep = False + if special_entry := tmpl_pair[0].get('SpecialToken', {}).get('id'): + if special_entry in (special_sep, special_eos) and not special_last: add_sep = True if special_entry not in (special_sep, special_eos): - logger.warning(f'Unknown second separator token {special_entry!r} in TemplateProcessing') + logger.warning(f'Unknown separator token {special_entry!r} in TemplateProcessing') else: - logger.warning(f'Unknown second middle sequence {tmpl_pair[1]!r} in TemplateProcessing') - self.add_special_token['sep'] = add_sep - if add_sep and not special_sep and tokenizer_config: - tokenizer_config['sep_token'] = special_eos - continue + logger.warning(f'Unknown middle sequence {tmpl_pair[0]!r} in TemplateProcessing') + if len(tmpl_pair) == 2: + if special_entry := tmpl_pair[1].get('SpecialToken', {}).get('id'): + if special_entry in (special_sep, special_eos): + add_sep = True + if special_entry not in (special_sep, special_eos): + logger.warning(f'Unknown second separator token {special_entry!r} in TemplateProcessing') + else: + logger.warning(f'Unknown second middle sequence {tmpl_pair[1]!r} in TemplateProcessing') + self.add_special_token['sep'] = add_sep + if add_sep and not special_sep and tokenizer_config: + tokenizer_config['sep_token'] = special_eos + continue if not tokenizer_config: return True chat_template_alt = None From 113c4225e557f4414d8a23f4ada64c718de0a7db Mon Sep 17 00:00:00 2001 From: Ed Addario <29247825+EAddario@users.noreply.github.com> Date: Sun, 22 Jun 2025 22:16:26 +0100 Subject: [PATCH 134/192] quantize : handle user-defined pruning of whole layers (blocks) (#13037) --- include/llama.h | 1 + src/llama-quant.cpp | 83 +++++++++++++++++++++++++++++++++++-- 
tools/quantize/quantize.cpp | 44 +++++++++++++++++--- 3 files changed, 119 insertions(+), 9 deletions(-) diff --git a/include/llama.h b/include/llama.h index b04720bee59ef..f4123d14ac1d8 100644 --- a/include/llama.h +++ b/include/llama.h @@ -390,6 +390,7 @@ extern "C" { void * imatrix; // pointer to importance matrix data void * kv_overrides; // pointer to vector containing overrides void * tensor_types; // pointer to vector containing tensor types + void * prune_layers; // pointer to vector containing layer indices to prune } llama_model_quantize_params; typedef struct llama_logit_bias { diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 8cf45732fd6d4..43229e1938597 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -1,5 +1,4 @@ #include "llama-quant.h" - #include "llama-impl.h" #include "llama-model.h" #include "llama-model-loader.h" @@ -27,6 +26,56 @@ static void zeros(std::ofstream & file, size_t n) { } } +static std::string remap_layer(const std::string & orig_name, const std::vector & prune, std::map & mapped, int & next_id) { + if (prune.empty()) { + return orig_name; + } + + static const std::regex pattern(R"(blk\.(\d+)\.)"); + if (std::smatch match; std::regex_search(orig_name, match, pattern)) { + const int blk = std::stoi(match[1]); + std::string new_name = orig_name; + + if (mapped.count(blk)) { + // Already mapped, do nothing + } else if (std::find(prune.begin(), prune.end(), blk) != prune.end()) { + mapped[blk] = ""; + } else if (blk < prune.front()) { + mapped[blk] = std::to_string(blk); + next_id = blk + 1; + } else { + mapped[blk] = std::to_string(next_id); + ++next_id; + } + + return mapped[blk].empty() ? mapped[blk] : new_name.replace(match.position(1), match.length(1), mapped[blk]); + } + + return orig_name; +} + +static std::string remap_imatrix (const std::string & orig_name, const std::map & mapped) { + if (mapped.empty()) { + return orig_name; + } + + static const std::regex pattern(R"(blk\.(\d+)\.)"); + if (std::smatch match; std::regex_search(orig_name, match, pattern)) { + const std::string blk(match[1]); + std::string new_name = orig_name; + + for (const auto & p : mapped) { + if (p.second == blk) { + LLAMA_LOG_DEBUG("(blk.%d imatrix) ", p.first); + return new_name.replace(match.position(1), match.length(1), std::to_string(p.first)); + } + } + GGML_ABORT("\n%s: imatrix mapping error for %s\n", __func__, orig_name.c_str()); + } + + return orig_name; +} + struct quantize_state_impl { const llama_model & model; const llama_model_quantize_params * params; @@ -568,6 +617,11 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: const size_t align = GGUF_DEFAULT_ALIGNMENT; gguf_context_ptr ctx_out { gguf_init_empty() }; + std::vector prune_list = {}; + if (params->prune_layers) { + prune_list = *static_cast *>(params->prune_layers); + } + // copy the KV pairs from the input file gguf_set_kv (ctx_out.get(), ml.meta.get()); gguf_set_val_u32(ctx_out.get(), "general.quantization_version", GGML_QNT_VERSION); // TODO: use LLM_KV @@ -597,12 +651,32 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: } } + std::map mapped; + int blk_id = 0; + int pruned_attention_w = 0; + // make a list of weights std::vector tensors; tensors.reserve(ml.weights_map.size()); for (const auto & it : ml.weights_map) { + const std::string remapped_name(remap_layer(it.first, prune_list, mapped, blk_id)); + if (remapped_name.empty()) { + if (it.first.find("attn_v.weight") != std::string::npos || + 
it.first.find("attn_qkv.weight") != std::string::npos || + it.first.find("attn_kv_b.weight") != std::string::npos) { + pruned_attention_w++; + } + LLAMA_LOG_DEBUG("%s: pruning tensor %s\n", __func__, it.first.c_str()); + continue; + } else if (remapped_name != it.first) { + ggml_set_name(it.second.tensor, remapped_name.c_str()); + LLAMA_LOG_DEBUG("%s: tensor %s remapped to %s\n", __func__, it.first.c_str(), ggml_get_name(it.second.tensor)); + } tensors.push_back(&it.second); } + if (!prune_list.empty()) { + gguf_set_val_u32(ctx_out.get(), ml.llm_kv(LLM_KV_BLOCK_COUNT).c_str(), blk_id); + } // keep_split requires that the weights are sorted by split index if (params->keep_split) { @@ -640,7 +714,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: if (llama_model_has_encoder(&model)) { n_attn_layer *= 3; } - GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected"); + GGML_ASSERT((qs.n_attention_wv == n_attn_layer - pruned_attention_w) && "n_attention_wv is unexpected"); } size_t total_size_org = 0; @@ -681,7 +755,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: for (size_t i = 0; i < ctx_outs.size(); ++i) { gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i); gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split); - gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), ml.n_tensors); + gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), (int32_t)tensors.size()); } } @@ -832,7 +906,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: const float * imatrix = nullptr; if (imatrix_data) { - auto it = imatrix_data->find(tensor->name); + auto it = imatrix_data->find(remap_imatrix(tensor->name, mapped)); if (it == imatrix_data->end()) { LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name); } else { @@ -947,6 +1021,7 @@ llama_model_quantize_params llama_model_quantize_default_params() { /*.imatrix =*/ nullptr, /*.kv_overrides =*/ nullptr, /*.tensor_type =*/ nullptr, + /*.prune_layers =*/ nullptr }; return result; diff --git a/tools/quantize/quantize.cpp b/tools/quantize/quantize.cpp index 3f54af7c58158..8acc765178846 100644 --- a/tools/quantize/quantize.cpp +++ b/tools/quantize/quantize.cpp @@ -107,13 +107,11 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp return false; } -// usage: -// ./llama-quantize [--allow-requantize] [--leave-output-tensor] [--pure] models/llama/ggml-model.gguf [models/llama/ggml-model-quant.gguf] type [nthreads] -// [[noreturn]] static void usage(const char * executable) { - printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type]\n", executable); - printf(" [--token-embedding-type] [--tensor-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n"); + printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights]\n", executable); + printf(" [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--tensor-type] [--prune-layers] [--keep-split] [--override-kv]\n"); + printf(" model-f32.gguf [model-quant.gguf] type [nthreads]\n\n"); printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. 
Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n"); printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n"); printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n"); @@ -124,6 +122,8 @@ static void usage(const char * executable) { printf(" --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n"); printf(" --tensor-type TENSOR=TYPE: quantize this tensor to this ggml_type. example: --tensor-type attn_q=q8_0\n"); printf(" Advanced option to selectively quantize tensors. May be specified multiple times.\n"); + printf(" --prune-layers L0,L1,L2...comma-separated list of layer numbers to prune from the model\n"); + printf(" Advanced option to remove all tensors from the given layers\n"); printf(" --keep-split: will generate quantized model in the same shards as input\n"); printf(" --override-kv KEY=TYPE:VALUE\n"); printf(" Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n"); @@ -286,6 +286,32 @@ static bool parse_tensor_type(const char * data, std::vector & prune_layers) { + if (!data) { + printf("\n%s: no layer pruning ids provided\n\n", __func__); + return false; + } + + const auto block_ids = string_split(data, ','); + for (const auto & block_id : block_ids) { + int id; + try { + id = std::stoi(block_id); + } catch (...) { + id = -1; + } + if (id < 0) { + printf("\n%s: invalid layer id '%s'\n\n", __func__, block_id.c_str()); + return false; + } + prune_layers.emplace_back(id); + } + + sort(prune_layers.begin(), prune_layers.end()); + prune_layers.erase(std::unique(prune_layers.begin(), prune_layers.end()), prune_layers.end()); + return true; +} + int main(int argc, char ** argv) { if (argc < 3) { usage(argv[0]); @@ -298,6 +324,7 @@ int main(int argc, char ** argv) { std::vector included_weights, excluded_weights; std::vector kv_overrides; std::vector tensor_types; + std::vector prune_layers; for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) { if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) { @@ -324,6 +351,10 @@ int main(int argc, char ** argv) { if (arg_idx == argc-1 || !parse_tensor_type(argv[++arg_idx], tensor_types)) { usage(argv[0]); } + } else if (strcmp(argv[arg_idx], "--prune-layers") == 0) { + if (arg_idx == argc-1 || !parse_layer_prune(argv[++arg_idx], prune_layers)) { + usage(argv[0]); + } } else if (strcmp(argv[arg_idx], "--override-kv") == 0) { if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) { usage(argv[0]); @@ -411,6 +442,9 @@ int main(int argc, char ** argv) { if (!tensor_types.empty()) { params.tensor_types = &tensor_types; } + if (!prune_layers.empty()) { + params.prune_layers = &prune_layers; + } llama_backend_init(); From dffd80672b5578fb83d0150f4d6df873c3c0ee27 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Mon, 23 Jun 2025 03:19:24 -0500 Subject: [PATCH 135/192] vulkan: update windows SDK in CI (#14334) --- .github/workflows/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c4783a6df8882..be282897380ac 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -683,7 +683,7 @@ jobs: env: OPENBLAS_VERSION: 0.3.23 SDE_VERSION: 9.33.0-2024-01-07 - VULKAN_VERSION: 1.4.309.0 + VULKAN_VERSION: 1.4.313.2 strategy: matrix: @@ -736,7 +736,7 @@ 
jobs: id: get_vulkan if: ${{ matrix.build == 'kompute-x64' || matrix.build == 'vulkan-x64' }} run: | - curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/VulkanSDK-${env:VULKAN_VERSION}-Installer.exe" + curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/vulkansdk-windows-X64-${env:VULKAN_VERSION}.exe" & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}" Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin" From f5e61b9130b572146ae14499bf19d6c216897888 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 23 Jun 2025 12:27:35 +0300 Subject: [PATCH 136/192] kv-cells : fix tracking of seq_pos (#14339) * kv-cells : fix tracking of seq_pos during cache reuse ggml-ci * cont : improve error message ggml-ci * cont : add more comments --- include/llama.h | 8 +++++--- src/llama-batch.cpp | 19 +++++++++++++++---- src/llama-context.cpp | 1 - src/llama-kv-cells.h | 42 ++++++++++++++++++++++++++++++++--------- tools/server/server.cpp | 3 +++ 5 files changed, 56 insertions(+), 17 deletions(-) diff --git a/include/llama.h b/include/llama.h index f4123d14ac1d8..3eda9bc68608c 100644 --- a/include/llama.h +++ b/include/llama.h @@ -944,12 +944,14 @@ extern "C" { // Requires the context to have a memory. // For encode-decoder contexts, processes the batch using the decoder. // Positive return values does not mean a fatal error, but rather a warning. - // Upon non-zero return values, the memory state is restored to the state before this call + // Upon fatal-error or abort, the ubatches that managed to be been processed will remain in the memory state of the context + // To handle this correctly, query the memory state using llama_memory_seq_pos_min() and llama_memory_seq_pos_max() + // Upon other return values, the memory state is restored to the state before this call // 0 - success // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context) - // 2 - aborted + // 2 - aborted (processed ubatches will remain in the context's memory) // -1 - invalid input batch - // < -1 - error + // < -1 - fatal error (processed ubatches will remain in the context's memory) LLAMA_API int32_t llama_decode( struct llama_context * ctx, struct llama_batch batch); diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index b3c996e18ab41..401e11364dbc9 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -245,10 +245,11 @@ bool llama_batch_allocr::init( } if (memory) { + bool ok = true; + if (batch.token) { if (seq_pos_min(s) != memory->seq_pos_max(s) + 1) { - LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s); - return false; + ok = false; } } else { assert(batch.embd); @@ -256,10 +257,20 @@ bool llama_batch_allocr::init( // for embeddings (typically used as vision input), we allow them to have repeating positions // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762 if (seq_pos_min(s) != memory->seq_pos_max(s) && seq_pos_min(s) != memory->seq_pos_max(s) + 1) { - LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s); - return false; + ok = false; } } + + if (!ok) { + LLAMA_LOG_ERROR( + "%s: the tokens of sequence %d in the input batch have 
inconsistent sequence positions:\n" + " - the last position stored in the memory module of the context (i.e. the KV cache) for sequence %d is X = %d\n" + " - the tokens for sequence %d in the input batch have a starting position of Y = %d\n" + " it is required that the sequence positions remain consecutive: Y = X + 1\n", + __func__, s, s, memory->seq_pos_max(s), s, seq_pos_min(s)); + + return false; + } } if (seq_pos_max(s) - seq_pos_min(s) + 1 > (int) seq_pos[s].size()) { diff --git a/src/llama-context.cpp b/src/llama-context.cpp index e352d81e4ed7c..06e93b19cbf40 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -1018,7 +1018,6 @@ int llama_context::decode(const llama_batch & batch_inp) { pos_min[s] = std::numeric_limits::max(); } - // TODO: fix sequence indexing for (uint32_t i = 0; i < ubatch.n_tokens; ++i) { const auto & seq_id = ubatch.seq_id[i][0]; diff --git a/src/llama-kv-cells.h b/src/llama-kv-cells.h index 349e9032e2484..c95d635948b5d 100644 --- a/src/llama-kv-cells.h +++ b/src/llama-kv-cells.h @@ -7,6 +7,7 @@ #include #include #include +#include // meta information about KV cells that can be part of multiple sequences at the same time // TODO: add unit tests @@ -164,7 +165,7 @@ class llama_kv_cells_unified { assert(seq_id >= 0); seq[i].reset(seq_id); - seq_pos[seq_id].erase(pos[i]); + seq_pos_dec(seq_id, pos[i]); if (seq[i].none()) { pos[i] = -1; @@ -187,7 +188,7 @@ class llama_kv_cells_unified { seq[i].reset(); seq[i].set(seq_id); - seq_pos[seq_id].insert(pos[i]); + seq_pos_inc(seq_id, pos[i]); return false; } @@ -232,7 +233,7 @@ class llama_kv_cells_unified { assert(!seq[i].test(seq_id)); seq[i].set(seq_id); - seq_pos[seq_id].insert(pos[i]); + seq_pos_inc(seq_id, pos[i]); } // return the sequence id of this cell @@ -259,7 +260,9 @@ class llama_kv_cells_unified { return -1; } - return *seq_pos[seq_id].begin(); + assert(seq_pos[seq_id].begin()->second > 0); + + return seq_pos[seq_id].begin()->first; } // the maximum position of sequence seq_id currently present in any of the cells @@ -272,7 +275,9 @@ class llama_kv_cells_unified { return -1; } - return *seq_pos[seq_id].rbegin(); + assert(seq_pos[seq_id].rbegin()->second > 0); + + return seq_pos[seq_id].rbegin()->first; } // note: call only if the cell is not empty @@ -389,17 +394,36 @@ class llama_kv_cells_unified { // the bitset seq[i] tells us which sequences are currently occupying the i-th cell std::vector seq; - // the set seq_pos[s] tells us which positions are currently present for sequence s + // the set seq_pos[s][p] tells us how many times the position p is currently present for sequence s + // if the position p is not present, seq_pos[s][p] is not set // this way seq_pos[s].begin() and seq_pos[s].rbegin() give us the min/max positions currently in the cache - std::set seq_pos[LLAMA_MAX_SEQ]; + // + // note that we cannot a use an std::set because in some cases a position can occur more than once for the same seq: + // - during performing a cache reuse via (rm + add) + // - some vision models have input embeddings with repeating positions + // + std::map seq_pos[LLAMA_MAX_SEQ]; // helper functions for updating `seq_pos`, once cell at a time: + void seq_pos_dec(llama_seq_id s, llama_pos p) { + auto it = seq_pos[s].find(p); + assert(it != seq_pos[s].end()); + + if (--it->second == 0) { + seq_pos[s].erase(it); + } + } + + void seq_pos_inc(llama_seq_id s, llama_pos p) { + seq_pos[s][p]++; + } + // remove cell i void seq_pos_rm(uint32_t i) { for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (seq[i].test(s)) { - 
seq_pos[s].erase(pos[i]); + seq_pos_dec(s, pos[i]); } } } @@ -408,7 +432,7 @@ class llama_kv_cells_unified { void seq_pos_add(uint32_t i) { for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { if (seq[i].test(s)) { - seq_pos[s].insert(pos[i]); + seq_pos_inc(s, pos[i]); } } } diff --git a/tools/server/server.cpp b/tools/server/server.cpp index aa18513e393b4..852352383bdbe 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -3418,9 +3418,12 @@ struct server_context { } if (ret < -1) { + // TODO: update slot state based on llama_memory_seq_pos_min() and llama_memory_seq_pos_max() err = "Compute error."; } + // TODO: handle ret == 2 (abort) when we start aborting + if (!err.empty()) { SRV_ERR("%s, i = %d, n_batch = %d, ret = %d\n", err.c_str(), i, n_batch, ret); for (auto & slot : slots) { From 6296acc8646ab69aa599f14575c2cf037d2038fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Mon, 23 Jun 2025 13:11:31 +0200 Subject: [PATCH 137/192] CUDA: mul_mat_v support for batch sizes > 1 (#14262) * CUDA: mul_mat_v support for batch sizes > 1 * use 64 bit math for initial offset calculation --- ggml/src/ggml-cuda/common.cuh | 4 + ggml/src/ggml-cuda/ggml-cuda.cu | 24 ++- ggml/src/ggml-cuda/mmv.cu | 326 +++++++++++++++++++++++--------- ggml/src/ggml-cuda/mmv.cuh | 5 +- 4 files changed, 256 insertions(+), 103 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 86c4d29a5d254..1369bc2d9e5e3 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -262,6 +262,10 @@ static bool fp16_mma_hardware_available(const int cc) { GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc); } +static bool bf16_mma_hardware_available(const int cc) { + return GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_AMPERE; +} + // Volta technically had FP16 tensor cores but they work very differently compared to Turing and later. 
static bool new_mma_available(const int cc) { return GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_TURING; diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 462db71e1a610..b3e6833c396fd 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -1943,16 +1943,14 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor && ggml_nbytes(src0) != ggml_backend_buffer_get_alloc_size(src0->buffer, src0) && src0->view_src; bool use_mul_mat_vec = (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16) - && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 - && src0->ne[0] % 2 == 0 && src1->ne[1] == 1; + && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32; bool use_mul_mat_vec_q = ggml_is_quantized(src0->type) && !bad_padding_clear && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE; bool use_mul_mat_q = ggml_is_quantized(src0->type) && !bad_padding_clear && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32; - bool any_gpus_with_slow_fp16 = false; - bool any_gpus_without_fp16_mma = false; + bool any_gpus_with_slow_fp16 = false; if (split) { ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) src0->buffer->buft->context; @@ -1963,16 +1961,16 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor continue; } - const int cc = ggml_cuda_info().devices[id].cc; - use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]); - any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_hardware_available(cc); - any_gpus_without_fp16_mma = any_gpus_without_fp16_mma || !fp16_mma_hardware_available(cc); + const int cc = ggml_cuda_info().devices[id].cc; + use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]); + use_mul_mat_vec = use_mul_mat_vec && ggml_cuda_should_use_mmv(src0->type, cc, src0->ne, src1->ne[1]); + any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_hardware_available(cc); } } else { - const int cc = ggml_cuda_info().devices[ctx.device].cc; - use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]); - any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_hardware_available(cc); - any_gpus_without_fp16_mma = any_gpus_without_fp16_mma || !fp16_mma_hardware_available(cc); + const int cc = ggml_cuda_info().devices[ctx.device].cc; + use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]); + use_mul_mat_vec = use_mul_mat_vec && ggml_cuda_should_use_mmv(src0->type, cc, src0->ne, src1->ne[1]); + any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_hardware_available(cc); } // debug helpers @@ -1983,7 +1981,7 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name); //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name); - if (!split && use_mul_mat_vec && (src0->ne[1] <= MMV_MAX_ROWS || any_gpus_without_fp16_mma)) { + if (!split && use_mul_mat_vec) { // the custom F16 vector kernel can be used over batched cuBLAS GEMM // but this is only faster 
for GPUs without tensor cores or with a thin src0 matrix (particularly KQV in attention) ggml_cuda_mul_mat_vec(ctx, src0, src1, nullptr, dst); diff --git a/ggml/src/ggml-cuda/mmv.cu b/ggml/src/ggml-cuda/mmv.cu index d8c385e2399ae..1502e9d942fbc 100644 --- a/ggml/src/ggml-cuda/mmv.cu +++ b/ggml/src/ggml-cuda/mmv.cu @@ -2,25 +2,26 @@ #include "common.cuh" #include "mmv.cuh" -template +template static __global__ void mul_mat_vec( const T * __restrict__ x, const float * __restrict__ y, const int32_t * __restrict__ ids, float * __restrict__ dst, - const int64_t ncols2, const int64_t nchannels_y, const int64_t stride_row, - const int64_t channel_ratio, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, - const int64_t sample_ratio, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst) { - const int64_t row = blockIdx.x; - const int64_t channel_dst = blockIdx.y; - const int64_t channel_x = ids ? ids[channel_dst] : channel_dst / channel_ratio; - const int64_t channel_y = ids ? channel_dst % nchannels_y : channel_dst; - const int64_t sample_dst = blockIdx.z; - const int64_t sample_x = sample_dst / sample_ratio; - const int64_t sample_y = sample_dst; - const int tid = threadIdx.x; + const int ncols2, const int nchannels_y, const int stride_row, const int stride_col_y2, const int stride_col_dst, + const int channel_ratio, const int stride_channel_x, const int stride_channel_y, const int stride_channel_dst, + const int sample_ratio, const int stride_sample_x, const int stride_sample_y, const int stride_sample_dst) { + const int row = blockIdx.x; + const int channel_dst = blockIdx.y; + const int channel_x = ids ? ids[channel_dst] : channel_dst / channel_ratio; + const int channel_y = ids ? 
channel_dst % nchannels_y : channel_dst; + const int sample_dst = blockIdx.z; + const int sample_x = sample_dst / sample_ratio; + const int sample_y = sample_dst; + const int tid = threadIdx.x; + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); - x += sample_x *stride_sample_x + channel_x *stride_channel_x + row*stride_row; - y += sample_y *stride_sample_y + channel_y *stride_channel_y; - dst += sample_dst*stride_sample_dst + channel_dst*stride_channel_dst; + x += int64_t(sample_x) *stride_sample_x + channel_x *stride_channel_x + row*stride_row; + y += int64_t(sample_y) *stride_sample_y + channel_y *stride_channel_y; + dst += int64_t(sample_dst)*stride_sample_dst + channel_dst*stride_channel_dst; const float2 * y2 = (const float2 *) y; @@ -34,81 +35,108 @@ static __global__ void mul_mat_vec( __syncthreads(); } - float sumf = 0.0f; + float sumf[ncols_dst] = {0.0f}; if constexpr (std::is_same::value) { const float2 * x2 = (const float2 *) x; - for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) { + for (int col2 = tid; col2 < ncols2; col2 += block_size) { const float2 tmpx = x2[col2]; - const float2 tmpy = y2[col2]; - sumf += tmpx.x*tmpy.x; - sumf += tmpx.y*tmpy.y; + +#pragma unroll + for (int j = 0; j < ncols_dst; ++j) { + const float2 tmpy = y2[j*stride_col_y2 + col2]; + sumf[j] += tmpx.x*tmpy.x; + sumf[j] += tmpx.y*tmpy.y; + } } } else if constexpr (std::is_same::value) { const half2 * x2 = (const half2 *) x; if (std::is_same::value) { - for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) { + for (int col2 = tid; col2 < ncols2; col2 += block_size) { const float2 tmpx = __half22float2(x2[col2]); - const float2 tmpy = y2[col2]; - sumf += tmpx.x * tmpy.x; - sumf += tmpx.y * tmpy.y; + +#pragma unroll + for (int j = 0; j < ncols_dst; ++j) { + const float2 tmpy = y2[j*stride_col_y2 + col2]; + sumf[j] += tmpx.x * tmpy.x; + sumf[j] += tmpx.y * tmpy.y; + } } } else { #ifdef FP16_AVAILABLE - half2 sumh2 = make_half2(0.0f, 0.0f); + half2 sumh2[ncols_dst] = {{0.0f, 0.0f}}; + + for (int col2 = tid; col2 < ncols2; col2 += block_size) { + const half2 tmpx = x2[col2]; - for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) { - const float2 tmp = y2[col2]; - sumh2 += x2[col2] * make_half2(tmp.x, tmp.y); +#pragma unroll + for (int j = 0; j < ncols_dst; ++j) { + const float2 tmpy = y2[j*stride_col_y2 + col2]; + sumh2[j] += tmpx * make_half2(tmpy.x, tmpy.y); + } } - sumf = __low2float(sumh2) + __high2float(sumh2); +#pragma unroll + for (int j = 0; j < ncols_dst; ++j) { + sumf[j] = __low2float(sumh2[j]) + __high2float(sumh2[j]); + } #else NO_DEVICE_CODE; #endif // FP16_AVAILABLE } } else if constexpr (std::is_same::value) { const int * x2 = (const int *) x; - for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) { - const int tmpx = x2[col2]; - const float2 tmpy = y2[col2]; - sumf += float(reinterpret_cast(&tmpx)[0]) * tmpy.x; - sumf += float(reinterpret_cast(&tmpx)[1]) * tmpy.y; + for (int col2 = tid; col2 < ncols2; col2 += block_size) { + const int tmpx = x2[col2]; +#pragma unroll + for (int j = 0; j < ncols_dst; ++j) { + const float2 tmpy = y2[j*stride_col_y2 + col2]; + sumf[j] += float(reinterpret_cast(&tmpx)[0]) * tmpy.x; + sumf[j] += float(reinterpret_cast(&tmpx)[1]) * tmpy.y; + } } } else { static_assert(std::is_same::value, "unsupported type"); } - sumf = warp_reduce_sum(sumf); +#pragma unroll + for (int j = 0; j < ncols_dst; ++j) { + sumf[j] = warp_reduce_sum(sumf[j]); - if (block_size > warp_size) { - buf_iw[tid/warp_size] = sumf; - __syncthreads(); - if (tid 
>= warp_size) { - return; + if (block_size > warp_size) { + buf_iw[tid/warp_size] = sumf[j]; + __syncthreads(); + if (tid < warp_size) { + sumf[j] = buf_iw[tid]; + sumf[j] = warp_reduce_sum(sumf[j]); + } + if (j < ncols_dst) { + __syncthreads(); + } } - sumf = buf_iw[tid]; - sumf = warp_reduce_sum(sumf); } - if (tid != 0) { + if (tid >= ncols_dst) { return; } - dst[row] = sumf; + dst[tid*stride_col_dst + row] = sumf[tid]; } -template +template static void launch_mul_mat_vec_cuda( const T * x, const float * y, const int32_t * ids, float * dst, - const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, + const int64_t ncols, const int64_t nrows, + const int64_t stride_row, const int64_t stride_col_y, const int64_t stride_col_dst, + const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x, const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, cudaStream_t stream) { - GGML_ASSERT(ncols % 2 == 0); - GGML_ASSERT(stride_row % 2 == 0); + GGML_ASSERT(ncols % 2 == 0); + GGML_ASSERT(stride_row % 2 == 0); + GGML_ASSERT(stride_col_y % 2 == 0); GGML_ASSERT(ids || nchannels_dst % nchannels_x == 0); GGML_ASSERT( nsamples_dst % nsamples_x == 0); const int64_t channel_ratio = nchannels_dst / nchannels_x; @@ -138,44 +166,52 @@ static void launch_mul_mat_vec_cuda( const dim3 block_dims(block_size_best, 1, 1); switch (block_size_best) { case 32: { - mul_mat_vec<<>> - (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, - stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + mul_mat_vec<<>> + (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); } break; case 64: { - mul_mat_vec<<>> - (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, - stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + mul_mat_vec<<>> + (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); } break; case 96: { - mul_mat_vec<<>> - (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, - stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + mul_mat_vec<<>> + (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); } break; case 128: { - mul_mat_vec<<>> - (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, - stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + mul_mat_vec<<>> + (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); 
} break; case 160: { - mul_mat_vec<<>> - (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, - stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + mul_mat_vec<<>> + (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); } break; case 192: { - mul_mat_vec<<>> - (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, - stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + mul_mat_vec<<>> + (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); } break; case 224: { - mul_mat_vec<<>> - (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, - stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + mul_mat_vec<<>> + (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); } break; case 256: { - mul_mat_vec<<>> - (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, - stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + mul_mat_vec<<>> + (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); } break; default: { GGML_ABORT("fatal error"); @@ -183,23 +219,91 @@ static void launch_mul_mat_vec_cuda( } } +template +static void mul_mat_vec_cuda_switch_ncols_dst( + const T * x, const float * y, const int32_t * ids, float * dst, + const int64_t ncols, const int64_t nrows, const int64_t ncols_dst, + const int64_t stride_row, const int64_t stride_col_y, const int64_t stride_col_dst, + const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, + const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x, + const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, + cudaStream_t stream) { + switch (ncols_dst) { + case 1: + launch_mul_mat_vec_cuda + (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, + stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + break; + case 2: + launch_mul_mat_vec_cuda + (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, + stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + break; + case 3: + launch_mul_mat_vec_cuda + (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, + stride_channel_dst, nsamples_x, nsamples_dst, 
stride_sample_x, stride_sample_y, stride_sample_dst, stream); + break; + case 4: + launch_mul_mat_vec_cuda + (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, + stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + break; + case 5: + launch_mul_mat_vec_cuda + (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, + stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + break; + case 6: + launch_mul_mat_vec_cuda + (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, + stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + break; + case 7: + launch_mul_mat_vec_cuda + (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, + stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + break; + case 8: + launch_mul_mat_vec_cuda + (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, + stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + break; + default: + GGML_ABORT("fatal error"); + break; + } +} + template static void mul_mat_vec_cuda( const T * x, const float * y, const int32_t * ids, float * dst, - const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, + const int64_t ncols, const int64_t nrows, const int64_t ncols_dst, + const int64_t stride_row, const int64_t stride_col_y, const int stride_col_dst, + const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x, const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, enum ggml_prec prec, cudaStream_t stream) { if constexpr(std::is_same::value) { if (prec == GGML_PREC_DEFAULT) { - launch_mul_mat_vec_cuda - (x, y, ids, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, + mul_mat_vec_cuda_switch_ncols_dst + (x, y, ids, dst, ncols, nrows, ncols_dst, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); return; } } - launch_mul_mat_vec_cuda - (x, y, ids, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, + mul_mat_vec_cuda_switch_ncols_dst + (x, y, ids, dst, ncols, nrows, ncols_dst, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); } @@ -246,24 +350,24 @@ void ggml_cuda_mul_mat_vec(ggml_backend_cuda_context & ctx, const ggml_tensor 
* const int64_t stride_channel_dst = ids ? s1 : s2; const int64_t stride_channel_y = ids ? s11 : s12; - GGML_ASSERT(ncols_dst == 1); + GGML_ASSERT(!ids || ncols_dst == 1); switch (src0->type) { case GGML_TYPE_F32: { const float * src0_d = (const float *) src0->data; - mul_mat_vec_cuda(src0_d, src1_d, ids_d, dst_d, ne00, ne01, s01, + mul_mat_vec_cuda(src0_d, src1_d, ids_d, dst_d, ne00, ne01, ncols_dst, s01, s11, s1, ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst, ne03, ne3, s03, s13, s3, prec, ctx.stream()); } break; case GGML_TYPE_F16: { const half * src0_d = (const half *) src0->data; - mul_mat_vec_cuda(src0_d, src1_d, ids_d, dst_d, ne00, ne01, s01, + mul_mat_vec_cuda(src0_d, src1_d, ids_d, dst_d, ne00, ne01, ncols_dst, s01, s11, s1, ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst, ne03, ne3, s03, s13, s3, prec, ctx.stream()); } break; case GGML_TYPE_BF16: { const nv_bfloat16 * src0_d = (const nv_bfloat16 *) src0->data; - mul_mat_vec_cuda(src0_d, src1_d, ids_d, dst_d, ne00, ne01, s01, + mul_mat_vec_cuda(src0_d, src1_d, ids_d, dst_d, ne00, ne01, ncols_dst, s01, s11, s1, ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst, ne03, ne3, s03, s13, s3, prec, ctx.stream()); } break; @@ -282,16 +386,19 @@ void ggml_cuda_op_mul_mat_vec( GGML_ASSERT(dst->type == GGML_TYPE_F32); const int64_t ne00 = src0->ne[0]; + const int64_t ne10 = src1->ne[0]; + const int64_t ne0 = dst->ne[0]; const int64_t row_diff = row_high - row_low; - GGML_ASSERT(src1_ncols == 1); - - const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; + const int id = ggml_cuda_get_device(); + const int cc = ggml_cuda_info().devices[id].cc; const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32; // ggml_cuda_op provides single, contiguous matrices const int64_t stride_row = ne00; + const int64_t stride_col_y = ne10; + const int64_t stride_col_dst = id == ctx.device ? 
ne0 : row_diff; // main device has larger memory buffer const int64_t nchannels_x = 1; const int64_t nchannels_y = 1; const int64_t nchannels_dst = 1; @@ -307,19 +414,19 @@ void ggml_cuda_op_mul_mat_vec( switch (src0->type) { case GGML_TYPE_F32: { const float * src0_d = (const float *) src0_dd_i; - mul_mat_vec_cuda(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, stride_row, + mul_mat_vec_cuda(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, src1_ncols, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream); } break; case GGML_TYPE_F16: { const half * src0_d = (const half *) src0_dd_i; - mul_mat_vec_cuda(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, stride_row, + mul_mat_vec_cuda(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, src1_ncols, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream); } break; case GGML_TYPE_BF16: { const nv_bfloat16 * src0_d = (const nv_bfloat16 *) src0_dd_i; - mul_mat_vec_cuda(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, stride_row, + mul_mat_vec_cuda(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, src1_ncols, stride_row, stride_col_y, stride_col_dst, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream); } break; @@ -334,3 +441,48 @@ void ggml_cuda_op_mul_mat_vec( GGML_UNUSED(src1_ncols); GGML_UNUSED(src1_padded_row_size); } + +bool ggml_cuda_should_use_mmv(enum ggml_type type, int cc, const int64_t * src0_ne, int64_t ne11) { + if (src0_ne[0] % 2 != 0) { + return false; + } + switch (type) { + case GGML_TYPE_F32: + if (GGML_CUDA_CC_IS_NVIDIA(cc)) { + if (cc >= GGML_CUDA_CC_ADA_LOVELACE) { + return ne11 <= 8; + } + if (cc >= GGML_CUDA_CC_TURING) { + return ne11 <= 4; + } + return ne11 <= 3; + } + return ne11 <= 8; + case GGML_TYPE_F16: + if (GGML_CUDA_CC_IS_NVIDIA(cc)) { + const bool src0_small = (src0_ne[1] <= 512 || src0_ne[2]*src0_ne[3] == 1); + if (cc >= GGML_CUDA_CC_ADA_LOVELACE) { + return src0_small && ne11 <= 4; + } + if (fp16_mma_hardware_available(cc)) { + return src0_small && ne11 <= 3; + } + return ne11 <= 8; + } + return ne11 <= 8; + case GGML_TYPE_BF16: + if (GGML_CUDA_CC_IS_NVIDIA(cc)) { + const bool src0_small = (src0_ne[1] <= 512 || src0_ne[2]*src0_ne[3] == 1); + if (cc >= GGML_CUDA_CC_ADA_LOVELACE) { + return src0_small && ne11 <= 4; + } + if (bf16_mma_hardware_available(cc)) { + return src0_small && ne11 <= 3; + } + return ne11 <= 8; + } + return ne11 <= 8; + default: + return false; + } +} diff --git a/ggml/src/ggml-cuda/mmv.cuh b/ggml/src/ggml-cuda/mmv.cuh index 756e7e1cc7fc3..1330bcb6a8860 100644 --- a/ggml/src/ggml-cuda/mmv.cuh +++ b/ggml/src/ggml-cuda/mmv.cuh @@ -1,8 +1,5 @@ #include "common.cuh" -// maximum number of src0 rows with which to use mul_mat_vec over cuBLAS if FP16 tensor cores are available -#define MMV_MAX_ROWS 512 - void ggml_cuda_mul_mat_vec(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst); void ggml_cuda_op_mul_mat_vec( @@ -10,3 +7,5 @@ void ggml_cuda_op_mul_mat_vec( const ggml_tensor * src0, const ggml_tensor * src1, 
ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, cudaStream_t stream); + +bool ggml_cuda_should_use_mmv(enum ggml_type type, int cc, const int64_t * src0_ne, int64_t ne11); From 7ab587eec986f2d04e1b47d7b88fb9de75a6b862 Mon Sep 17 00:00:00 2001 From: Molly Sophia Date: Mon, 23 Jun 2025 19:56:19 +0800 Subject: [PATCH 138/192] llama : better rwkv chat template and add missing `inputs.use_jinja` setting (#14336) * llama-cli : add missing `inputs.use_jinja` setting Signed-off-by: Molly Sophia * llama : better legacy chat template for rwkv Signed-off-by: Molly Sophia --------- Signed-off-by: Molly Sophia --- src/llama-chat.cpp | 17 +++++++++++------ tools/main/main.cpp | 1 + 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index 0839cad3ee6db..5d317f4ee62eb 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -528,12 +528,17 @@ int32_t llm_chat_apply_template( } } else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) { // this template requires the model to have "\n\n" as EOT token - for (auto message : chat) { - std::string role(message->role); - if (role == "user") { - ss << "User: " << message->content << "\n\nAssistant:"; - } else { - ss << message->content << "\n\n"; + for (size_t i = 0; i < chat.size(); i++) { + std::string role(chat[i]->role); + if (role == "system") { + ss << "System: " << trim(chat[i]->content) << "\n\n"; + } else if (role == "user") { + ss << "User: " << trim(chat[i]->content) << "\n\n"; + if (i == chat.size() - 1) { + ss << "Assistant:"; + } + } else if (role == "assistant") { + ss << "Assistant: " << trim(chat[i]->content) << "\n\n"; } } } else if (tmpl == LLM_CHAT_TEMPLATE_GRANITE) { diff --git a/tools/main/main.cpp b/tools/main/main.cpp index 19b247b0d672f..154b37cdb01d0 100644 --- a/tools/main/main.cpp +++ b/tools/main/main.cpp @@ -292,6 +292,7 @@ int main(int argc, char ** argv) { if (!params.system_prompt.empty() || !params.prompt.empty()) { common_chat_templates_inputs inputs; + inputs.use_jinja = g_params->use_jinja; inputs.messages = chat_msgs; inputs.add_generation_prompt = !params.prompt.empty(); From ca0943e12c6bd267a84ebbde630267024ef2783b Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Mon, 23 Jun 2025 08:44:48 -0500 Subject: [PATCH 139/192] vulkan: update windows SDK in release.yml (#14344) --- .github/workflows/release.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9874736cbd8de..64fff175e227b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -302,7 +302,7 @@ jobs: env: OPENBLAS_VERSION: 0.3.23 - VULKAN_VERSION: 1.4.309.0 + VULKAN_VERSION: 1.4.313.2 strategy: matrix: @@ -332,7 +332,7 @@ jobs: id: get_vulkan if: ${{ matrix.backend == 'vulkan' }} run: | - curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/VulkanSDK-${env:VULKAN_VERSION}-Installer.exe" + curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/vulkansdk-windows-X64-${env:VULKAN_VERSION}.exe" & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}" Add-Content $env:GITHUB_PATH 
"C:\VulkanSDK\${env:VULKAN_VERSION}\bin" From d7508921e198c677c1a7b7153935225573216845 Mon Sep 17 00:00:00 2001 From: bandoti <141645996+bandoti@users.noreply.github.com> Date: Mon, 23 Jun 2025 15:30:51 -0300 Subject: [PATCH 140/192] ci: add workflow for relocatable cmake package (#14346) --- .github/workflows/build-cmake-pkg.yml | 51 +++++++++++++++++++++++++++ .github/workflows/build.yml | 40 +++++++++++++++++++-- 2 files changed, 89 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/build-cmake-pkg.yml diff --git a/.github/workflows/build-cmake-pkg.yml b/.github/workflows/build-cmake-pkg.yml new file mode 100644 index 0000000000000..fee2ab96bd0e8 --- /dev/null +++ b/.github/workflows/build-cmake-pkg.yml @@ -0,0 +1,51 @@ +name: Build relocatable cmake package +on: + workflow_dispatch: + workflow_call: + +jobs: + linux: + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Install dependencies + run: | + sudo apt update + sudo apt install -y build-essential tcl + + - name: Build + run: | + PREFIX="$(pwd)"/inst + cmake -S . -B build -DCMAKE_PREFIX_PATH="$PREFIX" \ + -DLLAMA_CURL=OFF -DLLAMA_BUILD_TESTS=OFF -DLLAMA_BUILD_TOOLS=OFF \ + -DLLAMA_BUILD_EXAMPLES=OFF -DCMAKE_BUILD_TYPE=Release + cmake --build build --config Release + cmake --install build --prefix "$PREFIX" --config Release + + export LLAMA_CONFIG="$PREFIX"/lib/cmake/llama/llama-config.cmake + tclsh <<'EOF' + set build(commit) [string trim [exec git rev-parse --short HEAD]] + set build(number) [string trim [exec git rev-list --count HEAD]] + set build(version) "0.0.$build(number)" + + set llamaconfig [read [open "$env(LLAMA_CONFIG)" r]] + set checks [list "set\\(LLAMA_VERSION \\s+$build(version)\\)" \ + "set\\(LLAMA_BUILD_COMMIT\\s+$build(commit)\\)" \ + "set\\(LLAMA_BUILD_NUMBER\\s+$build(number)\\)"] + + puts -nonewline "Checking llama-config.cmake version... " + foreach check $checks { + if {![regexp -expanded -- $check $llamaconfig]} { + puts "\"$check\" failed!" + exit 1 + } + } + puts "success." + EOF + + cd examples/simple-cmake-pkg + cmake -S . 
-B build -DCMAKE_PREFIX_PATH="$PREFIX"/lib/cmake + cmake --build build diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index be282897380ac..4feccf21e9e3e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -5,10 +5,43 @@ on: push: branches: - master - paths: ['.github/workflows/build.yml', '.github/workflows/build-linux-cross.yml', '**/CMakeLists.txt', '**/.cmake', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp'] + paths: [ + '.github/workflows/build.yml', + '.github/workflows/build-linux-cross.yml', + '.github/workflows/build-cmake-pkg.yml', + '**/CMakeLists.txt', + '**/.cmake', + '**/*.h', + '**/*.hpp', + '**/*.c', + '**/*.cpp', + '**/*.cu', + '**/*.cuh', + '**/*.swift', + '**/*.m', + '**/*.metal', + '**/*.comp' + ] + pull_request: types: [opened, synchronize, reopened] - paths: ['.github/workflows/build.yml', '.github/workflows/build-linux-cross.yml', '**/CMakeLists.txt', '**/.cmake', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp'] + paths: [ + '.github/workflows/build.yml', + '.github/workflows/build-linux-cross.yml', + '.github/workflows/build-cmake-pkg.yml', + '**/CMakeLists.txt', + '**/.cmake', + '**/*.h', + '**/*.hpp', + '**/*.c', + '**/*.cpp', + '**/*.cu', + '**/*.cuh', + '**/*.swift', + '**/*.m', + '**/*.metal', + '**/*.comp' + ] concurrency: group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} @@ -478,6 +511,9 @@ jobs: build-linux-cross: uses: ./.github/workflows/build-linux-cross.yml + build-cmake-pkg: + uses: ./.github/workflows/build-cmake-pkg.yml + macOS-latest-cmake-ios: runs-on: macos-latest From ec7486060670f5334ab385ae890bffdb2bb26403 Mon Sep 17 00:00:00 2001 From: uvos Date: Tue, 24 Jun 2025 01:12:56 +0200 Subject: [PATCH 141/192] CUDA/HIP: optimize mmv paths taken for HIP devices (#14324) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Johannes Gäßler --- ggml/src/ggml-cuda/common.cuh | 6 +++++- ggml/src/ggml-cuda/mmv.cu | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 1369bc2d9e5e3..f6127aeee425a 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -263,7 +263,11 @@ static bool fp16_mma_hardware_available(const int cc) { } static bool bf16_mma_hardware_available(const int cc) { - return GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_AMPERE; + return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_AMPERE) || GGML_CUDA_CC_IS_CDNA(cc) || cc >= GGML_CUDA_CC_RDNA3; +} + +static bool fp32_mma_hardware_available(const int cc) { + return GGML_CUDA_CC_IS_CDNA(cc); } // Volta technically had FP16 tensor cores but they work very differently compared to Turing and later. 
diff --git a/ggml/src/ggml-cuda/mmv.cu b/ggml/src/ggml-cuda/mmv.cu index 1502e9d942fbc..e14c93516bddf 100644 --- a/ggml/src/ggml-cuda/mmv.cu +++ b/ggml/src/ggml-cuda/mmv.cu @@ -456,6 +456,11 @@ bool ggml_cuda_should_use_mmv(enum ggml_type type, int cc, const int64_t * src0_ return ne11 <= 4; } return ne11 <= 3; + } else if (GGML_CUDA_CC_IS_AMD(cc)) { + if (fp32_mma_hardware_available(cc)) { + return ne11 <= 3; + } + return ne11 <= 8; } return ne11 <= 8; case GGML_TYPE_F16: @@ -468,6 +473,14 @@ bool ggml_cuda_should_use_mmv(enum ggml_type type, int cc, const int64_t * src0_ return src0_small && ne11 <= 3; } return ne11 <= 8; + } else if (GGML_CUDA_CC_IS_AMD(cc)) { + if (fp16_mma_hardware_available(cc)) { + if (GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) { + return ne11 <= 5; + } + return ne11 <= 2; + } + return ne11 <= 8; } return ne11 <= 8; case GGML_TYPE_BF16: @@ -480,6 +493,11 @@ bool ggml_cuda_should_use_mmv(enum ggml_type type, int cc, const int64_t * src0_ return src0_small && ne11 <= 3; } return ne11 <= 8; + } else if (GGML_CUDA_CC_IS_AMD(cc)) { + if (bf16_mma_hardware_available(cc)) { + return ne11 <= 3; + } + return ne11 <= 8; } return ne11 <= 8; default: From 1f7b36a044224e352b001277d21da26107221228 Mon Sep 17 00:00:00 2001 From: Bartowski <3266127+bartowski1182@users.noreply.github.com> Date: Tue, 24 Jun 2025 02:17:58 -0400 Subject: [PATCH 142/192] jinja : Add Mistral-Small-3.2-24B-Instruct-2506.jinja (#14349) This will allow the use of tools on the llama-server --- .../Mistral-Small-3.2-24B-Instruct-2506.jinja | 124 ++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 models/templates/Mistral-Small-3.2-24B-Instruct-2506.jinja diff --git a/models/templates/Mistral-Small-3.2-24B-Instruct-2506.jinja b/models/templates/Mistral-Small-3.2-24B-Instruct-2506.jinja new file mode 100644 index 0000000000000..19a3eaee49be6 --- /dev/null +++ b/models/templates/Mistral-Small-3.2-24B-Instruct-2506.jinja @@ -0,0 +1,124 @@ +{%- set today = strftime_now("%Y-%m-%d") %} +{%- set default_system_message = "You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris.\nYour knowledge base was last updated on 2023-10-01. The current date is " + today + ".\n\nWhen you're not sure about some information or when the user's request requires up-to-date or specific data, you must use the available tools to fetch the information. Do not hesitate to use tools whenever they can provide a more accurate or complete response. If no relevant tools are available, then clearly state that you don't have the information and avoid making up anything. + +If the user's question is not clear, ambiguous, or does not provide enough context for you to accurately answer the question, you do not try to answer it right away and you rather ask the user to clarify their request (e.g. \"What are some good restaurants around me?\" => \"Where are you?\" or \"When is the next flight to Tokyo\" => \"Where do you travel from?\"). +You are always very attentive to dates, and when asked about information at specific dates, you discard information that is at another date. +You follow these instructions in all languages, and always respond to the user in the language they use or request. +Next sections describe the capabilities that you have. + +# WEB BROWSING INSTRUCTIONS + +You cannot perform any web search or access internet to open URLs, links etc. 
If it seems like the user is expecting you to do so, you clarify the situation and ask the user to copy paste the text directly in the chat. + +# MULTI-MODAL INSTRUCTIONS + +You have the ability to read images, but you cannot generate images. You also cannot transcribe audio files or videos. +You cannot read nor transcribe audio files or videos. + +# TOOL CALLING INSTRUCTIONS + +You may have access to tools that you can use to fetch information or perform actions. You must use these tools in the following situations: + +1. When the request requires up-to-date information. +2. When the request requires specific data that you do not have in your knowledge base. +3. When the request involves actions that you cannot perform without tools. + +Always prioritize using tools to provide the most accurate and helpful response. If tools are not available, inform the user that you cannot perform the requested action at the moment." %} + +{{- bos_token }} + +{%- set system_prompt = default_system_message %} +{%- set loop_messages = messages %} + +{%- if not tools is defined %} + {%- set tools = none %} +{%- endif %} + +{%- if messages|length > 0 and messages[0]['role'] == 'system' %} + {%- if messages[0]['content'] is string %} + {%- set system_prompt = messages[0]['content'] %} + {%- else %} + {%- set system_prompt = messages[0]['content'][0]['text'] %} + {%- endif %} + {%- set loop_messages = messages[1:] %} +{%- endif %} + +{%- set user_messages = loop_messages | selectattr("role", "equalto", "user") | list %} + +{%- set ns = namespace(index=0) %} +{%- for message in loop_messages %} + {%- if not (message.role == "tool" or (message.get('tool_calls'))) %} + {%- if (message["role"] == "user") != (ns.index % 2 == 0) %} + {{- raise_exception("After the optional system message, conversation roles must alternate user/assistant/user/assistant/...") }} + {%- endif %} + {%- set ns.index = ns.index + 1 %} + {%- endif %} +{%- endfor %} + +{{- '[SYSTEM_PROMPT]' + system_prompt + '[/SYSTEM_PROMPT]' }} + +{%- for message in loop_messages %} + {%- if message['role'] == 'system' %} + {%- if message['content'] is string %} + {{- '[SYSTEM_PROMPT]' + message['content'] + '[/SYSTEM_PROMPT]' }} + {%- else %} + {{- '[SYSTEM_PROMPT]' + message['content'][0]['text'] + '[/SYSTEM_PROMPT]' }} + {%- endif %} + {%- elif message['role'] == 'user' %} + {%- if tools is not none and (message == user_messages[-1]) %} + {{- '[AVAILABLE_TOOLS]' + tools|tojson + '[/AVAILABLE_TOOLS]' }} + {%- endif %} + {{- '[INST]' }} + {%- if message['content'] is string %} + {{- message['content'] }} + {%- else %} + {%- for block in message['content'] %} + {%- if block['type'] == 'text' %} + {{- block['text'] }} + {%- elif block['type'] in ['image', 'image_url'] %} + {{- '[IMG]' }} + {%- else %} + {{- raise_exception('Only text and image blocks are supported in message content!') }} + {%- endif %} + {%- endfor %} + {%- endif %} + {{- '[/INST]' }} + {%- elif message['role'] == 'assistant' %} + {%- if message.get('tool_calls') %} + {%- for tool_call in message.tool_calls %} + {{- '[TOOL_CALLS]' + tool_call.function.name }} + {%- if not tool_call.id is defined or tool_call.id is not string or tool_call.id|length != 9 %} + {{- raise_exception("Tool call IDs should be alphanumeric strings with length 9!") }} + {%- endif %} + {{- '[CALL_ID]' + tool_call.id }} + {{- '[ARGS]' + tool_call['function']['arguments']|tojson }} + {%- endfor %} + {{- eos_token }} + {%- elif message['content'] is string %} + {{- message['content'] + eos_token }} + {%- else %} + {%- 
for block in message['content'] %} + {%- if block['type'] == 'text' %} + {{- block['text'] }} + {%- elif block['type'] in ['image', 'image_url'] %} + {{- '[IMG]' }} + {%- else %} + {{- raise_exception('Only text and image blocks are supported in assistant content!') }} + {%- endif %} + {%- endfor %} + {{- eos_token }} + {%- endif %} + {%- elif message['role'] == 'tool_results' or message['role'] == 'tool' %} + {%- if message.content is defined and message.content.content is defined %} + {%- set content = message.content.content %} + {%- else %} + {%- set content = message.content %} + {%- endif %} + {%- if not message.tool_call_id is defined or message.tool_call_id is not string or message['tool_call_id']|length != 9 %} + {{- raise_exception("Tool call IDs should be alphanumeric strings with length 9!") }} + {%- endif %} + {{- '[TOOL_RESULTS]' + message.tool_call_id + '[TOOL_CONTENT]' + content|string + '[/TOOL_RESULTS]' }} + {%- else %} + {{- raise_exception('Only system, user, assistant, and tool roles are supported!') }} + {%- endif %} +{%- endfor %} From 2e44f93adeadd10b00f909ff4756834248cc64a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Tue, 24 Jun 2025 09:31:00 +0200 Subject: [PATCH 143/192] main : honor --verbose-prompt on interactive prompts (#14350) --- tools/main/main.cpp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/main/main.cpp b/tools/main/main.cpp index 154b37cdb01d0..516bf09652484 100644 --- a/tools/main/main.cpp +++ b/tools/main/main.cpp @@ -917,10 +917,19 @@ int main(int argc, char ** argv) { embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end()); embd_inp.insert(embd_inp.end(), line_sfx.begin(), line_sfx.end()); + if (params.verbose_prompt) { + LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size() - original_size); + } + for (size_t i = original_size; i < embd_inp.size(); ++i) { const llama_token token = embd_inp[i]; + const std::string token_str = common_token_to_piece(ctx, token); output_tokens.push_back(token); - output_ss << common_token_to_piece(ctx, token); + output_ss << token_str; + + if (params.verbose_prompt) { + LOG_INF("%6d -> '%s'\n", token, token_str.c_str()); + } } // reset assistant message From 38a6de623bdb4e1f38222a07d91e48deeca6f8e6 Mon Sep 17 00:00:00 2001 From: Nigel Bosch Date: Tue, 24 Jun 2025 08:59:11 +0000 Subject: [PATCH 144/192] server : move no API key doc to /health (#14352) --- tools/server/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/server/README.md b/tools/server/README.md index 43aa65d50ce3f..1a624c13bee96 100644 --- a/tools/server/README.md +++ b/tools/server/README.md @@ -370,6 +370,8 @@ node index.js ### GET `/health`: Returns heath check result +This endpoint is public (no API key check). + **Response format** - HTTP status code 503 @@ -708,7 +710,7 @@ If the tokens are missing, then the extra context is simply prefixed at the star ### **GET** `/props`: Get server global properties. -This endpoint is public (no API key check). By default, it is read-only. To make POST request to change global properties, you need to start server with `--props` +By default, it is read-only. 
To make POST request to change global properties, you need to start server with `--props` **Response format** From 5e7ce246aeefe2f9c401bb8bc49af3d37a3fa838 Mon Sep 17 00:00:00 2001 From: Mathieu Baudier Date: Tue, 24 Jun 2025 15:05:31 +0200 Subject: [PATCH 145/192] cmake : use LLAMA_BUILD_NUMBER when defining LLAMA_INSTALL_VERSION (#14362) --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 50801cdc637bd..d2becb04c6bb9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -95,7 +95,7 @@ endif() if (NOT DEFINED LLAMA_BUILD_COMMIT) set(LLAMA_BUILD_COMMIT ${BUILD_COMMIT}) endif() -set(LLAMA_INSTALL_VERSION 0.0.${BUILD_NUMBER}) +set(LLAMA_INSTALL_VERSION 0.0.${LLAMA_BUILD_NUMBER}) # override ggml options set(GGML_ALL_WARNINGS ${LLAMA_ALL_WARNINGS}) From 1e8ba97b45e44e3465b55a201fbc535859574174 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 24 Jun 2025 18:26:30 +0300 Subject: [PATCH 146/192] batch : fix check for empty sequences in memory (#14364) * batch : fix check for empty sequences in memory ggml-ci * cont : reuse the var ggml-ci --- src/llama-batch.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index 401e11364dbc9..91b1d6078a252 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -244,11 +244,13 @@ bool llama_batch_allocr::init( continue; } - if (memory) { + const llama_pos p0 = memory ? memory->seq_pos_max(s) : -1; + + if (p0 >= 0) { bool ok = true; if (batch.token) { - if (seq_pos_min(s) != memory->seq_pos_max(s) + 1) { + if (seq_pos_min(s) != p0 + 1) { ok = false; } } else { @@ -256,7 +258,7 @@ bool llama_batch_allocr::init( // for embeddings (typically used as vision input), we allow them to have repeating positions // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762 - if (seq_pos_min(s) != memory->seq_pos_max(s) && seq_pos_min(s) != memory->seq_pos_max(s) + 1) { + if (seq_pos_min(s) != p0 && seq_pos_min(s) != p0 + 1) { ok = false; } } @@ -267,7 +269,7 @@ bool llama_batch_allocr::init( " - the last position stored in the memory module of the context (i.e. 
the KV cache) for sequence %d is X = %d\n" " - the tokens for sequence %d in the input batch have a starting position of Y = %d\n" " it is required that the sequence positions remain consecutive: Y = X + 1\n", - __func__, s, s, memory->seq_pos_max(s), s, seq_pos_min(s)); + __func__, s, s, p0, s, seq_pos_min(s)); return false; } From 50b288f6a96041a563bb0529e019faf3347ddbd9 Mon Sep 17 00:00:00 2001 From: lhez Date: Tue, 24 Jun 2025 11:46:25 -0700 Subject: [PATCH 147/192] opencl: ref count `ggml_backend_opencl_context` and refactor profiling (#14254) * Move profiling info into `ggml_backend_opencl_context` * Add `enqueue_ndrange_kernel` to launch kernel --- ggml/src/ggml-opencl/ggml-opencl.cpp | 775 +++++++++------------------ 1 file changed, 240 insertions(+), 535 deletions(-) diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index 628e574f0f71e..96e8a8588dcb8 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -231,6 +231,71 @@ static ggml_cl_compiler_version get_adreno_cl_compiler_version(const char *drive return { type, major, minor, patch }; } +// Profiling +struct ProfilingInfo { + std::string op_name; + std::string kernel_name; + + cl_kernel kernel; + cl_event evt; + + cl_ulong cmd_queued; + cl_ulong cmd_submit; + cl_ulong cmd_start; + cl_ulong cmd_end; + cl_ulong overhead_start; + cl_ulong overhead_end; + // For the times below, see spec for clGetEventProfilingInfo + // The time kernel spent in cmd queue - SUBMIT - QUEUED + cl_ulong cmd_queued_duration_ns; + // The time kernel spent for submission - START - SUBMIT + cl_ulong cmd_submit_duration_ns; + // Kernel execution time in nanoseconds - END - START + cl_ulong cmd_duration_ns; + // The time for the kernel to complete - COMPLETE - END + cl_ulong cmd_complete_duration_ns; + // Total time to finish the kernel - COMPELTE - QUEUED + cl_ulong cmd_total_duration_ns; + // Global and local work sizes. + size_t global_size[3]; + size_t local_size[3]; + // Op output size. 
+ size_t output_size[4]; +}; + +static void populateProfilingInfo( + ProfilingInfo& info, cl_event evt, cl_kernel kernel, cl_uint work_dim, + size_t global_size[3], size_t local_size[3], + const ggml_tensor * tensor) { + info.op_name = tensor->name; + info.kernel = kernel; + info.evt = evt; + + // 0 means not specified, e.g., 2D workgroup, or NULL for driver to choose + info.local_size[0] = 0; + info.local_size[1] = 0; + info.local_size[2] = 0; + + info.global_size[0] = 0; + info.global_size[1] = 0; + info.global_size[2] = 0; + + if (local_size) { + for (cl_uint i = 0; i < work_dim; ++i) { + info.local_size[i] = local_size[i]; + } + } + + for (cl_uint i = 0; i < work_dim; ++i) { + info.global_size[i] = global_size[i]; + } + + info.output_size[0] = tensor->ne[0]; + info.output_size[1] = tensor->ne[1]; + info.output_size[2] = tensor->ne[2]; + info.output_size[3] = tensor->ne[3]; +} + struct ggml_backend_opencl_context; // backend device context @@ -254,6 +319,8 @@ struct ggml_backend_opencl_device_context { // backend context struct ggml_backend_opencl_context { + int ref_count; + cl_device_id device; std::string device_name; @@ -369,6 +436,108 @@ struct ggml_backend_opencl_context { cl_kernel kernel_timestep_embedding; cl_kernel kernel_mul_mv_id_q4_0_f32_8x_flat; + std::vector profiling_info; + + void write_profiling_info() { + FILE * fperf = fopen("cl_profiling.csv", "w"); + if (!fperf) { + GGML_LOG_ERROR("Failed to open cl_profiling.csv\n"); + return; + } + + // Populate profiling info + for (ProfilingInfo & info : profiling_info) { + cl_ulong cmd_queued; + cl_ulong cmd_submit; + cl_ulong cmd_start; + cl_ulong cmd_end; + cl_ulong cmd_complete; + + CL_CHECK(clWaitForEvents(1, &info.evt)); + CL_CHECK(clGetEventProfilingInfo( + info.evt, CL_PROFILING_COMMAND_QUEUED, sizeof(cl_ulong), &cmd_queued, NULL)); + CL_CHECK(clGetEventProfilingInfo( + info.evt, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &cmd_submit, NULL)); + CL_CHECK(clGetEventProfilingInfo( + info.evt, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &cmd_start, NULL)); + CL_CHECK(clGetEventProfilingInfo( + info.evt, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &cmd_end, NULL)); + CL_CHECK(clGetEventProfilingInfo( + info.evt, CL_PROFILING_COMMAND_COMPLETE, sizeof(cl_ulong), &cmd_complete, NULL)); + CL_CHECK(clReleaseEvent(info.evt)); + + char kernel_name[512]; + CL_CHECK(clGetKernelInfo(info.kernel, CL_KERNEL_FUNCTION_NAME, + sizeof(kernel_name), kernel_name, NULL)); + info.kernel_name = kernel_name; + + info.cmd_queued = cmd_queued; + info.cmd_submit = cmd_submit; + info.cmd_start = cmd_start; + info.cmd_end = cmd_end; + + info.cmd_queued_duration_ns = cmd_submit - cmd_queued; + info.cmd_submit_duration_ns = cmd_start - cmd_submit; + info.cmd_duration_ns = cmd_end - cmd_start; + info.cmd_complete_duration_ns = cmd_complete - cmd_end; + info.cmd_total_duration_ns = cmd_complete - cmd_queued; + } + + // Dump a csv + float total_kernel_time = 0; + fprintf(fperf, "op name, kernel name, queued duration (ms), submit duration(ms), exec duration (ms), complete duration (ms), total duration (ms), global size, local size, output size\n"); + for (const ProfilingInfo & info : profiling_info) { + total_kernel_time += info.cmd_duration_ns/1.e6f; + fprintf(fperf, "%s,%s,%f,%f,%f,%f,%f,%zux%zux%zu,%zux%zux%zu,%zux%zux%zux%zu\n", + info.op_name.c_str(), info.kernel_name.c_str(), + info.cmd_queued_duration_ns/1.e6f, + info.cmd_submit_duration_ns/1.e6f, + info.cmd_duration_ns/1.e6f, + info.cmd_complete_duration_ns/1.e6f, + 
info.cmd_total_duration_ns/1.e6f, + info.global_size[0], info.global_size[1], info.global_size[2], + info.local_size[0], info.local_size[1], info.local_size[2], + info.output_size[0], info.output_size[1], info.output_size[2], info.output_size[3]); + } + fclose(fperf); + + GGML_LOG_INFO("ggml_opencl: total kernel time: %f\n", total_kernel_time); + + // Dump a simple chrome trace + FILE* ftrace = fopen("cl_trace.json", "w"); + if (!ftrace) { + GGML_LOG_ERROR("Failed to open cl_trace.json\n"); + return; + } + + fprintf(ftrace, "[\n"); + for (const ProfilingInfo & info : profiling_info) { + fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Host\"},\n", + info.kernel_name.c_str(), info.cmd_queued/1000); + fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Host\"},\n", + info.kernel_name.c_str(), info.cmd_submit/1000); + + fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Device\"},\n", + info.kernel_name.c_str(), info.cmd_start/1000); + fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Device\"},\n", + info.kernel_name.c_str(), info.cmd_end/1000); + } + fclose(ftrace); + } + + void enqueue_ndrange_kernel(cl_kernel kernel, cl_uint work_dim, size_t *global_work_size, size_t *local_work_size, const ggml_tensor * tensor) { +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, work_dim, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + profiling_info.emplace_back(); + populateProfilingInfo(profiling_info.back(), evt, kernel, work_dim, global_work_size, local_work_size, tensor); +#else + GGML_UNUSED(tensor); + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, work_dim, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif + } + #ifdef GGML_OPENCL_USE_ADRENO_KERNELS // Transpose kernels cl_program program_transpose; @@ -395,46 +564,19 @@ struct ggml_backend_opencl_context { cl_kernel CL_mul_mat_vec_q4_0_f32_1d_4x_flat_11008_1_4096; cl_kernel CL_mul_mat_vec_q4_0_f32_1d_4x_flat_32000_1_4096; #endif // GGML_OPENCL_USE_ADRENO_KERNELS -}; -// All registered devices with a default device in the front. -static std::vector g_ggml_backend_opencl_devices; - -// Profiling + void free() { + ref_count--; + if (ref_count == 0) { #ifdef GGML_OPENCL_PROFILING -struct ProfilingInfo { - std::string op_name; - std::string kernel_name; - - cl_kernel kernel; - cl_event evt; - - cl_ulong cmd_queued; - cl_ulong cmd_submit; - cl_ulong cmd_start; - cl_ulong cmd_end; - cl_ulong overhead_start; - cl_ulong overhead_end; - // For the times below, see spec for clGetEventProfilingInfo - // The time kernel spent in cmd queue - SUBMIT - QUEUED - cl_ulong cmd_queued_duration_ns; - // The time kernel spent for submission - START - SUBMIT - cl_ulong cmd_submit_duration_ns; - // Kernel execution time in nanoseconds - END - START - cl_ulong cmd_duration_ns; - // The time for the kernel to complete - COMPLETE - END - cl_ulong cmd_complete_duration_ns; - // Total time to finish the kernel - COMPELTE - QUEUED - cl_ulong cmd_total_duration_ns; - // Global and local work sizes. - size_t global_size[3]; - size_t local_size[3]; - // Op output size. - size_t output_size[4]; + write_profiling_info(); +#endif + } + } }; -std::vector g_profiling_info; -#endif +// All registered devices with a default device in the front. 
+static std::vector g_ggml_backend_opencl_devices; inline std::string read_file(const std::string &path) { std::ifstream ifs(path); @@ -1669,6 +1811,12 @@ static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) { backend_ctx->device = dev_ctx->device; backend_ctx->gpu_family = GPU_FAMILY::UNKNOWN; + // ref_count get increased in ggml_backend_opencl_device_init + // This function is also used to retrieve backend context, so we don't want + // to increase ref_count for each call. We only want to increase ref_count + // when the associated device is initialized + backend_ctx->ref_count = 0; + if (strstr(dev_ctx->device_name.c_str(), "Adreno") || strstr(dev_ctx->device_name.c_str(), "Qualcomm") || strstr(dev_ctx->device_version.c_str(), "Adreno")) { @@ -1841,93 +1989,22 @@ static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) { return dev_ctx->backend_ctx; } -static void ggml_cl2_free(void) { -#ifdef GGML_OPENCL_PROFILING - FILE * fperf = fopen("cl_profiling.csv", "w"); - if (!fperf) { - GGML_LOG_ERROR("Failed to open cl_profiling.csv\n"); - return; - } +static void ggml_cl2_free(ggml_backend_t backend) { + ggml_backend_opencl_context * ctx = (ggml_backend_opencl_context *) backend->context; + ctx->free(); - // Populate profiling info - for (ProfilingInfo & info : g_profiling_info) { - cl_ulong cmd_queued; - cl_ulong cmd_submit; - cl_ulong cmd_start; - cl_ulong cmd_end; - cl_ulong cmd_complete; - - CL_CHECK(clWaitForEvents(1, &info.evt)); - CL_CHECK(clGetEventProfilingInfo( - info.evt, CL_PROFILING_COMMAND_QUEUED, sizeof(cl_ulong), &cmd_queued, NULL)); - CL_CHECK(clGetEventProfilingInfo( - info.evt, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &cmd_submit, NULL)); - CL_CHECK(clGetEventProfilingInfo( - info.evt, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &cmd_start, NULL)); - CL_CHECK(clGetEventProfilingInfo( - info.evt, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &cmd_end, NULL)); - CL_CHECK(clGetEventProfilingInfo( - info.evt, CL_PROFILING_COMMAND_COMPLETE, sizeof(cl_ulong), &cmd_complete, NULL)); - CL_CHECK(clReleaseEvent(info.evt)); - - char kernel_name[512]; - CL_CHECK(clGetKernelInfo(info.kernel, CL_KERNEL_FUNCTION_NAME, - sizeof(kernel_name), kernel_name, NULL)); - info.kernel_name = kernel_name; - - info.cmd_queued = cmd_queued; - info.cmd_submit = cmd_submit; - info.cmd_start = cmd_start; - info.cmd_end = cmd_end; - - info.cmd_queued_duration_ns = cmd_submit - cmd_queued; - info.cmd_submit_duration_ns = cmd_start - cmd_submit; - info.cmd_duration_ns = cmd_end - cmd_start; - info.cmd_complete_duration_ns = cmd_complete - cmd_end; - info.cmd_total_duration_ns = cmd_complete - cmd_queued; - } - - // Dump a csv - float total_kernel_time = 0; - fprintf(fperf, "op name, kernel name, queued duration (ms), submit duration(ms), exec duration (ms), complete duration (ms), total duration (ms), global size, local size, output size\n"); - for (const ProfilingInfo & info : g_profiling_info) { - total_kernel_time += info.cmd_duration_ns/1.e6f; - fprintf(fperf, "%s,%s,%f,%f,%f,%f,%f,%zux%zux%zu,%zux%zux%zu,%zux%zux%zux%zu\n", - info.op_name.c_str(), info.kernel_name.c_str(), - info.cmd_queued_duration_ns/1.e6f, - info.cmd_submit_duration_ns/1.e6f, - info.cmd_duration_ns/1.e6f, - info.cmd_complete_duration_ns/1.e6f, - info.cmd_total_duration_ns/1.e6f, - info.global_size[0], info.global_size[1], info.global_size[2], - info.local_size[0], info.local_size[1], info.local_size[2], - info.output_size[0], info.output_size[1], info.output_size[2], 
info.output_size[3]); - } - fclose(fperf); - - GGML_LOG_INFO("ggml_opencl: total kernel time: %f\n", total_kernel_time); - - // Dump a simple chrome trace - FILE* ftrace = fopen("cl_trace.json", "w"); - if (!ftrace) { - GGML_LOG_ERROR("Failed to open cl_trace.json\n"); - return; + // The CL context is shared by all backends, release it if all backends have been released + bool should_release_opencl = true; + for (auto device : g_ggml_backend_opencl_devices) { + ggml_backend_opencl_device_context * ctx_dev = (ggml_backend_opencl_device_context *) device.context; + if (ctx_dev->backend_ctx->ref_count > 0) { + should_release_opencl = false; + } } - fprintf(ftrace, "[\n"); - for (const ProfilingInfo & info : g_profiling_info) { - fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Host\"},\n", - info.kernel_name.c_str(), info.cmd_queued/1000); - fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Host\"},\n", - info.kernel_name.c_str(), info.cmd_submit/1000); - - fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"B\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Device\"},\n", - info.kernel_name.c_str(), info.cmd_start/1000); - fprintf(ftrace, "{\"name\": \"%s\", \"cat\": \"OpenCL\", \"ph\": \"E\", \"ts\": %lu, \"pid\": \"\", \"tid\": \"Device\"},\n", - info.kernel_name.c_str(), info.cmd_end/1000); + if (should_release_opencl) { + CL_CHECK(clReleaseContext(ctx->context)); } - fclose(ftrace); -#endif } //------------------------------------------------------------------------------ @@ -2011,9 +2088,7 @@ static const char * ggml_backend_opencl_name(ggml_backend_t backend) { } static void ggml_backend_opencl_free(ggml_backend_t backend) { - ggml_cl2_free(); - - GGML_UNUSED(backend); + ggml_cl2_free(backend); } static void ggml_backend_opencl_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { @@ -2899,6 +2974,8 @@ static void ggml_backend_opencl_device_get_props(ggml_backend_dev_t dev, struct static ggml_backend_t ggml_backend_opencl_device_init(ggml_backend_dev_t dev, const char * params) { ggml_backend_opencl_context * backend_ctx = ggml_cl2_init(dev); + // Getting a new reference to the backend, increase ref_count + backend_ctx->ref_count++; ggml_backend_t backend = new ggml_backend { /* .guid = */ ggml_backend_opencl_guid(), @@ -3159,31 +3236,6 @@ static void dump_tensor(ggml_backend_t backend, const struct ggml_tensor * tenso #define dump_tensor(tensor) #endif -//------------------------------------------------------------------------------ -// Profiling utility -//------------------------------------------------------------------------------ -#ifdef GGML_OPENCL_PROFILING -static void populateProfilingInfo( - ProfilingInfo& info, cl_event evt, cl_kernel kernel, - size_t global_size[3], size_t local_size[3], - const ggml_tensor * tensor) { - info.op_name = tensor->name; - info.kernel = kernel; - info.evt = evt; - - info.local_size[0] = local_size[0]; - info.local_size[1] = local_size[1]; - info.local_size[2] = local_size[2]; - info.global_size[0] = global_size[0]; - info.global_size[1] = global_size[1]; - info.global_size[2] = global_size[2]; - info.output_size[0] = tensor->ne[0]; - info.output_size[1] = tensor->ne[1]; - info.output_size[2] = tensor->ne[2]; - info.output_size[3] = tensor->ne[3]; -} -#endif - //------------------------------------------------------------------------------ // Ops 
//------------------------------------------------------------------------------ @@ -3227,7 +3279,6 @@ static void ggml_cl_get_rows(ggml_backend_t backend, const ggml_tensor * src0, c const cl_ulong nb2 = dst ? dst->nb[2] : 0; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; @@ -3271,15 +3322,7 @@ static void ggml_cl_get_rows(ggml_backend_t backend, const ggml_tensor * src0, c size_t global_work_size[] = {(size_t)ne10, (size_t)ne11, 1}; size_t local_work_size[] = {1, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_add(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -3321,7 +3364,6 @@ static void ggml_cl_add(ggml_backend_t backend, const ggml_tensor * src0, const const cl_ulong nb3 = dst ? dst->nb[3] : 0; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; @@ -3396,29 +3438,13 @@ static void ggml_cl_add(ggml_backend_t backend, const ggml_tensor * src0, const local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. } -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size_ptr, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } else { unsigned int nth = MIN(64, ne0); size_t global_work_size[] = {ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {nth, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } } @@ -3461,7 +3487,6 @@ static void ggml_cl_mul(ggml_backend_t backend, const ggml_tensor * src0, const const cl_ulong nb3 = dst ? 
dst->nb[3] : 0; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; @@ -3536,29 +3561,13 @@ static void ggml_cl_mul(ggml_backend_t backend, const ggml_tensor * src0, const local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. } -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size_ptr, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } else { unsigned int nth = MIN(64, ne0); size_t global_work_size[] = {ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {nth, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } } @@ -3598,7 +3607,6 @@ static void ggml_cl_div(ggml_backend_t backend, const ggml_tensor * src0, const const cl_ulong nb3 = dst->nb[3]; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; @@ -3661,29 +3669,13 @@ static void ggml_cl_div(ggml_backend_t backend, const ggml_tensor * src0, const size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } else { unsigned int nth = MIN(64, ne0); size_t global_work_size[] = {ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {nth, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } } @@ -3723,7 +3715,6 @@ static void ggml_cl_sub(ggml_backend_t backend, const ggml_tensor * src0, const const 
cl_ulong nb3 = dst->nb[3]; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; @@ -3786,29 +3777,13 @@ static void ggml_cl_sub(ggml_backend_t backend, const ggml_tensor * src0, const size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } else { unsigned int nth = MIN(64, ne0); size_t global_work_size[] = {ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {nth, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } } @@ -3821,7 +3796,6 @@ static void ggml_cl_gelu(ggml_backend_t backend, const ggml_tensor * src0, const UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -3848,15 +3822,7 @@ static void ggml_cl_gelu(ggml_backend_t backend, const ggml_tensor * src0, const size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_gelu_quick(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -3868,7 +3834,6 @@ static void ggml_cl_gelu_quick(ggml_backend_t backend, const ggml_tensor * src0, UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -3895,15 +3860,7 @@ static void ggml_cl_gelu_quick(ggml_backend_t backend, const ggml_tensor * src0, size_t global_work_size[] = {(size_t)n, 1, 1}; size_t local_work_size[] = {64, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - clEnqueueNDRangeKernel(queue, kernel, 3, NULL, 
global_work_size, local_work_size, 0, NULL, &evt); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_silu(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -3915,7 +3872,6 @@ static void ggml_cl_silu(ggml_backend_t backend, const ggml_tensor * src0, const UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -3947,15 +3903,7 @@ static void ggml_cl_silu(ggml_backend_t backend, const ggml_tensor * src0, const local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. } -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size_ptr, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_relu(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -3967,7 +3915,6 @@ static void ggml_cl_relu(ggml_backend_t backend, const ggml_tensor * src0, const UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -3992,15 +3939,7 @@ static void ggml_cl_relu(ggml_backend_t backend, const ggml_tensor * src0, const local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. } -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size_ptr, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_sigmoid(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -4012,7 +3951,6 @@ static void ggml_cl_sigmoid(ggml_backend_t backend, const ggml_tensor * src0, co UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -4044,15 +3982,7 @@ static void ggml_cl_sigmoid(ggml_backend_t backend, const ggml_tensor * src0, co local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. 
} -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size_ptr, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_clamp(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -4064,7 +3994,6 @@ static void ggml_cl_clamp(ggml_backend_t backend, const ggml_tensor * src0, cons UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -4096,15 +4025,7 @@ static void ggml_cl_clamp(ggml_backend_t backend, const ggml_tensor * src0, cons local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. } -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size_ptr, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_norm(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -4116,7 +4037,6 @@ static void ggml_cl_norm(ggml_backend_t backend, const ggml_tensor * src0, const UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -4157,15 +4077,7 @@ static void ggml_cl_norm(ggml_backend_t backend, const ggml_tensor * src0, const size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_rms_norm(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -4177,7 +4089,6 @@ static void ggml_cl_rms_norm(ggml_backend_t backend, const ggml_tensor * src0, c UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; //ggml_backend_opencl_device_context * dev_ctx = // (ggml_backend_opencl_device_context *)backend->device->context; @@ -4241,15 +4152,7 @@ static void 
ggml_cl_rms_norm(ggml_backend_t backend, const ggml_tensor * src0, c // This is local memory - the size depends on subgroup size. CL_CHECK(clSetKernelArg(kernel, 12, sizeof(float)*nth/sgs, NULL)); -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_group_norm(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -4261,7 +4164,6 @@ static void ggml_cl_group_norm(ggml_backend_t backend, const ggml_tensor * src0, UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -4300,15 +4202,7 @@ static void ggml_cl_group_norm(ggml_backend_t backend, const ggml_tensor * src0, size_t global_work_size[] = {(size_t)n_groups*sgs, 1, 1}; size_t local_work_size[] = {(size_t)sgs, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_tanh(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -4320,7 +4214,6 @@ static void ggml_cl_tanh(ggml_backend_t backend, const ggml_tensor * src0, const UNUSED(src1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -4397,16 +4290,7 @@ static void ggml_cl_tanh(ggml_backend_t backend, const ggml_tensor * src0, const } if (global_work_size[0] == 0 || global_work_size[1] == 0 || global_work_size[2] == 0) return; - -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size_ptr ? 
local_work_size : (size_t[3]){0,0,0}, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_repeat(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1_shape_def, ggml_tensor * dst) { @@ -4419,7 +4303,6 @@ static void ggml_cl_repeat(ggml_backend_t backend, const ggml_tensor * src0, con UNUSED(src1_shape_def); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; if (backend_ctx->kernel_repeat == nullptr) { GGML_LOG_WARN("%s: repeat kernel not available, skipping OpenCL execution.\n", __func__); @@ -4467,15 +4350,7 @@ static void ggml_cl_repeat(ggml_backend_t backend, const ggml_tensor * src0, con size_t global_work_size[] = { gws0, gws1, gws2 }; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, NULL, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, (size_t[3]){0,0,0}, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, NULL, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, NULL, dst); } static void ggml_cl_pad(ggml_backend_t backend, const ggml_tensor * src0, ggml_tensor * dst) { @@ -4488,7 +4363,6 @@ static void ggml_cl_pad(ggml_backend_t backend, const ggml_tensor * src0, ggml_t GGML_ASSERT(src0->ne[3] == 1 && dst->ne[3] == 1); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; if (backend_ctx->kernel_pad == nullptr) { GGML_LOG_WARN("%s: pad kernel not available, skipping OpenCL execution.\n", __func__); @@ -4533,15 +4407,7 @@ static void ggml_cl_pad(ggml_backend_t backend, const ggml_tensor * src0, ggml_t local_work_size_ptr = nullptr; } -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size_ptr ? 
local_work_size : (size_t[3]){0,0,0}, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_upscale(ggml_backend_t backend, const ggml_tensor * src0, ggml_tensor * dst) { @@ -4553,7 +4419,6 @@ static void ggml_cl_upscale(ggml_backend_t backend, const ggml_tensor * src0, gg GGML_ASSERT(dst->type == GGML_TYPE_F32); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; const ggml_scale_mode mode = (ggml_scale_mode) ggml_get_op_params_i32(dst, 0); cl_kernel kernel = nullptr; @@ -4644,17 +4509,7 @@ static void ggml_cl_upscale(ggml_backend_t backend, const ggml_tensor * src0, gg local_work_size_ptr = nullptr; } -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 1, NULL, global_work_size, local_work_size_ptr, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - size_t profiling_gws[3] = {global_work_size[0], 1, 1}; - size_t profiling_lws[3] = {local_work_size_ptr ? local_work_size[0] : 0, 1, 1}; - populateProfilingInfo(g_profiling_info.back(), evt, kernel, profiling_gws, profiling_lws, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 1, NULL, global_work_size, local_work_size_ptr, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_concat(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -4732,7 +4587,7 @@ static void ggml_cl_concat(ggml_backend_t backend, const ggml_tensor * src0, con global_work_size[1] = d_ne1; global_work_size[2] = d_ne2; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, NULL, 0, NULL, NULL)); + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, NULL, dst); } } } else { @@ -4782,7 +4637,7 @@ static void ggml_cl_concat(ggml_backend_t backend, const ggml_tensor * src0, con d_ne2 > 0 ? (size_t)d_ne2 : 1, d_ne3 > 0 ? 
(size_t)d_ne3 : 1 }; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size_nc, NULL, 0, NULL, NULL)); + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size_nc, NULL, dst); } } @@ -4795,7 +4650,6 @@ static void ggml_cl_timestep_embedding(ggml_backend_t backend, const ggml_tensor GGML_ASSERT(dst->type == GGML_TYPE_F32); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; if (backend_ctx->kernel_timestep_embedding == nullptr) { GGML_LOG_WARN("%s: timestep_embedding kernel not available, skipping OpenCL execution.\n", __func__); @@ -4828,17 +4682,7 @@ static void ggml_cl_timestep_embedding(ggml_backend_t backend, const ggml_tensor size_t global_work_size[] = {gws0, gws1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 2, NULL, global_work_size, NULL, 0, NULL, &evt)); // Pass 2 for 2D problem - - g_profiling_info.emplace_back(); - size_t profiling_gws[3] = {global_work_size[0], global_work_size[1], 1}; - size_t profiling_lws[3] = {0,0,0}; // Reflects NULL LWS - populateProfilingInfo(g_profiling_info.back(), evt, kernel, profiling_gws, profiling_lws, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 2, NULL, global_work_size, NULL, 0, NULL, NULL)); // Pass 2 for 2D problem -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, NULL, dst); } static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -4853,7 +4697,6 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; @@ -5058,15 +4901,7 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co static_cast(padded_height_B) }; - #ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 2, NULL, global_size_t, local_size_t, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_size_t, local_size_t, dst); - #else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 2, NULL, global_size_t, local_size_t, 0, NULL, NULL)); - #endif + backend_ctx->enqueue_ndrange_kernel(kernel, 2, global_size_t, local_size_t, dst); } else { // no need to transpose B in other cases // create an image for B from sub_buffer @@ -5188,16 +5023,7 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co // enqueue kernel with profiling // <--------------------------------------------> // - #ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); - // enqueue kernel without profiling - #else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); - #endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); // <--------------------------------------------> // // deallocate 
sub buffers and images @@ -5277,15 +5103,7 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co global_work_size[2] = (size_t)ne12*ne13; } -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); return; } #else // GGML_OPENCL_SOA_Q @@ -5515,15 +5333,7 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co size_t global_work_size[] = {(size_t)(ne01 + ndst-1)/ndst*nth0, (size_t)ne11*nth1, (size_t)ne12*ne13}; size_t local_work_size[] = {(size_t)nth0, (size_t)nth1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } else if (src0t == GGML_TYPE_Q4_K) { GGML_ASSERT(false && "not implemented"); } else if (src0t == GGML_TYPE_Q3_K) { @@ -5534,30 +5344,14 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co size_t global_work_size[] = {(size_t)(ne01+1)/2*nth0, (size_t)ne11*nth1, (size_t)ne12*ne13}; size_t local_work_size[] = {(size_t)nth0, (size_t)nth1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } else { int64_t ny = (ne11 + nrows - 1)/nrows; size_t global_work_size[] = {(size_t)ne01*nth0, (size_t)ny*nth1, (size_t)ne12*ne13}; size_t local_work_size[] = {(size_t)nth0, (size_t)nth1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } } @@ -5574,7 +5368,6 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0, GGML_ASSERT(src2->extra); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extra2 = (ggml_tensor_extra_cl *)src2->extra; @@ -5680,15 +5473,7 @@ static void 
ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0, size_t global_work_size[] = {(size_t)(ne01+ndst*nsg-1)/(ndst*nsg)*sgs, (size_t)(_ne1+nrows-1)/nrows*nsg, (size_t)ne123}; size_t local_work_size[] = {(size_t)sgs, (size_t)nsg, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_scale(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -5701,7 +5486,6 @@ static void ggml_cl_scale(ggml_backend_t backend, const ggml_tensor * src0, cons GGML_ASSERT(ggml_is_contiguous(src0)); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; float scale; memcpy(&scale, dst->op_params, sizeof(scale)); @@ -5730,15 +5514,7 @@ static void ggml_cl_scale(ggml_backend_t backend, const ggml_tensor * src0, cons local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. } -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size_ptr, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } static void ggml_cl_cpy(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -5775,7 +5551,6 @@ static void ggml_cl_cpy(ggml_backend_t backend, const ggml_tensor * src0, const const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; @@ -5840,15 +5615,7 @@ static void ggml_cl_cpy(ggml_backend_t backend, const ggml_tensor * src0, const size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, src1); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, src1); } static void ggml_cl_dup(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -5871,7 +5638,6 @@ static void ggml_cl_diag_mask_inf(ggml_backend_t backend, const ggml_tensor * sr const int ne02 = src0 ? 
src0->ne[2] : 0; ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -5895,15 +5661,7 @@ static void ggml_cl_diag_mask_inf(ggml_backend_t backend, const ggml_tensor * sr size_t global_work_size[] = {(size_t)ne00*ne01*ne02/8, 1, 1}; size_t local_work_size[] = {64, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } else { kernel = backend_ctx->kernel_diag_mask_inf; @@ -5923,15 +5681,7 @@ static void ggml_cl_diag_mask_inf(ggml_backend_t backend, const ggml_tensor * sr local_work_size_ptr = nullptr; // Let driver choose the work-group sizes. } -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size_ptr, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size_ptr, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size_ptr, dst); } } @@ -5951,7 +5701,6 @@ static void ggml_cl_soft_max(ggml_backend_t backend, const ggml_tensor * src0, c } ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -6031,15 +5780,7 @@ static void ggml_cl_soft_max(ggml_backend_t backend, const ggml_tensor * src0, c size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_rope(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -6051,7 +5792,6 @@ static void ggml_cl_rope(ggml_backend_t backend, const ggml_tensor * src0, const GGML_ASSERT(dst->extra); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; @@ -6217,15 +5957,7 @@ static void ggml_cl_rope(ggml_backend_t backend, const ggml_tensor * src0, const size_t global_work_size[] = 
{(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)nth, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_im2col(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -6240,7 +5972,6 @@ static void ggml_cl_im2col(ggml_backend_t backend, const ggml_tensor * src0, con GGML_ASSERT(dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -6309,15 +6040,7 @@ static void ggml_cl_im2col(ggml_backend_t backend, const ggml_tensor * src0, con size_t global_work_size[] = {(size_t)num_blocks*256, (size_t)OH, (size_t)batch*IC}; size_t local_work_size[] = {256, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_argsort(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -6332,7 +6055,6 @@ static void ggml_cl_argsort(ggml_backend_t backend, const ggml_tensor * src0, co GGML_ASSERT(ggml_is_contiguous(src0)); ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -6364,15 +6086,7 @@ static void ggml_cl_argsort(ggml_backend_t backend, const ggml_tensor * src0, co size_t global_work_size[] = {(size_t)ne00_padded, (size_t)nrows, (size_t)1}; size_t local_work_size[] = {(size_t)ne00_padded, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } static void ggml_cl_sum_rows(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -6386,7 +6100,6 @@ static void ggml_cl_sum_rows(ggml_backend_t backend, const ggml_tensor * src0, c GGML_ASSERT(ggml_is_contiguous(src0)); ggml_backend_opencl_context *backend_ctx = 
(ggml_backend_opencl_context *)backend->context; - cl_command_queue queue = backend_ctx->queue; ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; @@ -6427,15 +6140,7 @@ static void ggml_cl_sum_rows(ggml_backend_t backend, const ggml_tensor * src0, c size_t global_work_size[] = {(size_t)ne01, (size_t)ne02, (size_t)ne03}; size_t local_work_size[] = {(size_t)64, 1, 1}; -#ifdef GGML_OPENCL_PROFILING - cl_event evt; - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); - - g_profiling_info.emplace_back(); - populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); -#else - CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); -#endif + backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst); } //------------------------------------------------------------------------------ From 873e9737b32f4bfa42a169c8bf0526cf0483e898 Mon Sep 17 00:00:00 2001 From: Anton Mitkov Date: Wed, 25 Jun 2025 17:09:55 +0100 Subject: [PATCH 148/192] sycl: GGML_SYCL_DISABLE_OPT on by default for all Intel Devices (#13973) --- docs/backend/SYCL.md | 2 +- ggml/src/ggml-sycl/common.hpp | 25 +------------------------ ggml/src/ggml-sycl/ggml-sycl.cpp | 6 ++---- ggml/src/ggml-sycl/sycl_hw.cpp | 4 +++- ggml/src/ggml-sycl/sycl_hw.hpp | 3 +++ 5 files changed, 10 insertions(+), 30 deletions(-) diff --git a/docs/backend/SYCL.md b/docs/backend/SYCL.md index 249e73451e66b..6e9b88935da97 100644 --- a/docs/backend/SYCL.md +++ b/docs/backend/SYCL.md @@ -757,7 +757,7 @@ use 1 SYCL GPUs: [0] with Max compute units:512 | Name | Value | Function | |-------------------|------------------|---------------------------------------------------------------------------------------------------------------------------| | GGML_SYCL_DEBUG | 0 (default) or 1 | Enable log function by macro: GGML_SYCL_DEBUG | -| GGML_SYCL_DISABLE_OPT | 0 (default) or 1 | Disable optimize features based on Intel GPU type, to compare the performance increase | +| GGML_SYCL_DISABLE_OPT | 0 (default) or 1 | Disable optimize features for Intel GPUs. (Recommended to 1 for intel devices older than Gen 10) | | GGML_SYCL_DISABLE_GRAPH | 0 or 1 (default) | Disable running computations through SYCL Graphs feature. Disabled by default because graph performance isn't yet better than non-graph performance. | | GGML_SYCL_DISABLE_DNN | 0 (default) or 1 | Disable running computations through oneDNN and always use oneMKL. | | ZES_ENABLE_SYSMAN | 0 (default) or 1 | Support to get free memory of GPU by sycl::aspect::ext_intel_free_memory.
Recommended to use when --split-mode = layer | diff --git a/ggml/src/ggml-sycl/common.hpp b/ggml/src/ggml-sycl/common.hpp index 753b4af143622..4e7449d06ecfe 100644 --- a/ggml/src/ggml-sycl/common.hpp +++ b/ggml/src/ggml-sycl/common.hpp @@ -199,7 +199,7 @@ struct sycl_device_info { // size_t smpb; // max. shared memory per block bool vmm; // virtual memory support size_t total_vram; - sycl_hw_info hw_info; + //sycl_hw_info hw_info; \\ device id and aarch, currently not used optimize_feature opt_feature; }; @@ -286,29 +286,6 @@ struct ggml_tensor_extra_gpu { void release_extra_gpu(ggml_tensor_extra_gpu * extra, std::vector streams={}); -inline optimize_feature check_gpu_optimize_feature(syclex::architecture &arch) { - optimize_feature opt; - - opt.reorder = - (arch == syclex::architecture::intel_gpu_dg1 || - arch == syclex::architecture::intel_gpu_acm_g10 || - arch == syclex::architecture::intel_gpu_acm_g11 || - arch == syclex::architecture::intel_gpu_acm_g12 || - arch == syclex::architecture::intel_gpu_pvc || - arch == syclex::architecture::intel_gpu_pvc_vg || - arch == syclex::architecture::intel_gpu_mtl_u || - arch == syclex::architecture::intel_gpu_mtl_s || - arch == syclex::architecture::intel_gpu_mtl_h || - arch == syclex::architecture::intel_gpu_arl_u || - arch == syclex::architecture::intel_gpu_arl_s || - arch == syclex::architecture::intel_gpu_arl_h || - arch == syclex::architecture::intel_gpu_bmg_g21 || - arch == syclex::architecture::intel_gpu_lnl_m - ); - - return opt; -} - namespace sycl_ex = sycl::ext::oneapi::experimental; struct ggml_backend_sycl_context { int device; diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index f25a96a625c51..9cb36ae99e7f5 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -83,9 +83,7 @@ static ggml_sycl_device_info ggml_sycl_init() { info.devices[i].cc = 100 * prop.get_major_version() + 10 * prop.get_minor_version(); - info.devices[i].hw_info = get_device_hw_info(&device); - info.devices[i].opt_feature = check_gpu_optimize_feature(info.devices[i].hw_info.arch); - + info.devices[i].opt_feature.reorder = !device.ext_oneapi_architecture_is(syclex::arch_category::intel_gpu); info.max_work_group_sizes[i] = prop.get_max_work_group_size(); } @@ -195,7 +193,7 @@ static void ggml_check_sycl() try { if (!initialized) { g_ggml_sycl_debug = get_sycl_env("GGML_SYCL_DEBUG", 0); - g_ggml_sycl_disable_optimize= get_sycl_env("GGML_SYCL_DISABLE_OPT", 1); + g_ggml_sycl_disable_optimize = get_sycl_env("GGML_SYCL_DISABLE_OPT", 0); g_ggml_sycl_disable_graph = get_sycl_env("GGML_SYCL_DISABLE_GRAPH", 1); g_ggml_sycl_disable_dnn = get_sycl_env("GGML_SYCL_DISABLE_DNN", 0); g_ggml_sycl_prioritize_dmmv = get_sycl_env("GGML_SYCL_PRIORITIZE_DMMV", 0); diff --git a/ggml/src/ggml-sycl/sycl_hw.cpp b/ggml/src/ggml-sycl/sycl_hw.cpp index da121ffc261e8..7041140034b45 100644 --- a/ggml/src/ggml-sycl/sycl_hw.cpp +++ b/ggml/src/ggml-sycl/sycl_hw.cpp @@ -1,6 +1,7 @@ #include "sycl_hw.hpp" - +// TODO: currently not used +/* sycl_hw_info get_device_hw_info(sycl::device *device_ptr) { sycl_hw_info res; int32_t id = device_ptr->get_info(); @@ -11,3 +12,4 @@ sycl_hw_info get_device_hw_info(sycl::device *device_ptr) { return res; } +*/ diff --git a/ggml/src/ggml-sycl/sycl_hw.hpp b/ggml/src/ggml-sycl/sycl_hw.hpp index bf689450ce61f..36b140bf03737 100644 --- a/ggml/src/ggml-sycl/sycl_hw.hpp +++ b/ggml/src/ggml-sycl/sycl_hw.hpp @@ -10,6 +10,8 @@ namespace syclex = sycl::ext::oneapi::experimental; +// TODO: currently not used 
+/* struct sycl_hw_info { syclex::architecture arch; int32_t device_id; @@ -18,6 +20,7 @@ struct sycl_hw_info { bool is_in_vector(std::vector &vec, int item); sycl_hw_info get_device_hw_info(sycl::device *device_ptr); +*/ #endif // SYCL_HW_HPP From 8b4e408fb7fc5a10015bca81d6690722af4a4224 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Wed, 25 Jun 2025 23:26:51 +0200 Subject: [PATCH 149/192] ggml : do not output unprintable characters on GGUF load failure (#14381) --- ggml/src/gguf.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ggml/src/gguf.cpp b/ggml/src/gguf.cpp index a0a318a29f5b9..5ffd12b8b2795 100644 --- a/ggml/src/gguf.cpp +++ b/ggml/src/gguf.cpp @@ -335,7 +335,11 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par for (uint32_t i = 0; i < magic.size(); i++) { if (magic[i] != GGUF_MAGIC[i]) { - GGML_LOG_ERROR("%s: invalid magic characters: '%c%c%c%c', expected 'GGUF'\n", __func__, magic[0], magic[1], magic[2], magic[3]); + char c0 = isprint(magic[0]) ? magic[0] : '?'; + char c1 = isprint(magic[1]) ? magic[1] : '?'; + char c2 = isprint(magic[2]) ? magic[2] : '?'; + char c3 = isprint(magic[3]) ? magic[3] : '?'; + GGML_LOG_ERROR("%s: invalid magic characters: '%c%c%c%c', expected 'GGUF'\n", __func__, c0, c1, c2, c3); gguf_free(ctx); return nullptr; } From 43e4955eae0c554733bcb83f1eef265ee2c8ac92 Mon Sep 17 00:00:00 2001 From: Aaron Teo Date: Thu, 26 Jun 2025 05:49:04 +0800 Subject: [PATCH 150/192] ggml-cpu: enable IBM NNPA Vector Intrinsics (#14317) * ggml-cpu: add nnpa compile flag Signed-off-by: Aaron Teo (cherry picked from commit 4a9f60c201573128f73a65999b3e5cc497fae5c1) * ggml-cpu: add fp16->fp32 nnpa first Signed-off-by: Aaron Teo (cherry picked from commit 8d4a7987f9c1887f716be96250f2caeee0253929) * ggml-cpu: add fp32->fp16 Signed-off-by: Aaron Teo (cherry picked from commit 0ff0d6516247a41d2ade42b42cf0d676a4dd1627) * ggml-cpu: better variable names Signed-off-by: Aaron Teo (cherry picked from commit 2f58bbcbb89c183340e252362b2a40651f573f1f) * docs: update s390x docs Signed-off-by: Aaron Teo (cherry picked from commit 01b929491b50071a5d0572235dcf5a449da70aa7) * ggml-cpu: add debugging prints to see if dlf16 is correct Signed-off-by: Aaron Teo * ggml-cpu: fix print vs printf Signed-off-by: Aaron Teo * ggml-cpu: fix float placeholder Signed-off-by: Aaron Teo * ggml-cpu: ensure fp16 and fp32 load and stores are called Signed-off-by: Aaron Teo * ggml-cpu: fp16 load ensured to hit Signed-off-by: Aaron Teo * ggml-cpu: remove sigint from fp16 store for some reason, the function is not getting a hit when debugged with gdb. 
we will need to investigate further Signed-off-by: Aaron Teo * ggml-cpu: activate nnpa for ggml_cpu_fp16_to_fp32 Signed-off-by: Aaron Teo * ggml-cpu: nnpa activate ggml_cpu_fp16_to_fp32 for 8 elements Signed-off-by: Aaron Teo * ggml-cpu: nnpa switch to vec_xst test Signed-off-by: Aaron Teo * ggml-cpu: switch to vec_xst for 4 element loops also Signed-off-by: Aaron Teo * ggml-cpu: rework noop Signed-off-by: Aaron Teo * ggml-cpu: remove noop, general code cleanup Signed-off-by: Aaron Teo * ggml-cpu: clarify variable naming Signed-off-by: Aaron Teo * ggml-cpu: activate nnpa for ggml_cpu_fp32_to_fp16 Signed-off-by: Aaron Teo * ggml-cpu: add breakpoint for debugging Signed-off-by: Aaron Teo * ggml-cpu: test fix for conversion failure Signed-off-by: Aaron Teo * ggml-cpu: disable fp32->fp16 nnpa conversions for now there are some conversion failures in nnpa that requires the eyes of an ibm stsm. will create a separate pr to introduce the fp32->fp16 change. Signed-off-by: Aaron Teo * ggml-cpu: switch to elif macro Signed-off-by: Aaron Teo * ggml-cpu: reattempt fp32->fp16 Signed-off-by: Aaron Teo * ggml-cpu: fix typo Signed-off-by: Aaron Teo * ggml-cpu: reattempt fp32->fp16 Signed-off-by: Aaron Teo * ggml-cpu: fix compiler types Signed-off-by: Aaron Teo * ggml-cpu: change to typedef vector types Signed-off-by: Aaron Teo * ggml-cpu: add 4 element loops for fp32->fp16 Signed-off-by: Aaron Teo * ggml-cpu: clarified vector naming Signed-off-by: Aaron Teo * ggml-cpu: bring back fp32->fp16 store nnpa Signed-off-by: Aaron Teo * ggml-cpu: activate nnpa fp32->fp16 or fp16->fp32 compute Signed-off-by: Aaron Teo * ggml-cpu: add nnpa macro check in ggml-impl Signed-off-by: Aaron Teo * ggml-cpu: add missing __func__ Signed-off-by: Aaron Teo * ggml-cpu: diagnose why __NNPA__ macro is not being defined Signed-off-by: Aaron Teo * ggml-cpu: import vecintrin.h to fix compiler errors Signed-off-by: Aaron Teo * ggml-cpu: update macro tests Signed-off-by: Aaron Teo * ggml-cpu: move s390x typedef to own header file Signed-off-by: Aaron Teo * Revert "ggml-cpu: move s390x typedef to own header file" This reverts commit 157f856c34589566151630e294563a420702db39. Signed-off-by: Aaron Teo * ggml-cpu: switch to importing ggml-cpu-impl instead Signed-off-by: Aaron Teo * ggml-cpu: fix macro declaration Signed-off-by: Aaron Teo * ggml-cpu: test more macros Signed-off-by: Aaron Teo * ggml-cpu: add debug prints Signed-off-by: Aaron Teo * ggml-cpu: bruteforce macro definitions Signed-off-by: Aaron Teo * ggml-cpu: move macro definitions Signed-off-by: Aaron Teo * ggml-cpu: add ggml-impl.h to cmakelists Signed-off-by: Aaron Teo * ggml-cpu: switch to private macros Signed-off-by: Aaron Teo * ggml-cpu: move s390x typedef to own header file Signed-off-by: Aaron Teo (cherry picked from commit 157f856c34589566151630e294563a420702db39) * ggml-cpu: move things around Signed-off-by: Aaron Teo * ggml-cpu: bring back compile macros Signed-off-by: Aaron Teo * ggml-cpu: switch to quotes for import Signed-off-by: Aaron Teo * ggml-cpu: add compiler error macro Signed-off-by: Aaron Teo * ggml-cpu: add s390x detection in ggml-src Signed-off-by: Aaron Teo * ggml-cpu: bring back compile definitions Signed-off-by: Aaron Teo * ggml-cpu: undo cmakelists work Signed-off-by: Aaron Teo * Revert "ggml-cpu: move s390x typedef to own header file" This reverts commit 18d79e1a30b39d9aaa0bd58400c5cf2c32135c9a. 
Signed-off-by: Aaron Teo * ggml-cpu: remove typedefs.h Signed-off-by: Aaron Teo * ggml-cpu: remove typedef from cmakelists Signed-off-by: Aaron Teo * ggml-cpu: add ggml-impl.h future notes Signed-off-by: Aaron Teo * ggml-cpu: add todo comment for future reference Signed-off-by: Aaron Teo * ggml-cpu: clarify naming of dlf16 Signed-off-by: Aaron Teo * ggml-cpu: remove unnecessary target compile definitions Signed-off-by: Aaron Teo * ggml-cpu: move nnpa fp16->fp32 and fp32->fp16 to simd-mappings Signed-off-by: Aaron Teo * ggml: refactor fp32->fp16 and fp16->fp32 simd to ggml-cpu Signed-off-by: Aaron Teo * docs: update broken huggingface link for s390x Signed-off-by: Aaron Teo * ggml-cpu: fix duplicate func names during compile Signed-off-by: Aaron Teo * Revert "ggml-cpu: fix duplicate func names during compile" This reverts commit fbb733451f27677063b914d4f6c9a9841d45b38d. Signed-off-by: Aaron Teo * Revert "ggml: refactor fp32->fp16 and fp16->fp32 simd to ggml-cpu" This reverts commit bd288e8fa52b5244f65cee21cb61062f1a9e0ca5. Signed-off-by: Aaron Teo * ggml: refactor fp16<->fp32 simd to ggml-cpu Signed-off-by: Aaron Teo * ggml-cpu: fix missing simd-mappings.h import in quants.c Signed-off-by: Aaron Teo * ggml-cpu: fix missing simd-mappings.h within repack Signed-off-by: Aaron Teo * ggml-cpu: fix amx mmq missing simd-mappings.h Signed-off-by: Aaron Teo * ggml-cpu: attempt at fixing loongarch failing build Signed-off-by: Aaron Teo * ggml-cpu: move nnpa together with other fp16<->fp32 simd Signed-off-by: Aaron Teo * ggml-cpu: fix wrong refactor of ggml-base ref: https://github.com/ggml-org/llama.cpp/pull/14317#discussion_r2164176555 Signed-off-by: Aaron Teo * ggml: remove dependency on ggml-cpu from ggml-base Signed-off-by: Aaron Teo * ggml-cpu: rename all fp16<->fp32 macros to prefix with ggml_cpu ref: https://github.com/ggml-org/llama.cpp/pull/14317#discussion_r2164449406 Signed-off-by: Aaron Teo * ggml-cpu: remove mistaken fallback macro fallback logic was already implemented but i was too sleepy to realise Signed-off-by: Aaron Teo * ggml: move ggml_table_f32_f16 to ggml-cpu ref: https://github.com/ggml-org/llama.cpp/pull/14317#discussion_r2164775006 Signed-off-by: Aaron Teo * ggml-cpu: move ggml_table_f32_f16 back to ggml-base due to ci failures Signed-off-by: Aaron Teo * Revert "ggml-cpu: move ggml_table_f32_f16 back to ggml-base due to ci failures" This reverts commit 32a3533564bdb7902cefb9c89b1c9e956a81ce29. Signed-off-by: Aaron Teo * Revert "ggml: move ggml_table_f32_f16 to ggml-cpu" This reverts commit 9e40d984ad27d7b60392fb2b7548885201864fe4. Signed-off-by: Aaron Teo * ggml: move ggml_table_f32_f16 to ggml-cpu ref: https://github.com/ggml-org/llama.cpp/pull/14317#discussion_r2164775006 Signed-off-by: Aaron Teo (cherry picked from commit 9e40d984ad27d7b60392fb2b7548885201864fe4) * ggml: move ggml_table_f32_f16 to ggml-cpu.c Signed-off-by: Aaron Teo * ggml-cpu: extern c ggml_table_f32_f16 + chore docs Signed-off-by: Aaron Teo * ggml-cpu: dedup ggml_table_f32_f16 from simd-mappings.h we rely on the variable declaration in ggml-cpu.c instead Signed-off-by: Aaron Teo * Revert "ggml-cpu: dedup ggml_table_f32_f16 from simd-mappings.h" This reverts commit f71b21d2f74f5e03ec0c2b4fefd3cbf395aecf16. Signed-off-by: Aaron Teo * ggml-cpu: bring back ggml_table_f32_f16 Signed-off-by: Aaron Teo * Revert "ggml-cpu: bring back ggml_table_f32_f16" This reverts commit 2dce119178bed5ef5c8398c4230ddd14fef80e49. 
Signed-off-by: Aaron Teo * fix ggml time initialization * fix f32_f16 table init * remove extra line --------- Signed-off-by: Aaron Teo Co-authored-by: slaren --- docs/build-s390x.md | 41 ++-- docs/build.md | 4 + ggml/CMakeLists.txt | 1 + ggml/include/ggml-cpu.h | 1 + ggml/src/ggml-cpu/CMakeLists.txt | 8 + ggml/src/ggml-cpu/amx/mmq.cpp | 19 +- ggml/src/ggml-cpu/arch/arm/quants.c | 217 +++++++++---------- ggml/src/ggml-cpu/arch/arm/repack.cpp | 25 +-- ggml/src/ggml-cpu/arch/loongarch/quants.c | 105 +++++----- ggml/src/ggml-cpu/arch/powerpc/quants.c | 111 +++++----- ggml/src/ggml-cpu/arch/riscv/quants.c | 83 ++++---- ggml/src/ggml-cpu/arch/riscv/repack.cpp | 47 +++-- ggml/src/ggml-cpu/arch/s390/quants.c | 57 ++--- ggml/src/ggml-cpu/arch/wasm/quants.c | 59 +++--- ggml/src/ggml-cpu/arch/x86/quants.c | 165 +++++++-------- ggml/src/ggml-cpu/arch/x86/repack.cpp | 39 ++-- ggml/src/ggml-cpu/common.h | 5 +- ggml/src/ggml-cpu/ggml-cpu-impl.h | 12 +- ggml/src/ggml-cpu/ggml-cpu.c | 75 +++++-- ggml/src/ggml-cpu/ggml-cpu.cpp | 3 + ggml/src/ggml-cpu/llamafile/sgemm.cpp | 5 +- ggml/src/ggml-cpu/ops.cpp | 96 ++++----- ggml/src/ggml-cpu/quants.c | 49 ++--- ggml/src/ggml-cpu/repack.cpp | 29 +-- ggml/src/ggml-cpu/simd-mappings.h | 244 +++++++++++++++++++--- ggml/src/ggml-cpu/vec.cpp | 4 +- ggml/src/ggml-cpu/vec.h | 90 ++++---- ggml/src/ggml-impl.h | 244 ++++++---------------- ggml/src/ggml.c | 11 - 29 files changed, 996 insertions(+), 853 deletions(-) diff --git a/docs/build-s390x.md b/docs/build-s390x.md index f44038c586ddc..bb6eae784d6d0 100644 --- a/docs/build-s390x.md +++ b/docs/build-s390x.md @@ -28,8 +28,9 @@ cmake --build build --config Release -j $(nproc) ``` **Notes**: -- For faster repeated compilation, install [ccache](https://ccache.dev/) -- By default, VXE/VXE2 is enabled. To disable it (not recommended): + +- For faster repeated compilation, install [ccache](https://ccache.dev/) +- By default, VXE/VXE2 is enabled. To disable it (not recommended): ```bash cmake -S . -B build \ @@ -41,18 +42,29 @@ cmake --build build --config Release -j $(nproc) cmake --build build --config Release -j $(nproc) ``` -- For debug builds: +- By default, NNPA is enabled when available. To disable it (not recommended): + + ```bash + cmake -S . -B build \ + -DCMAKE_BUILD_TYPE=Release \ + -DGGML_BLAS=ON \ + -DGGML_BLAS_VENDOR=OpenBLAS \ + -DGGML_NNPA=OFF + + cmake --build build --config Release -j $(nproc) + ``` + +- For debug builds: ```bash cmake -S . -B build \ -DCMAKE_BUILD_TYPE=Debug \ -DGGML_BLAS=ON \ -DGGML_BLAS_VENDOR=OpenBLAS - cmake --build build --config Debug -j $(nproc) ``` -- For static builds, add `-DBUILD_SHARED_LIBS=OFF`: +- For static builds, add `-DBUILD_SHARED_LIBS=OFF`: ```bash cmake -S . -B build \ @@ -70,7 +82,7 @@ All models need to be converted to Big-Endian. You can achieve this in three cas 1. **Use pre-converted models verified for use on IBM Z & LinuxONE (easiest)** - You can find popular models pre-converted and verified at [s390x Ready Models](hf.co/collections/taronaeo/s390x-ready-models-672765393af438d0ccb72a08). + You can find popular models pre-converted and verified at [s390x Ready Models](https://huggingface.co/collections/taronaeo/s390x-ready-models-672765393af438d0ccb72a08). These models and their respective tokenizers are verified to run correctly on IBM Z & LinuxONE. @@ -101,27 +113,33 @@ All models need to be converted to Big-Endian. 
You can achieve this in three cas ``` For example, + ```bash python3 gguf-py/gguf/scripts/gguf_convert_endian.py granite-3.3-2b-instruct-le.f16.gguf BIG mv granite-3.3-2b-instruct-le.f16.gguf granite-3.3-2b-instruct-be.f16.gguf ``` **Notes:** + - The GGUF endian conversion script may not support all data types at the moment and may fail for some models/quantizations. When that happens, please try manually converting the safetensors model to GGUF Big-Endian via Step 2. ## IBM Accelerators ### 1. SIMD Acceleration -Only available in IBM z15 or later system with the `-DGGML_VXE=ON` (turned on by default) compile flag. No hardware acceleration is possible with llama.cpp with older systems, such as IBM z14 or EC13. In such systems, the APIs can still run but will use a scalar implementation. +Only available in IBM z15 or later system with the `-DGGML_VXE=ON` (turned on by default) compile flag. No hardware acceleration is possible with llama.cpp with older systems, such as IBM z14/arch12. In such systems, the APIs can still run but will use a scalar implementation. + +### 2. NNPA Vector Intrinsics Acceleration -### 2. zDNN Accelerator +Only available in IBM z16 or later system with the `-DGGML_NNPA=ON` (turned on when available) compile flag. No hardware acceleration is possible with llama.cpp with older systems, such as IBM z15/arch13. In such systems, the APIs can still run but will use a scalar implementation. -*Only available in IBM z16 or later system. No direction at the moment.* +### 3. zDNN Accelerator -### 3. Spyre Accelerator +_Only available in IBM z16 or later system. No direction at the moment._ -*No direction at the moment.* +### 4. Spyre Accelerator + +_No direction at the moment._ ## Performance Tuning @@ -154,4 +172,3 @@ IBM VXE/VXE2 SIMD acceleration depends on the BLAS implementation. It is strongl 2. **Other Questions** Please reach out directly to [aionz@us.ibm.com](mailto:aionz@us.ibm.com). - diff --git a/docs/build.md b/docs/build.md index 20a6f606eb779..2e0b5d970c91a 100644 --- a/docs/build.md +++ b/docs/build.md @@ -557,6 +557,10 @@ ninja To read documentation for how to build on Android, [click here](./android.md) +## IBM Z & LinuxONE + +To read documentation for how to build on IBM Z & LinuxONE, [click here](./build-s390x.md) + ## Notes about GPU-accelerated backends The GPU may still be used to accelerate some parts of the computation even when using the `-ngl 0` option. You can fully disable GPU acceleration by using `--device none`. 
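[editor's note] The build-s390x.md section above describes VXE/VXE2 and the new NNPA acceleration as compile-time options, and this patch also extends the public CPU-feature API (adding `ggml_cpu_has_nnpa()` next to the existing `ggml_cpu_has_vxe()`, see the `ggml-cpu.h` hunk below). A minimal sketch of how a caller might confirm at runtime which of these features the backend was built with — this is an illustrative example, not part of the patch:

```c
#include <stdio.h>
#include "ggml-cpu.h"

int main(void) {
    // Each ggml_cpu_has_* helper returns non-zero when the backend was
    // built with the corresponding support (semantics follow the other
    // ggml_cpu_has_* queries in ggml-cpu.h).
    printf("VXE/VXE2 SIMD: %d\n", ggml_cpu_has_vxe());
    printf("NNPA         : %d\n", ggml_cpu_has_nnpa());
    return 0;
}
```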
diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index 4e7399f9e68f9..215eb23486814 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -131,6 +131,7 @@ option(GGML_RVV "ggml: enable rvv" ON) option(GGML_RV_ZFH "ggml: enable riscv zfh" OFF) option(GGML_XTHEADVECTOR "ggml: enable xtheadvector" OFF) option(GGML_VXE "ggml: enable vxe" ON) +option(GGML_NNPA "ggml: enable nnpa" ON) option(GGML_CPU_ALL_VARIANTS "ggml: build all variants of the CPU backend (requires GGML_BACKEND_DL)" OFF) set(GGML_CPU_ARM_ARCH "" CACHE STRING "ggml: CPU architecture for ARM") diff --git a/ggml/include/ggml-cpu.h b/ggml/include/ggml-cpu.h index de77a875ec533..e3b79d09bb66f 100644 --- a/ggml/include/ggml-cpu.h +++ b/ggml/include/ggml-cpu.h @@ -101,6 +101,7 @@ extern "C" { GGML_BACKEND_API int ggml_cpu_has_riscv_v (void); GGML_BACKEND_API int ggml_cpu_has_vsx (void); GGML_BACKEND_API int ggml_cpu_has_vxe (void); + GGML_BACKEND_API int ggml_cpu_has_nnpa (void); GGML_BACKEND_API int ggml_cpu_has_wasm_simd (void); GGML_BACKEND_API int ggml_cpu_has_llamafile (void); diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index 71b1d67b8d0a6..671fad4d228d4 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -448,6 +448,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) # TODO: Separation to determine activation of VX/VXE/VXE2 if (${S390X_M} MATCHES "8561|8562") + set(GGML_NNPA OFF) message(STATUS "z15 target") list(APPEND ARCH_FLAGS -march=z15) elseif (${S390X_M} MATCHES "3931") @@ -464,7 +465,14 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endif() if (GGML_VXE) + message(STATUS "VX/VXE/VXE2 enabled") list(APPEND ARCH_FLAGS -mvx -mzvector) + list(APPEND ARCH_DEFINITIONS GGML_VXE) + endif() + + if (GGML_NNPA) + message(STATUS "NNPA enabled") + list(APPEND ARCH_DEFINITIONS GGML_NNPA) endif() elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "wasm") message(STATUS "Wasm detected") diff --git a/ggml/src/ggml-cpu/amx/mmq.cpp b/ggml/src/ggml-cpu/amx/mmq.cpp index cec34eb6416ac..47c61b88164b8 100644 --- a/ggml/src/ggml-cpu/amx/mmq.cpp +++ b/ggml/src/ggml-cpu/amx/mmq.cpp @@ -8,6 +8,7 @@ #include "mmq.h" #include "ggml-impl.h" #include "ggml-cpu-impl.h" +#include "simd-mappings.h" #include "quants.h" #include "ggml-quants.h" #include @@ -453,7 +454,7 @@ void quantize_row_q8_K_vnni(const float * RESTRICT x, void * RESTRICT vy, int64_ // Quantize these floats const float iscale = 127.f / amax; - y[i].d = GGML_FP32_TO_FP16(1 / iscale); + y[i].d = GGML_CPU_FP32_TO_FP16(1 / iscale); const float id = ( amax != 0.0f ) ? 
iscale : 0.f; const __m512 vscale = _mm512_set1_ps(id); @@ -1090,7 +1091,7 @@ struct acc_C { const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset))); for (int m = 0; m < nr; ++m) { - const __m512 vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].d)); + const __m512 vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[m * lda].d)); const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N)); __m512 vsum; @@ -1113,8 +1114,8 @@ struct acc_C { const __m512 vm0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset + TILE_N * sizeof(ggml_half)))); for (int m = 0; m < nr; ++m) { - const __m512 vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].d)); - const __m512 vs1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].s)); + const __m512 vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[m * lda].d)); + const __m512 vs1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[m * lda].s)); const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N)); __m512 vsum; @@ -1137,7 +1138,7 @@ struct acc_C { const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset))); for (int m = 0; m < nr; ++m) { - const __m512 vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].d)); + const __m512 vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[m * lda].d)); const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N)); __m512 vsum; @@ -1437,7 +1438,7 @@ struct tinygemm_kernel_vnni for (int k = 0; k < 8; ++k) { va[k] = _mm512_set1_epi32(a_ptr[k]); } - vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[0 * KB + i].d)); - vs1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[0 * KB + i].s)); + vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[0 * KB + i].d)); + vs1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[0 * KB + i].s)); } // load b @@ -1571,7 +1572,7 @@ struct tinygemm_kernel_vnniqs + 16); float32_t _scale[4] = { - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) + GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), + GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y1->d), + GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), + GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y1->d) }; float32x4_t scale = vld1q_f32(_scale); @@ -274,10 +275,10 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi // dot product sumv0 = svmla_n_f32_x(ph4, sumv0, svcvt_f32_s32_x(ph4, svadd_x(ph4, svdot_s32(svdup_n_s32(0), qx0ls, qy0l), - svdot_s32(svdup_n_s32(0), qx0hs, qy0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + svdot_s32(svdup_n_s32(0), qx0hs, qy0h))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = svmla_n_f32_x(ph4, sumv1, svcvt_f32_s32_x(ph4, svadd_x(ph4, svdot_s32(svdup_n_s32(0), qx1ls, qy1l), - svdot_s32(svdup_n_s32(0), qx1hs, qy1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + svdot_s32(svdup_n_s32(0), qx1hs, qy1h))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); @@ -313,9 +314,9 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi // dot product sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(), - svdot_s32(svdup_n_s32(0), qx0s, qy0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); 
+ svdot_s32(svdup_n_s32(0), qx0s, qy0)), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(), - svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); @@ -354,9 +355,9 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi // dot product sumv0 = svmla_n_f32_x(ph32, sumv0, svcvt_f32_s32_x(ph32, - svdot_s32(svdup_n_s32(0), qx0s, qy0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + svdot_s32(svdup_n_s32(0), qx0s, qy0)), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = svmla_n_f32_x(ph32, sumv1, svcvt_f32_s32_x(ph32, - svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = svaddv_f32(ph32, svadd_f32_x(ph32, sumv0, sumv1)); @@ -404,8 +405,8 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); @@ -423,7 +424,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; @@ -464,10 +465,10 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi const block_q8_1 * GGML_RESTRICT b_y1 = &vy1[i]; float32_t summs_t[4] = { - GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y0->s), - GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y0->s), - GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y1->s), - GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y1->s) + GGML_CPU_FP16_TO_FP32(b_x0->m) * GGML_CPU_FP16_TO_FP32(b_y0->s), + GGML_CPU_FP16_TO_FP32(b_x1->m) * GGML_CPU_FP16_TO_FP32(b_y0->s), + GGML_CPU_FP16_TO_FP32(b_x0->m) * GGML_CPU_FP16_TO_FP32(b_y1->s), + GGML_CPU_FP16_TO_FP32(b_x1->m) * GGML_CPU_FP16_TO_FP32(b_y1->s) }; summs0 = vaddq_f32(summs0, vld1q_f32(summs_t)); @@ -490,10 +491,10 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi // mmla into int32x4_t float32_t _scale[4] = { - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) + GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), + GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y1->d), + GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), + GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y1->d) }; 
float32x4_t scale = vld1q_f32(_scale); @@ -539,7 +540,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi const block_q8_1 * GGML_RESTRICT y0 = &y[ib + 0]; const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1]; - summs += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s) + GGML_FP16_TO_FP32(x1->m) * GGML_FP16_TO_FP32(y1->s); + summs += GGML_CPU_FP16_TO_FP32(x0->m) * GGML_CPU_FP16_TO_FP32(y0->s) + GGML_CPU_FP16_TO_FP32(x1->m) * GGML_CPU_FP16_TO_FP32(y1->s); const uint8x16_t m4b = vdupq_n_u8(0x0F); @@ -562,8 +563,8 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h); const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs; @@ -582,7 +583,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -666,10 +667,10 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), - ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), - ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); @@ -694,7 +695,7 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; } *s = sumf; @@ -739,8 +740,8 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi const uint8x16_t m4b = vdupq_n_u8(0x0F); - summs0 += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s); - summs1 += GGML_FP16_TO_FP32(x1->m) * GGML_FP16_TO_FP32(y1->s); + summs0 += GGML_CPU_FP16_TO_FP32(x0->m) * GGML_CPU_FP16_TO_FP32(y0->s); + summs1 += GGML_CPU_FP16_TO_FP32(x1->m) * GGML_CPU_FP16_TO_FP32(y1->s); // extract the 5th bit via lookup table ((b) << 4) memcpy(&qh0, x0->qh, sizeof(qh0)); @@ -784,10 +785,10 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), - ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), 
GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), - ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1; @@ -812,7 +813,7 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -864,10 +865,10 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); float32_t _scale[4] = { - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) + GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), + GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y1->d), + GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), + GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y1->d) }; float32x4_t scale = vld1q_f32(_scale); @@ -934,10 +935,10 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi sumv0 = svmla_n_f32_x(pl16, sumv0, svcvt_f32_s32_x(pl16, svadd_x(pl16, svdot_s32(svdup_n_s32(0), qx0_0, qy0_0), - svdot_s32(svdup_n_s32(0), qx0_1, qy0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + svdot_s32(svdup_n_s32(0), qx0_1, qy0_1))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = svmla_n_f32_x(pl16, sumv1, svcvt_f32_s32_x(pl16, svadd_x(pl16, svdot_s32(svdup_n_s32(0), qx1_0, qy1_0), - svdot_s32(svdup_n_s32(0), qx1_1, qy1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + svdot_s32(svdup_n_s32(0), qx1_1, qy1_1))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = svaddv_f32(pl16, svadd_f32_x(pl16, sumv0, sumv1)); @@ -960,9 +961,9 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs); sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(), - svdot_s32(svdup_n_s32(0), qx0, qy0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + svdot_s32(svdup_n_s32(0), qx0, qy0)), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(), - svdot_s32(svdup_n_s32(0), qx1, qy1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + svdot_s32(svdup_n_s32(0), qx1, qy1)), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); @@ -1002,8 +1003,8 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi qy_64 = svadd_s8_x(svptrue_b8(), qy_32, qy_64); // scale creation - const float32_t deq1 = GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d); - const float32_t deq2 = GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d); + const float32_t 
deq1 = GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d); + const float32_t deq2 = GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d); // duplicate deq1 in first half of vector and deq2 in second half of vector const svfloat32_t temp = svdup_f32_m(svdup_f32_z(ph8, deq1), pl8, deq2); @@ -1043,11 +1044,11 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), - ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), - ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); } sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); @@ -1059,7 +1060,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi sumi += x[ib].qs[j]*y[ib].qs[j]; } - sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; @@ -1217,7 +1218,7 @@ void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo const int16x8_t ysum0 = vld1q_s16(y[i].bsums); const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8); - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; #if defined(__ARM_FEATURE_DOTPROD) sumi0 = vaddq_s32(sumi0, sumi1); @@ -1269,7 +1270,7 @@ void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo } } - sumf += (float) sum * (GGML_FP16_TO_FP32(x[i].d) * y[i].d); + sumf += (float) sum * (GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d); } *s = sumf; @@ -1362,7 +1363,7 @@ void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo const int16x8_t ysum0 = vld1q_s16(y[i].bsums); const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8); - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; #if defined(__ARM_FEATURE_DOTPROD) sumi0 = vaddq_s32(sumi0, sumi1); @@ -1393,7 +1394,7 @@ void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo } } - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); sumf += (float) sumi * d; } @@ -1425,9 +1426,9 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi switch (vector_length) { case 128: for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); svfloat32_t d_broad = svdup_n_f32((float32_t)d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin); const uint8_t * GGML_RESTRICT q2 = x[i].qs; @@ -1570,9 +1571,9 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi case 256: case 512: for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); svfloat32_t d_broad = svdup_n_f32((float32_t)d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + 
const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin); const uint8_t * GGML_RESTRICT q2 = x[i].qs; @@ -1671,8 +1672,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi float sum = 0; for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const uint8_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -1742,8 +1743,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi summs += y[i].bsums[j] * (sc[j] >> 4); } - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); int isum = 0; int is = 0; @@ -1805,7 +1806,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q3_sv = x[i].qs; const uint8_t * GGML_RESTRICT qh_sv = x[i].hmask; @@ -1981,7 +1982,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].hmask; @@ -2112,7 +2113,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -2258,18 +2259,18 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi bias[3] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x1_mins)), vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x1_mins)))); const float32x4_t dmins = { - GGML_FP16_TO_FP32(x0->dmin) * y0->d, - GGML_FP16_TO_FP32(x0->dmin) * y1->d, - GGML_FP16_TO_FP32(x1->dmin) * y0->d, - GGML_FP16_TO_FP32(x1->dmin) * y1->d, + GGML_CPU_FP16_TO_FP32(x0->dmin) * y0->d, + GGML_CPU_FP16_TO_FP32(x0->dmin) * y1->d, + GGML_CPU_FP16_TO_FP32(x1->dmin) * y0->d, + GGML_CPU_FP16_TO_FP32(x1->dmin) * y1->d, }; vfsum = vmlsq_f32(vfsum, vcvtq_f32_s32(vld1q_s32(bias)), dmins); const float32x4_t superblock_scale = { - GGML_FP16_TO_FP32(x0->d) * y0->d, - GGML_FP16_TO_FP32(x0->d) * y1->d, - GGML_FP16_TO_FP32(x1->d) * y0->d, - GGML_FP16_TO_FP32(x1->d) * y1->d, + GGML_CPU_FP16_TO_FP32(x0->d) * y0->d, + GGML_CPU_FP16_TO_FP32(x0->d) * y1->d, + GGML_CPU_FP16_TO_FP32(x1->d) * y0->d, + GGML_CPU_FP16_TO_FP32(x1->d) * y1->d, }; vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale); } @@ -2289,8 +2290,8 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi float sumf = 0; for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const 
float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); @@ -2377,8 +2378,8 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); @@ -2478,9 +2479,9 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -2520,8 +2521,8 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); @@ -2630,9 +2631,9 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -2827,10 +2828,10 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi const int32x4_t vibias = vmulq_n_s32(vld1q_s32(bias), 32); const float32x4_t superblock_scale = { - GGML_FP16_TO_FP32(x0->d) * y0->d, - GGML_FP16_TO_FP32(x0->d) * y1->d, - GGML_FP16_TO_FP32(x1->d) * y0->d, - GGML_FP16_TO_FP32(x1->d) * y1->d, + GGML_CPU_FP16_TO_FP32(x0->d) * y0->d, + GGML_CPU_FP16_TO_FP32(x0->d) * y1->d, + GGML_CPU_FP16_TO_FP32(x1->d) * y0->d, + GGML_CPU_FP16_TO_FP32(x1->d) * y1->d, }; visum = vsubq_s32(visum, vibias); @@ -2858,7 +2859,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi svuint8_t q6h_1, q6h_2, q6h_3, q6h_4; for (int i = 0; i < nb; ++i) { - const float d_all = GGML_FP16_TO_FP32(x[i].d); + const float d_all = GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q6 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; @@ -3011,7 +3012,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d_all = GGML_FP16_TO_FP32(x[i].d); + const float d_all = GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q6 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; @@ -3128,7 +3129,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float 
d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -3199,7 +3200,7 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const float sumf = 0; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; float sumf1 = 0, sumf2 = 0; @@ -3234,7 +3235,7 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; @@ -3284,7 +3285,7 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v float sumf = 0; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; const uint8x8_t scales8 = vld1_u8(x[i].scales); @@ -3329,7 +3330,7 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -3398,7 +3399,7 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo float sumf = 0; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; @@ -3458,7 +3459,7 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo float sumf = 0; for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; @@ -3521,7 +3522,7 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const float sumf = 0; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -3557,7 +3558,7 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -3630,7 +3631,7 @@ void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo float sumf = 0; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; 
const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; @@ -3691,7 +3692,7 @@ void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint8_t * GGML_RESTRICT signs = x[i].signs; @@ -3786,7 +3787,7 @@ void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo } - sumf += y[i].d * GGML_FP16_TO_FP32(x[i].d) * (sumi1 + sumi2 + IQ1S_DELTA * sumi3); + sumf += y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d) * (sumi1 + sumi2 + IQ1S_DELTA * sumi3); } *s = sumf; @@ -3817,7 +3818,7 @@ void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo qs += 4; } - sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); + sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); } *s = sumf; @@ -3905,7 +3906,7 @@ void ggml_vec_dot_iq1_m_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo } - sumf += y[i].d * GGML_FP16_TO_FP32(scale.f16) * (vaddvq_s32(sumi1) + IQ1M_DELTA * vaddvq_s32(sumi2)); + sumf += y[i].d * GGML_CPU_FP16_TO_FP32(scale.f16) * (vaddvq_s32(sumi1) + IQ1M_DELTA * vaddvq_s32(sumi2)); } *s = sumf; @@ -3952,7 +3953,7 @@ void ggml_vec_dot_iq1_m_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo qh += 2; } - sumf += GGML_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); + sumf += GGML_CPU_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); } *s = sumf; @@ -4003,13 +4004,13 @@ void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const v prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]); sumf += - GGML_FP16_TO_FP32(x[ib+0].d) * GGML_FP16_TO_FP32(y[ib + 0].d) * vaddvq_s32(prod_1) + - GGML_FP16_TO_FP32(x[ib+1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) * vaddvq_s32(prod_2); + GGML_CPU_FP16_TO_FP32(x[ib+0].d) * GGML_CPU_FP16_TO_FP32(y[ib + 0].d) * vaddvq_s32(prod_1) + + GGML_CPU_FP16_TO_FP32(x[ib+1].d) * GGML_CPU_FP16_TO_FP32(y[ib + 1].d) * vaddvq_s32(prod_2); } #endif for (; ib < nb; ++ib) { - const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); + const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); int sumi1 = 0, sumi2 = 0; for (int j = 0; j < QK4_NL/2; ++j) { sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; @@ -4071,7 +4072,7 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v } - sumf += GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2); + sumf += GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2); } *s = sumf; @@ -4079,7 +4080,7 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v #else float sumf = 0; for (int ibl = 0; ibl < nb; ++ibl) { - const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d; uint16_t h = x[ibl].scales_h; const uint8_t * qs = x[ibl].qs; const int8_t * q8 = y[ibl].qs; diff --git a/ggml/src/ggml-cpu/arch/arm/repack.cpp b/ggml/src/ggml-cpu/arch/arm/repack.cpp index 39a0dd301db08..2f8bc9e251735 100644 --- a/ggml/src/ggml-cpu/arch/arm/repack.cpp +++ b/ggml/src/ggml-cpu/arch/arm/repack.cpp @@ -6,6 +6,7 @@ #include "ggml-impl.h" #include "ggml-cpu.h" #include "ggml-cpu-impl.h" 
+#include "simd-mappings.h" #include "traits.h" #include @@ -51,7 +52,7 @@ void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTR const float d = amax / ((1 << 7) - 1); id[row_iter] = d ? 1.0f / d : 0.0f; - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); } for (int j = 0; j < 8; j++) { @@ -102,7 +103,7 @@ void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTR const float d = amax / ((1 << 7) - 1); id[row_iter] = d ? 1.0f / d : 0.0f; - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); } for (int j = 0; j < QK8_0 * 4; j++) { @@ -145,7 +146,7 @@ void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTR const float d = amax / ((1 << 7) - 1); id[row_iter] = d ? 1.0f / d : 0.0f; - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); } for (int j = 0; j < 4; j++) { @@ -221,7 +222,7 @@ void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTR const float d = amax / ((1 << 7) - 1); id[row_iter] = d ? 1.0f / d : 0.0f; - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); } for (int j = 0; j < QK8_0 * 4; j++) { @@ -311,7 +312,7 @@ void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } @@ -399,7 +400,7 @@ void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } @@ -514,7 +515,7 @@ void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } @@ -608,7 +609,7 @@ void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])); } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } @@ -1117,7 +1118,7 @@ void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); 
+ sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } @@ -1570,7 +1571,7 @@ void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } @@ -2039,7 +2040,7 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } @@ -2147,7 +2148,7 @@ void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])); } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } diff --git a/ggml/src/ggml-cpu/arch/loongarch/quants.c b/ggml/src/ggml-cpu/arch/loongarch/quants.c index f2ea965724a3d..9e33fb3228633 100644 --- a/ggml/src/ggml-cpu/arch/loongarch/quants.c +++ b/ggml/src/ggml-cpu/arch/loongarch/quants.c @@ -3,6 +3,7 @@ #include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" +#include "simd-mappings.h" #include "../../quants.h" #include "../../ggml-cpu-impl.h" @@ -474,7 +475,7 @@ void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i // Quantize these floats const float d = max_scalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = GGML_CPU_FP32_TO_FP16(d); const float id = ( max_scalar != 0.0f ) ? 127.f / max_scalar : 0.0f; const __m256 mul = (__m256)__lasx_xvreplfr2vr_s( id ); @@ -548,7 +549,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i // Quantize these floats const float d = max_scalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = GGML_CPU_FP32_TO_FP16(d); const float id = ( max_scalar != 0.0f ) ? 
127.f / max_scalar : 0.0f; const __m256 mul = __lasx_xvreplfr2vr_s( id ); @@ -576,7 +577,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i // Compute the sum of the quants and set y[i].s const __m128i s0 = __lsx_vadd_w(__lsx_vadd_w(ni0, ni1), __lsx_vadd_w(ni2, ni3)); const __m128i s1 = __lsx_vadd_w(__lsx_vadd_w(ni4, ni5), __lsx_vadd_w(ni6, ni7)); - y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_4(__lsx_vadd_w(s0, s1))); + y[i].s = GGML_CPU_FP32_TO_FP16(d * hsum_i32_4(__lsx_vadd_w(s0, s1))); // Convert int32 to int16 ni0 = lsx_packs_w( ni0, ni1 ); @@ -667,7 +668,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi // Main loop for (; ib < nb; ++ib) { /* Compute combined scale for the block */ - const __m256 d = __lasx_xvreplfr2vr_s( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); + const __m256 d = __lasx_xvreplfr2vr_s( GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d) ); __m256i qx = bytes_from_nibbles_32(x[ib].qs); @@ -699,7 +700,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi for (; ib + 1 < nb; ib += 2) { // Compute combined scale for the block 0 and 1 - const __m128 d_0_1 = (__m128)__lsx_vreplgr2vr_w( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); + const __m128 d_0_1 = (__m128)__lsx_vreplgr2vr_w( GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d) ); const __m128i tmp_0_1 = __lsx_vld((const __m128i *)x[ib].qs, 0); @@ -717,7 +718,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi //_mm_prefetch(&y[ib] + 2 * sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 2 and 3 - const __m128 d_2_3 = (__m128)__lsx_vreplgr2vr_w( GGML_FP16_TO_FP32(x[ib + 1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) ); + const __m128 d_2_3 = (__m128)__lsx_vreplgr2vr_w( GGML_CPU_FP16_TO_FP32(x[ib + 1].d) * GGML_CPU_FP16_TO_FP32(y[ib + 1].d) ); const __m128i tmp_2_3 = __lsx_vld((const __m128i *)x[ib + 1].qs, 0); @@ -766,7 +767,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; @@ -797,10 +798,10 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi // Main loop for (; ib < nb; ++ib) { - const float d0 = GGML_FP16_TO_FP32(x[ib].d); - const float d1 = GGML_FP16_TO_FP32(y[ib].d); + const float d0 = GGML_CPU_FP16_TO_FP32(x[ib].d); + const float d1 = GGML_CPU_FP16_TO_FP32(y[ib].d); - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); + summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); const __m256 d0v = __lasx_xvreplfr2vr_s( d0 ); const __m256 d1v = __lasx_xvreplfr2vr_s( d1 ); @@ -834,7 +835,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -865,7 +866,7 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi // Main loop for (; ib < nb; ++ib) { /* Compute combined scale for the block */ - const __m256 d = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ib].d) * 
GGML_FP16_TO_FP32(y[ib].d)); //FIXME + const __m256 d = __lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); //FIXME __m256i qx = bytes_from_nibbles_32(x[ib].qs); __m256i bxhi = bytes_from_bits_32(x[ib].qh); @@ -902,7 +903,7 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; } *s = sumf; @@ -934,16 +935,16 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi // Main loop for (; ib < nb; ++ib) { - const __m256 dx = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ib].d)); + const __m256 dx = __lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(x[ib].d)); - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); + summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); __m256i qx = bytes_from_nibbles_32(x[ib].qs); __m256i bxhi = bytes_from_bits_32(x[ib].qh); bxhi = __lasx_xvand_v(bxhi, __lasx_xvreplgr2vr_b(0x10)); qx = __lasx_xvor_v(qx, bxhi); - const __m256 dy = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(y[ib].d)); + const __m256 dy = __lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(y[ib].d)); const __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); const __m256 q = mul_sum_us8_pairs_float(qx, qy); @@ -973,7 +974,7 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -1003,7 +1004,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi // Main loop for (; ib < nb; ++ib) { // Compute combined scale for the block - const __m256 d = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + const __m256 d = __lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); __m256i qx = __lasx_xvld((const __m256i *)x[ib].qs, 0); __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); @@ -1023,7 +1024,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi sumi += x[ib].qs[j]*y[ib].qs[j]; } - sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; @@ -1047,8 +1048,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const uint8_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -1116,8 +1117,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi summs += y[i].bsums[j] * (sc[j] >> 4); } - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); int isum = 0; int is = 0; @@ -1170,7 +1171,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * 
GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q3 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; // Set up scales @@ -1294,7 +1295,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1330,8 +1331,8 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); @@ -1438,9 +1439,9 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1477,8 +1478,8 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi const uint8_t * GGML_RESTRICT q5 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); @@ -1593,9 +1594,9 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1624,7 +1625,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q4 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; @@ -1713,7 +1714,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1780,7 +1781,7 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, 
float * GGML_RESTRICT s, size_t bs, const __m256 accumf = (__m256)__lasx_xvldi(0); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; __m256i sumi1 = __lasx_xvldi(0); @@ -1820,7 +1821,7 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; @@ -1895,7 +1896,7 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v __m256 accumf = (__m256)__lasx_xvldi(0); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -1980,7 +1981,7 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -2049,7 +2050,7 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo __m256 accumf = (__m256)__lasx_xvldi(0); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); @@ -2108,7 +2109,7 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo float sumf = 0; for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; @@ -2168,7 +2169,7 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const __m256 accumf = (__m256)__lasx_xvldi(0); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -2213,7 +2214,7 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -2279,7 +2280,7 @@ void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo __m256 accumf = (__m256)__lasx_xvldi(0); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t 
*)x[i].signs; @@ -2340,7 +2341,7 @@ void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint8_t * GGML_RESTRICT signs = x[i].signs; @@ -2451,7 +2452,7 @@ void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? -1 : 1) * ls2; } - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); accum = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), accum); accum1 += d * sumi1; } @@ -2484,7 +2485,7 @@ void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo qs += 4; } - sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); + sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); } *s = sumf; @@ -2530,9 +2531,9 @@ void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const v const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); const __m256i p_1 = lasx_madd_h(p16_1, mone); const __m256i p_2 = lasx_madd_h(p16_2, mone); - accum1 = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(y[ib + 0].d)*GGML_FP16_TO_FP32(x[ib + 0].d)), + accum1 = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(y[ib + 0].d)*GGML_CPU_FP16_TO_FP32(x[ib + 0].d)), __lasx_xvffint_s_w(p_1), accum1); - accum2 = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(y[ib + 1].d)*GGML_FP16_TO_FP32(x[ib + 1].d)), + accum2 = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(y[ib + 1].d)*GGML_CPU_FP16_TO_FP32(x[ib + 1].d)), __lasx_xvffint_s_w(p_2), accum2); } @@ -2540,7 +2541,7 @@ void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const v #endif for (; ib < nb; ++ib) { - const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); + const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); int sumi1 = 0, sumi2 = 0; for (int j = 0; j < QK4_NL/2; ++j) { sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; @@ -2595,7 +2596,7 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v sumi1 = __lasx_xvadd_w(p_1, sumi1); sumi2 = __lasx_xvadd_w(p_2, sumi2); } - accum = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d), + accum = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_CPU_FP16_TO_FP32(x[ibl].d)*y[ibl].d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accum); } @@ -2604,7 +2605,7 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v #else float sumf = 0; for (int ibl = 0; ibl < nb; ++ibl) { - const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d; uint16_t h = x[ibl].scales_h; const uint8_t * qs = x[ibl].qs; const int8_t * q8 = y[ibl].qs; diff --git a/ggml/src/ggml-cpu/arch/powerpc/quants.c b/ggml/src/ggml-cpu/arch/powerpc/quants.c index ce4e47a863994..053d5cbdc7bd8 100644 --- a/ggml/src/ggml-cpu/arch/powerpc/quants.c +++ b/ggml/src/ggml-cpu/arch/powerpc/quants.c @@ -3,6 +3,7 @@ #include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" +#include "simd-mappings.h" #include "../../quants.h" #include "../../ggml-cpu-impl.h" @@ -67,7 +68,7 @@ void quantize_row_q8_0(const float * 
GGML_RESTRICT x, void * GGML_RESTRICT vy, i const float id = d ? 1.0f/d : 0.0f; const vector float vid = vec_splats(id); - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = GGML_CPU_FP32_TO_FP16(d); for (int j = 0; j < 8; j++) { const vector float v = vec_round(vec_mul(srcv[j], vid)); @@ -112,7 +113,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i const float id = d ? 1.0f/d : 0.0f; const vector float vid = vec_splats(id); - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = GGML_CPU_FP32_TO_FP16(d); vector int accv = vec_splats(0); @@ -127,7 +128,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i accv = vec_add(accv, vec_sld(accv, accv, 4)); accv = vec_add(accv, vec_sld(accv, accv, 8)); - y[i].s = GGML_FP32_TO_FP16(d * vec_extract(accv, 0)); + y[i].s = GGML_CPU_FP32_TO_FP16(d * vec_extract(accv, 0)); } #else @@ -170,8 +171,8 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); @@ -214,7 +215,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; @@ -249,12 +250,12 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); - vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[ib].m)); - vector float vys = {GGML_FP16_TO_FP32(y[ib].s), 0.0f, 0.0f, 0.0f}; + vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].m)); + vector float vys = {GGML_CPU_FP16_TO_FP32(y[ib].s), 0.0f, 0.0f, 0.0f}; vsumf0 = vec_madd(vxmin, vys, vsumf0); vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); @@ -291,7 +292,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -326,8 +327,8 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector signed long long aux64x2_0 = {(uint64_t)(table_b2b_1[x[ib].qh[0]]), (uint64_t)(table_b2b_1[x[ib].qh[1]])}; @@ -379,7 +380,7 @@ void 
ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; } *s = sumf; @@ -415,12 +416,12 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); - vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[ib].m)); - vector float vys = {GGML_FP16_TO_FP32(y[ib].s), 0.f, 0.f, 0.f}; + vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].m)); + vector float vys = {GGML_CPU_FP16_TO_FP32(y[ib].s), 0.f, 0.f, 0.f}; vsumf0 = vec_madd(vxmin, vys, vsumf0); vector unsigned long long aux64x2_0 = {(uint64_t)(table_b2b_0[x[ib].qh[0]]), (uint64_t)(table_b2b_0[x[ib].qh[1]])}; @@ -470,7 +471,7 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -502,8 +503,8 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector signed char q8x0 = vec_xl( 0, x[ib].qs); @@ -542,7 +543,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi sumi += x[ib].qs[j]*y[ib].qs[j]; } - sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; @@ -574,11 +575,11 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); - vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].dmin)); vector float vdmin = vec_mul(vxmin, vyd); vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); @@ -708,8 +709,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi summs += y[i].bsums[j] * (sc[j] >> 4); } - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); int isum = 0; int is = 0; @@ -770,7 +771,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi vector float vsumf3 = vec_splats(0.0f); for (int i = 0; 
i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); @@ -962,7 +963,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1005,11 +1006,11 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); - vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].dmin)); vector float vdmin = vec_mul(vxmin, vyd); vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); @@ -1177,9 +1178,9 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1222,11 +1223,11 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); - vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vxmin = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].dmin)); vector float vdmin = vec_mul(vxmin, vyd); UNUSED(kmask1); @@ -1394,9 +1395,9 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1432,7 +1433,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); @@ -1591,7 +1592,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; 
++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1659,7 +1660,7 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); @@ -1742,7 +1743,7 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; @@ -1790,7 +1791,7 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); @@ -1871,7 +1872,7 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -1939,7 +1940,7 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); @@ -2033,7 +2034,7 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo float sumf = 0; for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; @@ -2096,7 +2097,7 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); @@ -2176,7 +2177,7 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -2236,7 +2237,7 @@ void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector 
float vd = vec_mul(vxd, vyd); @@ -2329,7 +2330,7 @@ void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint8_t * GGML_RESTRICT signs = x[i].signs; @@ -2394,7 +2395,7 @@ void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo vector float vsumf3 = vec_splats(0.0f); for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[i].d)); vector float vyd = vec_splats(y[i].d); vector float vd = vec_mul(vxd, vyd); @@ -2505,7 +2506,7 @@ void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo qs += 4; } - sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); + sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); } *s = sumf; @@ -2546,8 +2547,8 @@ void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const v __builtin_prefetch(y[ib].qs, 0, 1); - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d)); vector float vd = vec_mul(vxd, vyd); vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); @@ -2582,7 +2583,7 @@ void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const v #endif for (; ib < nb; ++ib) { - const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); + const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); int sumi1 = 0, sumi2 = 0; for (int j = 0; j < QK4_NL/2; ++j) { sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; @@ -2620,7 +2621,7 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v for (int ibl = 0; ibl < nb; ++ibl) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ibl].d)); + vector float vxd = vec_splats(GGML_CPU_FP16_TO_FP32(x[ibl].d)); vector float vyd = vec_splats(y[ibl].d); vector float vd = vec_mul(vxd, vyd); @@ -2697,7 +2698,7 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v #else float sumf = 0; for (int ibl = 0; ibl < nb; ++ibl) { - const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d; uint16_t h = x[ibl].scales_h; const uint8_t * qs = x[ibl].qs; const int8_t * q8 = y[ibl].qs; diff --git a/ggml/src/ggml-cpu/arch/riscv/quants.c b/ggml/src/ggml-cpu/arch/riscv/quants.c index 6f3aa94fbbe98..8b64d8adc48f4 100644 --- a/ggml/src/ggml-cpu/arch/riscv/quants.c +++ b/ggml/src/ggml-cpu/arch/riscv/quants.c @@ -3,6 +3,7 @@ #include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" +#include "simd-mappings.h" #include "../../quants.h" #include "../../ggml-cpu-impl.h" @@ -45,7 +46,7 @@ void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i const float d = amax / ((1 << 7) - 1); const float id = d ? 
1.0f/d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = GGML_CPU_FP32_TO_FP16(d); vfloat32m8_t x0 = __riscv_vfmul_vf_f32m8(v_x, id, vl); @@ -85,7 +86,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = GGML_CPU_FP32_TO_FP16(d); vfloat32m8_t x0 = __riscv_vfmul_vf_f32m8(v_x, id, vl); @@ -102,7 +103,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i // set y[i].s int sum = __riscv_vmv_x_s_i16m1_i16(vwrs); - y[i].s = GGML_FP32_TO_FP16(sum*d); + y[i].s = GGML_CPU_FP32_TO_FP16(sum*d); } #else @@ -160,7 +161,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } #endif @@ -177,7 +178,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; @@ -225,7 +226,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } #endif @@ -242,7 +243,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -293,7 +294,7 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi vint32m1_t sum = __riscv_vwredsum_vs_i16m4_i32m1(mul, zero, vl); int32_t sumi = __riscv_vmv_x_s_i32m1_i32(sum); - sumf += (GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)) * sumi; + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; } #endif @@ -316,7 +317,7 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; } *s = sumf; @@ -366,7 +367,7 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi vint32m1_t sum = __riscv_vwredsum_vs_i16m4_i32m1(mul, zero, vl); int32_t sumi = __riscv_vmv_x_s_i32m1_i32(sum); - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } #endif @@ -389,7 +390,7 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += 
(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -427,7 +428,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum); - sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } #endif @@ -438,7 +439,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi sumi += x[ib].qs[j]*y[ib].qs[j]; } - sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; @@ -465,8 +466,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi const uint8_t * q2 = x[i].qs; const int8_t * q8 = y[i].qs; const uint8_t * sc = x[i].scales; - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); uint8_t *patmp = atmp; int vsums; int tmp; @@ -569,8 +570,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi const int8_t * q8 = y[i].qs; const uint8_t * sc = x[i].scales; - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); size_t vl = 16; @@ -644,8 +645,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi const uint8_t * q2 = x[i].qs; const int8_t * q8 = y[i].qs; const uint8_t * sc = x[i].scales; - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); uint8_t *patmp = atmp; int vsums; int tmp; @@ -750,8 +751,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi summs += y[i].bsums[j] * (sc[j] >> 4); } - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); int isum = 0; int is = 0; @@ -916,7 +917,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi q3 += 32; q8 += 128; scale += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; sumf += d * isum; } @@ -1017,7 +1018,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; sumf += d*sum_t; @@ -1134,7 +1135,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi q3 += 32; q8 += 128; scale += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; sumf += d * isum; } break; @@ -1202,7 +1203,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; } - const float d = 
GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1239,8 +1240,8 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi float sumf = 0; for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); int tmp, tmp2, sumi; __asm__ __volatile__( @@ -1361,8 +1362,8 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi size_t vl = 8; - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl); vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl); @@ -1422,8 +1423,8 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi break; case 128: for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); int tmp, tmp2, sumi; __asm__ __volatile__( @@ -1580,9 +1581,9 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1627,8 +1628,8 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi const uint8_t * GGML_RESTRICT hm = x[i].qh; const int8_t * GGML_RESTRICT q8 = y[i].qs; - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; vint16m1_t q8sums_0 = __riscv_vlse16_v_i16m1(y[i].bsums, 4, vl); vint16m1_t q8sums_1 = __riscv_vlse16_v_i16m1(y[i].bsums+1, 4, vl); @@ -1749,9 +1750,9 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1778,7 +1779,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * restrict q6 = x[i].ql; const uint8_t * restrict qh = x[i].qh; @@ -1862,7 +1863,7 @@ void ggml_vec_dot_q6_K_q8_K(int 
n, float * GGML_RESTRICT s, size_t bs, const voi case 256: for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q6 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; @@ -1943,7 +1944,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi case 128: for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * restrict q6 = x[i].ql; const uint8_t * restrict qh = x[i].qh; @@ -2058,7 +2059,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; diff --git a/ggml/src/ggml-cpu/arch/riscv/repack.cpp b/ggml/src/ggml-cpu/arch/riscv/repack.cpp index 0882b41024362..45c91a694820a 100644 --- a/ggml/src/ggml-cpu/arch/riscv/repack.cpp +++ b/ggml/src/ggml-cpu/arch/riscv/repack.cpp @@ -6,6 +6,7 @@ #include "ggml-impl.h" #include "ggml-cpu.h" #include "ggml-cpu-impl.h" +#include "simd-mappings.h" #include "traits.h" #include @@ -90,16 +91,16 @@ void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); // vector version needs Zvfhmin extension - const float a_scale = GGML_FP16_TO_FP32(a_ptr[l].d); + const float a_scale = GGML_CPU_FP16_TO_FP32(a_ptr[l].d); const float b_scales[8] = { - GGML_FP16_TO_FP32(b_ptr[l].d[0]), - GGML_FP16_TO_FP32(b_ptr[l].d[1]), - GGML_FP16_TO_FP32(b_ptr[l].d[2]), - GGML_FP16_TO_FP32(b_ptr[l].d[3]), - GGML_FP16_TO_FP32(b_ptr[l].d[4]), - GGML_FP16_TO_FP32(b_ptr[l].d[5]), - GGML_FP16_TO_FP32(b_ptr[l].d[6]), - GGML_FP16_TO_FP32(b_ptr[l].d[7]) + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[0]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[1]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[2]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[3]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[4]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[5]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[6]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[7]) }; const vfloat32m1_t b_scales_vec = __riscv_vle32_v_f32m1(b_scales, vl / 4); const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scale, vl / 4); @@ -129,7 +130,7 @@ void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } @@ -181,20 +182,20 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo // vector version needs Zvfhmin extension const float a_scales[4] = { - GGML_FP16_TO_FP32(a_ptr[l].d[0]), - GGML_FP16_TO_FP32(a_ptr[l].d[1]), - GGML_FP16_TO_FP32(a_ptr[l].d[2]), - GGML_FP16_TO_FP32(a_ptr[l].d[3]) + GGML_CPU_FP16_TO_FP32(a_ptr[l].d[0]), + GGML_CPU_FP16_TO_FP32(a_ptr[l].d[1]), + GGML_CPU_FP16_TO_FP32(a_ptr[l].d[2]), + GGML_CPU_FP16_TO_FP32(a_ptr[l].d[3]) }; const float b_scales[8] = { - GGML_FP16_TO_FP32(b_ptr[l].d[0]), - 
GGML_FP16_TO_FP32(b_ptr[l].d[1]), - GGML_FP16_TO_FP32(b_ptr[l].d[2]), - GGML_FP16_TO_FP32(b_ptr[l].d[3]), - GGML_FP16_TO_FP32(b_ptr[l].d[4]), - GGML_FP16_TO_FP32(b_ptr[l].d[5]), - GGML_FP16_TO_FP32(b_ptr[l].d[6]), - GGML_FP16_TO_FP32(b_ptr[l].d[7]) + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[0]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[1]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[2]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[3]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[4]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[5]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[6]), + GGML_CPU_FP16_TO_FP32(b_ptr[l].d[7]) }; const vfloat32m1_t b_scales_vec = __riscv_vle32_v_f32m1(b_scales, vl / 4); @@ -382,7 +383,7 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } diff --git a/ggml/src/ggml-cpu/arch/s390/quants.c b/ggml/src/ggml-cpu/arch/s390/quants.c index 26bd908757114..a840219a4fc08 100644 --- a/ggml/src/ggml-cpu/arch/s390/quants.c +++ b/ggml/src/ggml-cpu/arch/s390/quants.c @@ -3,6 +3,7 @@ #include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" +#include "simd-mappings.h" #include "../../quants.h" #include "../../ggml-cpu-impl.h" @@ -49,7 +50,7 @@ void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f / d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = GGML_CPU_FP32_TO_FP16(d); for (int j = 0; j < 8; j++) { const __vector float v = vec_mul(srcv[j], vec_splats(id)); @@ -94,7 +95,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i const float d = amax / ((1 << 7) - 1); const float id = d ? 
1.0f / d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = GGML_CPU_FP32_TO_FP16(d); __vector int32_t acc = vec_splats(0); @@ -110,7 +111,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i acc = vec_add(acc, vi); } - y[i].s = GGML_FP32_TO_FP16(d * (acc[0] + acc[1] + acc[2] + acc[3])); + y[i].s = GGML_CPU_FP32_TO_FP16(d * (acc[0] + acc[1] + acc[2] + acc[3])); } #else GGML_UNUSED(nb); @@ -164,7 +165,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi __vector int16_t v_xy_ = v_xylso + v_xylse + v_xyhso + v_xyhse; v_xy_ += vec_reve(v_xy_); const __vector float v_xy = vec_float(vec_unpackh(v_xy_)); - const __vector float v_d = vec_splats(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + const __vector float v_d = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); acc = vec_madd(v_xy, v_d, acc); } @@ -185,7 +186,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; @@ -219,7 +220,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi __builtin_prefetch(x[ib].qs, 0, 1); __builtin_prefetch(y[ib].qs, 0, 1); - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); + summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); const uint8x16_t v_x = vec_xl(0, x[ib].qs); const int8x16_t v_xl = (const int8x16_t)(v_x & v_m); @@ -231,7 +232,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi const int32x4_t v_xy_ = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); const float32x4_t v_xy = vec_float(v_xy_); - const float32x4_t v_d = vec_splats(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + const float32x4_t v_d = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); acc = vec_madd(v_xy, v_d, acc); } @@ -252,7 +253,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -290,7 +291,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi const int32x4_t v_xy_ = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); const float32x4_t v_xy = vec_float(v_xy_); - const float32x4_t v_d = vec_splats(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + const float32x4_t v_d = vec_splats(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); acc = vec_madd(v_xy, v_d, acc); } @@ -305,7 +306,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi sumi += x[ib].qs[j]*y[ib].qs[j]; } - sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; @@ -348,7 +349,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi float sum = 0; for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * restrict x0l = 
x[i].qs; const uint8_t * restrict x0h = x[i].hmask; @@ -497,7 +498,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -537,8 +538,8 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi float sumf = 0; for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums); const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums); @@ -647,9 +648,9 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -698,8 +699,8 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi float sumf = 0; for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums); const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums); @@ -819,9 +820,9 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -859,7 +860,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi int8x16_t v_y[4]; for (int i = 0; i < nb; ++i) { - const float d_all = GGML_FP16_TO_FP32(x[i].d); + const float d_all = GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT x0l = x[i].ql; const uint8_t * GGML_RESTRICT x0h = x[i].qh; @@ -1004,7 +1005,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1071,7 +1072,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi // float sumf = 0; // for (int i = 0; i < nb; ++i) { -// const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; +// const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; // const uint16_t * GGML_RESTRICT q2 = x[i].qs; // 
const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -1121,7 +1122,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi // float sumf = 0.f; // for (int i = 0; i < nb; ++i) { -// const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; +// const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; // const uint16_t * GGML_RESTRICT q2 = x[i].qs; // const int8_t * GGML_RESTRICT q8 = y[i].qs; // int32_t bsum = 0; @@ -1182,12 +1183,12 @@ void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const v const int8x16_t v_yh = vec_xl(QK8_0/2, y0->qs); const int32x4_t v_xy = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); - sumf += GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d) * (v_xy[0] + v_xy[1] + v_xy[2] + v_xy[3]); + sumf += GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d) * (v_xy[0] + v_xy[1] + v_xy[2] + v_xy[3]); } #endif for (; ib < nb; ++ib) { - const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); + const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); int sumi1 = 0, sumi2 = 0; for (int j = 0; j < QK4_NL/2; ++j) { sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; @@ -1257,7 +1258,7 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v sumi2 += (vsumi1[0] + vsumi1[1] + vsumi1[2] + vsumi1[3]) * ls2; } - sumf += GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2); + sumf += GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2); } *s = sumf; @@ -1265,7 +1266,7 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v #else float sumf = 0; for (int ibl = 0; ibl < nb; ++ibl) { - const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d; uint16_t h = x[ibl].scales_h; const uint8_t * qs = x[ibl].qs; const int8_t * q8 = y[ibl].qs; diff --git a/ggml/src/ggml-cpu/arch/wasm/quants.c b/ggml/src/ggml-cpu/arch/wasm/quants.c index 4ec97f533f1e4..b0904d8a3ab5e 100644 --- a/ggml/src/ggml-cpu/arch/wasm/quants.c +++ b/ggml/src/ggml-cpu/arch/wasm/quants.c @@ -3,6 +3,7 @@ #include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" +#include "simd-mappings.h" #include "../../quants.h" #include "../../ggml-cpu-impl.h" @@ -65,7 +66,7 @@ void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = GGML_CPU_FP32_TO_FP16(d); for (int j = 0; j < 8; j++) { const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); @@ -110,7 +111,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i const float d = amax / ((1 << 7) - 1); const float id = d ? 
1.0f/d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = GGML_CPU_FP32_TO_FP16(d); v128_t accv = wasm_i32x4_splat(0); @@ -126,7 +127,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i accv = wasm_i32x4_add(accv, vi); } - y[i].s = GGML_FP32_TO_FP16( + y[i].s = GGML_CPU_FP32_TO_FP16( d * (wasm_i32x4_extract_lane(accv, 0) + wasm_i32x4_extract_lane(accv, 1) + wasm_i32x4_extract_lane(accv, 2) + @@ -324,8 +325,8 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi ); // Accumulate results with scaling - float scale0 = GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d); - float scale1 = GGML_FP16_TO_FP32(x1->d) * GGML_FP16_TO_FP32(y1->d); + float scale0 = GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d); + float scale1 = GGML_CPU_FP16_TO_FP32(x1->d) * GGML_CPU_FP16_TO_FP32(y1->d); sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(dp0), wasm_f32x4_splat(scale0))); sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(dp1), wasm_f32x4_splat(scale1))); @@ -348,7 +349,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; @@ -428,7 +429,7 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi wasm_i32x4_dot_i16x8(v0lfh, v1lh)), wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), - wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d)))); + wasm_f32x4_splat(GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d)))); } sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + @@ -454,7 +455,7 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; } *s = sumf; @@ -491,7 +492,7 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi const block_q5_1 * GGML_RESTRICT x0 = &x[ib]; const block_q8_1 * GGML_RESTRICT y0 = &y[ib]; - summs += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s); + summs += GGML_CPU_FP16_TO_FP32(x0->m) * GGML_CPU_FP16_TO_FP32(y0->s); const v128_t m4b = wasm_i8x16_splat(0x0F); @@ -538,7 +539,7 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi wasm_i32x4_dot_i16x8(v0lfh, v1lh)), wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), - wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d)))); + wasm_f32x4_splat(GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d)))); } sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + @@ -564,7 +565,7 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -620,7 +621,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi const v128_t sum_dots = wasm_i32x4_add(wasm_i32x4_add(dx0_0, dx0_1), 
wasm_i32x4_add(dx1_0, dx1_1)); // Convert to float and accumulate - const float scale = GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d); + const float scale = GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d); sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(sum_dots), wasm_f32x4_splat(scale))); } @@ -635,7 +636,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi sumi += x[ib].qs[j]*y[ib].qs[j]; } - sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; @@ -746,8 +747,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi isum += wasm_i32x4_extract_lane(isum_vec, 0); } - const float dall = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dall = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf += dall * isum - dmin * summs; } @@ -768,8 +769,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi summs += y[i].bsums[j] * (sc[j] >> 4); } - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); int isum = 0; int is = 0; @@ -880,7 +881,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi } // Accumulate results - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const v128_t v_d = wasm_f32x4_splat(d); v128_t v_sum = wasm_f32x4_add( wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc0), v_d), @@ -957,7 +958,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -991,8 +992,8 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi float sumf = 0; for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); // Corrected sign + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); // Corrected sign const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -1136,9 +1137,9 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1170,8 +1171,8 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi float sumf = 0; for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); // Fixed 
sign + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); // Fixed sign const uint8_t * GGML_RESTRICT q5 = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; @@ -1331,9 +1332,9 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1420,7 +1421,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi wasm_v128_store(&aux32[0], acc0); wasm_v128_store(&aux32[4], acc1); - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) { sums[l] += d * aux32[l]; } @@ -1470,7 +1471,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; diff --git a/ggml/src/ggml-cpu/arch/x86/quants.c b/ggml/src/ggml-cpu/arch/x86/quants.c index e3f722b52c9b2..e7527c00a8f17 100644 --- a/ggml/src/ggml-cpu/arch/x86/quants.c +++ b/ggml/src/ggml-cpu/arch/x86/quants.c @@ -3,6 +3,7 @@ #include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" +#include "simd-mappings.h" #include "../../quants.h" #include "../../ggml-cpu-impl.h" @@ -256,9 +257,9 @@ static inline __m256 mul_sum_i8_quad_float(const __m128i x_1_0, const __m128i x_ // quad fp16 delta calculation static inline __m256 quad_fp16_delta_float(const float x0, const float y0, const float x1, const float y1) { - // GGML_FP16_TO_FP32 is faster than Intel F16C - return _mm256_set_m128(_mm_set1_ps(GGML_FP16_TO_FP32(x1) * GGML_FP16_TO_FP32(y1)), - _mm_set1_ps(GGML_FP16_TO_FP32(x0) * GGML_FP16_TO_FP32(y0))); + // GGML_CPU_FP16_TO_FP32 is faster than Intel F16C + return _mm256_set_m128(_mm_set1_ps(GGML_CPU_FP16_TO_FP32(x1) * GGML_CPU_FP16_TO_FP32(y1)), + _mm_set1_ps(GGML_CPU_FP16_TO_FP32(x0) * GGML_CPU_FP16_TO_FP32(y0))); } #endif #elif defined(__SSSE3__) @@ -305,7 +306,7 @@ void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i // Quantize these floats const float d = maxScalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = GGML_CPU_FP32_TO_FP16(d); const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; const __m256 mul = _mm256_set1_ps( id ); @@ -401,7 +402,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i // Quantize these floats const float d = max_scalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = GGML_CPU_FP32_TO_FP16(d); const float id = ( max_scalar != 0.0f ) ? 
127.f / max_scalar : 0.0f; const __m256 mul = _mm256_set1_ps( id ); @@ -425,7 +426,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i #if defined(__AVX2__) // Compute the sum of the quants and set y[i].s - y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)))); + y[i].s = GGML_CPU_FP32_TO_FP16(d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)))); // Convert int32 to int16 i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 @@ -455,7 +456,7 @@ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, i // Compute the sum of the quants and set y[i].s const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3)); const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7)); - y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_4(_mm_add_epi32(s0, s1))); + y[i].s = GGML_CPU_FP32_TO_FP16(d * hsum_i32_4(_mm_add_epi32(s0, s1))); // Convert int32 to int16 ni0 = _mm_packs_epi32( ni0, ni1 ); @@ -552,7 +553,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi // Main loop for (; ib < nb; ++ib) { /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); + const __m256 d = _mm256_set1_ps( GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d) ); __m256i qx = bytes_from_nibbles_32(x[ib].qs); @@ -613,7 +614,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi _mm_prefetch(&y[ib] + sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 0 and 1 - const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); + const __m128 d_0_1 = _mm_set1_ps( GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d) ); const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[ib].qs); @@ -631,7 +632,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi _mm_prefetch(&y[ib] + 2 * sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 2 and 3 - const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[ib + 1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) ); + const __m128 d_2_3 = _mm_set1_ps( GGML_CPU_FP16_TO_FP32(x[ib + 1].d) * GGML_CPU_FP16_TO_FP32(y[ib + 1].d) ); const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); @@ -680,7 +681,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; @@ -711,10 +712,10 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi // Main loop for (; ib < nb; ++ib) { - const float d0 = GGML_FP16_TO_FP32(x[ib].d); - const float d1 = GGML_FP16_TO_FP32(y[ib].d); + const float d0 = GGML_CPU_FP16_TO_FP32(x[ib].d); + const float d1 = GGML_CPU_FP16_TO_FP32(y[ib].d); - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); + summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); const __m256 d0v = _mm256_set1_ps( d0 ); const __m256 d1v = _mm256_set1_ps( d1 ); @@ -752,7 +753,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + 
GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -783,7 +784,7 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi // Main loop for (; ib < nb; ++ib) { /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + const __m256 d = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); __m256i qx = bytes_from_nibbles_32(x[ib].qs); __m256i bxhi = bytes_from_bits_32(x[ib].qh); @@ -807,7 +808,7 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi // Main loop for (; ib < nb; ++ib) { /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + const __m256 d = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); __m256i bx_0 = bytes_from_nibbles_32(x[ib].qs); const __m256i bxhi = bytes_from_bits_32(x[ib].qh); @@ -851,7 +852,7 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; } *s = sumf; @@ -883,16 +884,16 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi // Main loop for (; ib < nb; ++ib) { - const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d)); + const __m256 dx = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d)); - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); + summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); __m256i qx = bytes_from_nibbles_32(x[ib].qs); __m256i bxhi = bytes_from_bits_32(x[ib].qh); bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10)); qx = _mm256_or_si256(qx, bxhi); - const __m256 dy = _mm256_set1_ps(GGML_FP16_TO_FP32(y[ib].d)); + const __m256 dy = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib].d)); const __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); const __m256 q = mul_sum_us8_pairs_float(qx, qy); @@ -910,9 +911,9 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi // Main loop for (; ib < nb; ++ib) { - const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d)); + const __m256 dx = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d)); - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); + summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); __m256i bx_0 = bytes_from_nibbles_32(x[ib].qs); const __m256i bxhi = bytes_from_bits_32(x[ib].qh); @@ -926,7 +927,7 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi bxh = _mm_or_si128(bxh, bxhih); bx_0 = MM256_SET_M128I(bxh, bxl); - const __m256 dy = _mm256_set1_ps(GGML_FP16_TO_FP32(y[ib].d)); + const __m256 dy = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib].d)); const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[ib].qs); const __m256 q = mul_sum_us8_pairs_float(bx_0, by_0); @@ -956,7 +957,7 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += 
(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -986,7 +987,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi // Main loop for (; ib < nb; ++ib) { // Compute combined scale for the block - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); + const __m256 d = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); __m256i qx = _mm256_loadu_si256((const __m256i *)x[ib].qs); __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); @@ -1025,7 +1026,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi sumi += x[ib].qs[j]*y[ib].qs[j]; } - sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; @@ -1144,7 +1145,7 @@ void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo } const __m256i ysum = _mm256_loadu_si256((const __m256i *) y[i].bsums); - const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(x[i].d)); + const __m256 d = _mm256_set1_ps(y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d)); sumi0 = _mm256_sub_epi16(sumi0, ysum); sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(sumi1, sumi2)); @@ -1190,7 +1191,7 @@ void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo } } - sumf += (float) sum * (GGML_FP16_TO_FP32(x[i].d) * y[i].d); + sumf += (float) sum * (GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d); } *s = sumf; @@ -1244,7 +1245,7 @@ void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo } const __m256i ysum = _mm256_loadu_si256((const __m256i *) y[i].bsums); - const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(x[i].d)); + const __m256 d = _mm256_set1_ps(y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d)); sumi0 = _mm256_add_epi16(sumi0, sumi1); sumi0 = _mm256_sub_epi16(sumi0, ysum); @@ -1269,7 +1270,7 @@ void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo } } - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); sumf += (float) sumi * d; } @@ -1299,8 +1300,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const uint8_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -1366,8 +1367,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const uint8_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -1477,8 +1478,8 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi summs += y[i].bsums[j] * (sc[j] >> 4); } - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * 
GGML_CPU_FP16_TO_FP32(x[i].dmin); int isum = 0; int is = 0; @@ -1533,7 +1534,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q3 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -1638,7 +1639,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q3 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -1824,7 +1825,7 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -1862,8 +1863,8 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); @@ -1928,8 +1929,8 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -2049,9 +2050,9 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -2092,8 +2093,8 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi const uint8_t * GGML_RESTRICT q5 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); @@ -2170,8 +2171,8 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); const uint8_t * GGML_RESTRICT q5 = x[i].qs; const 
int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -2311,9 +2312,9 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -2344,7 +2345,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q4 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; @@ -2422,7 +2423,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); const uint8_t * GGML_RESTRICT q4 = x[i].ql; const uint8_t * GGML_RESTRICT qh = x[i].qh; @@ -2555,7 +2556,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -2622,7 +2623,7 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; __m256i sumi1 = _mm256_setzero_si256(); @@ -2663,7 +2664,7 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; __m128i sumi1_0 = _mm_setzero_si128(); @@ -2717,7 +2718,7 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; @@ -2792,7 +2793,7 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -2913,7 +2914,7 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = 
y[i].qs; @@ -3035,7 +3036,7 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -3104,7 +3105,7 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); @@ -3177,7 +3178,7 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); @@ -3253,7 +3254,7 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo float sumf = 0; for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; @@ -3313,7 +3314,7 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -3358,7 +3359,7 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -3414,7 +3415,7 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -3480,7 +3481,7 @@ void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; @@ -3565,7 +3566,7 @@ void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo __m256 accumf = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { - const float d = 
GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; @@ -3648,7 +3649,7 @@ void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint8_t * GGML_RESTRICT signs = x[i].signs; @@ -3753,7 +3754,7 @@ void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? -1 : 1) * ls2; } - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); accum = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi), accum); accum1 += d * sumi1; @@ -3801,7 +3802,7 @@ void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? -1 : 1) * ls2; } - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi1_1, sumi1_0))), accum); accum1 += d * sumi1; @@ -3835,7 +3836,7 @@ void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo qs += 4; } - sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); + sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); } *s = sumf; @@ -3947,7 +3948,7 @@ void ggml_vec_dot_iq1_m_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo qs += 8; qh += 4; } - const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(scale.f16)); + const __m256 d = _mm256_set1_ps(y[i].d * GGML_CPU_FP16_TO_FP32(scale.f16)); accum1 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi1), accum1); accum2 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi2), accum2); @@ -4033,7 +4034,7 @@ void ggml_vec_dot_iq1_m_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo qs += 8; qh += 4; } - const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(scale.f16)); + const __m256 d = _mm256_set1_ps(y[i].d * GGML_CPU_FP16_TO_FP32(scale.f16)); accum1 = _mm256_add_ps(_mm256_mul_ps(d, _mm256_cvtepi32_ps(MM256_SET_M128I(sumi1_1, sumi1_0))), accum1); accum2 = _mm256_add_ps(_mm256_mul_ps(d, _mm256_cvtepi32_ps(MM256_SET_M128I(sumi2_1, sumi2_0))), accum2); @@ -4083,7 +4084,7 @@ void ggml_vec_dot_iq1_m_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo qh += 2; } - sumf += GGML_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); + sumf += GGML_CPU_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); } *s = sumf; @@ -4129,9 +4130,9 @@ void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const v const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); const __m256i p_1 = _mm256_madd_epi16(p16_1, mone); const __m256i p_2 = _mm256_madd_epi16(p16_2, mone); - accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[ib + 0].d)*GGML_FP16_TO_FP32(x[ib + 0].d)), + accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib + 0].d)*GGML_CPU_FP16_TO_FP32(x[ib + 0].d)), _mm256_cvtepi32_ps(p_1), accum1); - accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[ib + 
1].d)*GGML_FP16_TO_FP32(x[ib + 1].d)), + accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib + 1].d)*GGML_CPU_FP16_TO_FP32(x[ib + 1].d)), _mm256_cvtepi32_ps(p_2), accum2); } @@ -4164,7 +4165,7 @@ void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const v #endif for (; ib < nb; ++ib) { - const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); + const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); int sumi1 = 0, sumi2 = 0; for (int j = 0; j < QK4_NL/2; ++j) { sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; @@ -4219,7 +4220,7 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v sumi1 = _mm256_add_epi32(p_1, sumi1); sumi2 = _mm256_add_epi32(p_2, sumi2); } - accum = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d), + accum = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ibl].d)*y[ibl].d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accum); } @@ -4267,7 +4268,7 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v } __m128i sumi12_0 = _mm_add_epi32(sumi1_0, sumi2_0); __m128i sumi12_1 = _mm_add_epi32(sumi1_1, sumi2_1); - accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d), + accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ibl].d)*y[ibl].d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi12_1, sumi12_0))), accum); } @@ -4276,7 +4277,7 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const v #else float sumf = 0; for (int ibl = 0; ibl < nb; ++ibl) { - const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d; uint16_t h = x[ibl].scales_h; const uint8_t * qs = x[ibl].qs; const int8_t * q8 = y[ibl].qs; diff --git a/ggml/src/ggml-cpu/arch/x86/repack.cpp b/ggml/src/ggml-cpu/arch/x86/repack.cpp index e7635a294a796..c00c1e541cb44 100644 --- a/ggml/src/ggml-cpu/arch/x86/repack.cpp +++ b/ggml/src/ggml-cpu/arch/x86/repack.cpp @@ -6,6 +6,7 @@ #include "ggml-impl.h" #include "ggml-cpu.h" #include "ggml-cpu-impl.h" +#include "simd-mappings.h" #include "traits.h" #include @@ -39,11 +40,11 @@ static inline __m512 __avx512_f32cx8x2_load(ggml_fp16_t *x, ggml_fp16_t *y) { float tmp[16]; for (int i = 0; i < 8; i++) { - tmp[i] = GGML_FP16_TO_FP32(x[i]); + tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]); } for (int i = 0; i < 8; i++) { - tmp[i + 8] = GGML_FP16_TO_FP32(y[i]); + tmp[i + 8] = GGML_CPU_FP16_TO_FP32(y[i]); } return _mm512_loadu_ps(tmp); @@ -54,10 +55,10 @@ static inline __m512 __avx512_repeat_f32cx16_load(__m128i x) { _mm_storeu_si128((__m128i*)tmphalf, x); for (int i = 0; i < 4; i++) { - tmp[i] = GGML_FP16_TO_FP32(tmphalf[i]); - tmp[i + 4] = GGML_FP16_TO_FP32(tmphalf[i]); - tmp[i + 8] = GGML_FP16_TO_FP32(tmphalf[i]); - tmp[i + 12] = GGML_FP16_TO_FP32(tmphalf[i]); + tmp[i] = GGML_CPU_FP16_TO_FP32(tmphalf[i]); + tmp[i + 4] = GGML_CPU_FP16_TO_FP32(tmphalf[i]); + tmp[i + 8] = GGML_CPU_FP16_TO_FP32(tmphalf[i]); + tmp[i + 12] = GGML_CPU_FP16_TO_FP32(tmphalf[i]); } return _mm512_loadu_ps(tmp); @@ -67,7 +68,7 @@ static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) { float tmp[8]; for (int i = 0; i < 8; i++) { - tmp[i] = GGML_FP16_TO_FP32(x[i]); + tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]); } return _mm256_loadu_ps(tmp); @@ -76,8 +77,8 @@ static inline __m256 __avx_repeat_f32cx8_load(ggml_fp16_t *x) { float tmp[8]; for (int i = 0; i < 4; i++) { - tmp[i] = GGML_FP16_TO_FP32(x[i]); - tmp[i + 4] = 
GGML_FP16_TO_FP32(x[i]); + tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]); + tmp[i + 4] = GGML_CPU_FP16_TO_FP32(x[i]); } return _mm256_loadu_ps(tmp); @@ -88,7 +89,7 @@ static inline __m256 __avx_rearranged_f32cx8_load(ggml_fp16_t *x, __m128i arrang _mm_storeu_si128((__m128i*)tmphalf, _mm_shuffle_epi8(_mm_loadu_si128((const __m128i *) x), arrangeMask)); for (int i = 0; i < 8; i++) { - tmp[i] = GGML_FP16_TO_FP32(tmphalf[i]); + tmp[i] = GGML_CPU_FP16_TO_FP32(tmphalf[i]); } return _mm256_loadu_ps(tmp); @@ -211,7 +212,7 @@ void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTR id[row_iter] = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; //d ? 1.0f / d : 0.0f; // Store the scale for the individual block - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); // Store the values in blocks of eight values - Aim is to use these later for block interleaving srcv[row_iter][0] = v0; @@ -297,7 +298,7 @@ void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTR const float d = amax / ((1 << 7) - 1); id[row_iter] = d ? 1.0f / d : 0.0f; - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); } for (int j = 0; j < QK8_0 * 4; j++) { @@ -647,7 +648,7 @@ void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo const __m256 col_scale_f32 = GGML_F32Cx8_REARRANGE_LOAD(b_ptr[b].d, changemask); // Load and convert to FP32 scale from block_q8_0 - const __m256 row_scale_f32 = _mm256_set1_ps(GGML_FP16_TO_FP32(a_ptr[b].d)); + const __m256 row_scale_f32 = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(a_ptr[b].d)); // Load the block values in block_q8_0 in batches of 16 bytes and replicate the same across 256 bit vector __m256i lhs_vec_0 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)a_ptr[b].qs)); @@ -706,7 +707,7 @@ void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } @@ -972,13 +973,13 @@ void ggml_gemv_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo sumi2 = sumi2 * scales_1[j]; sumi += sumi1 + sumi2; } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d; + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d; } } for (int sb = 0; sb < 8; sb++) { uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16; for (int j = 0; j < ncols_interleaved; j++) { - sum_minf[j] += mins[j] * (a_ptr[l].bsums[sb * 2] + a_ptr[l].bsums[sb * 2 + 1]) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d; + sum_minf[j] += mins[j] * (a_ptr[l].bsums[sb * 2] + a_ptr[l].bsums[sb * 2 + 1]) * GGML_CPU_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d; } } } @@ -1755,7 +1756,7 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const vo sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } @@ -3259,7 +3260,7 @@ void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, 
size_t bs, const vo sumi2 = sumi2 * scales_1[j]; sumi += sumi1 + sumi2; } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m]; + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m]; } } } @@ -3268,7 +3269,7 @@ void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const vo for(int m = 0; m < 4; m++) { const int16_t *bsums = a_ptr[l].bsums + (sb * 8) + (m * 4) - ((sb % 2) * 6); for(int j = 0; j < ncols_interleaved; j++) { - sum_minf[m][j] += mins[j] * (bsums[0] + bsums[1]) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m]; + sum_minf[m][j] += mins[j] * (bsums[0] + bsums[1]) * GGML_CPU_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m]; } } } diff --git a/ggml/src/ggml-cpu/common.h b/ggml/src/ggml-cpu/common.h index 5624176cce94b..353563dc35c5d 100644 --- a/ggml/src/ggml-cpu/common.h +++ b/ggml/src/ggml-cpu/common.h @@ -4,6 +4,7 @@ #include "traits.h" #include "ggml-cpu-impl.h" #include "ggml-impl.h" +#include "simd-mappings.h" #ifdef __cplusplus @@ -12,11 +13,11 @@ // convenience functions/macros for use in template calls // note: these won't be required after the 'traits' lookup table is used. static inline ggml_fp16_t f32_to_f16(float x) { - return GGML_FP32_TO_FP16(x); + return GGML_CPU_FP32_TO_FP16(x); } static inline float f16_to_f32(ggml_fp16_t x) { - return GGML_FP16_TO_FP32(x); + return GGML_CPU_FP16_TO_FP32(x); } static inline ggml_bf16_t f32_to_bf16(float x) { diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h index 73a8f93987aa3..d839cf5c55e81 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-impl.h +++ b/ggml/src/ggml-cpu/ggml-cpu-impl.h @@ -62,11 +62,17 @@ struct ggml_compute_params { #if defined(__s390x__) && defined(__VEC__) #ifndef __VXE__ #define __VXE__ -#endif +#endif // __VXE__ #ifndef __VXE2__ #define __VXE2__ -#endif -#endif +#endif // __VXE2__ +#endif // __s390x__ && __VEC__ + +#if defined(__s390x__) && defined(GGML_NNPA) +#ifndef __NNPA__ +#define __NNPA__ +#endif // __NNPA__ +#endif // __s390x__ && GGML_NNPA #if defined(__ARM_FEATURE_SVE) #include diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index 1d3cd009affc6..7cae96f4b4885 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -72,6 +72,9 @@ #define UNUSED GGML_UNUSED #define SWAP(x, y, T) do { T SWAP = x; (x) = y; (y) = SWAP; } while (0) +// precomputed f32 table for f16 (256 KB) (simd-mappings.h) +float ggml_table_f32_f16[1 << 16]; + #if defined(__ARM_ARCH) struct ggml_arm_arch_features_type { int sve_cnt; @@ -736,7 +739,7 @@ struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { { assert(tensor->nb[0] == sizeof(ggml_fp16_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value)); + ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_CPU_FP32_TO_FP16(value)); } } break; case GGML_TYPE_BF16: @@ -795,7 +798,7 @@ struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { { assert(tensor->nb[0] == sizeof(ggml_fp16_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value)); + ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_CPU_FP32_TO_FP16(value)); } } break; case GGML_TYPE_BF16: @@ -846,7 +849,7 @@ int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) { case GGML_TYPE_F16: { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); + return 
GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); } case GGML_TYPE_BF16: { @@ -891,7 +894,7 @@ void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) { case GGML_TYPE_F16: { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value); + ((ggml_fp16_t *)(tensor->data))[i] = GGML_CPU_FP32_TO_FP16(value); } break; case GGML_TYPE_BF16: { @@ -920,7 +923,7 @@ int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i case GGML_TYPE_I32: return ((int32_t *) data)[0]; case GGML_TYPE_F16: - return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); + return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); case GGML_TYPE_BF16: return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]); case GGML_TYPE_F32: @@ -947,7 +950,7 @@ void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, } break; case GGML_TYPE_F16: { - ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value); + ((ggml_fp16_t *)(data))[0] = GGML_CPU_FP32_TO_FP16(value); } break; case GGML_TYPE_BF16: { @@ -985,7 +988,7 @@ float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { } case GGML_TYPE_F16: { - return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); + return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); } case GGML_TYPE_BF16: { @@ -1024,7 +1027,7 @@ void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) { } break; case GGML_TYPE_F16: { - ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value); + ((ggml_fp16_t *)(tensor->data))[i] = GGML_CPU_FP32_TO_FP16(value); } break; case GGML_TYPE_BF16: { @@ -1051,7 +1054,7 @@ float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, case GGML_TYPE_I32: return ((int32_t *) data)[0]; case GGML_TYPE_F16: - return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); + return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); case GGML_TYPE_BF16: return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]); case GGML_TYPE_F32: @@ -1078,7 +1081,7 @@ void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, } break; case GGML_TYPE_F16: { - ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value); + ((ggml_fp16_t *)(data))[0] = GGML_CPU_FP32_TO_FP16(value); } break; case GGML_TYPE_BF16: { @@ -3141,9 +3144,24 @@ void ggml_cpu_fp32_to_fp16(const float * x, ggml_fp16_t * y, int64_t n) { __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT); _mm_storel_epi64((__m128i *)(y + i), y_vec); } +#elif defined(__NNPA__) + for (; i + 7 < n; i += 8) { + float32x4_t v_xh = vec_xl(0, (const float *)(x + i + 0)); + float32x4_t v_xl = vec_xl(0, (const float *)(x + i + 4)); + uint16x8_t v_yd = vec_round_from_fp32(v_xh, v_xl, 0); + uint16x8_t v_y = vec_convert_to_fp16(v_yd, 0); + vec_xst(v_y, 0, (ggml_fp16_t *)(y + i)); + } + for (; i + 3 < n; i += 4) { + float32x4_t v_x = vec_xl(0, (const float *)(x + i)); + float32x4_t v_zero = vec_splats(0.0f); + uint16x8_t v_yd = vec_round_from_fp32(v_x, v_zero, 0); + uint16x8_t v_y = vec_convert_to_fp16(v_yd, 0); + vec_xst(v_y, 0, (ggml_fp16_t *)(y + i)); + } #endif for (; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16(x[i]); } } @@ -3167,9 +3185,25 @@ void ggml_cpu_fp16_to_fp32(const ggml_fp16_t * x, float * y, int64_t n) { __m128 y_vec = _mm_cvtph_ps(x_vec); _mm_storeu_ps(y + i, y_vec); } +#elif defined(__NNPA__) + for (; i + 7 < n; i += 8) { + uint16x8_t v_x = vec_xl(0, (const ggml_fp16_t *)(x + i)); + uint16x8_t v_yd = vec_convert_from_fp16(v_x, 
0); + float32x4_t v_yh = vec_extend_to_fp32_hi(v_yd, 0); + float32x4_t v_yl = vec_extend_to_fp32_lo(v_yd, 0); + vec_xst(v_yh, 0, (float *)(y + i + 0)); + vec_xst(v_yl, 0, (float *)(y + i + 4)); + } + for (; i + 3 < n; i += 4) { + uint16x8_t v_x = vec_xl(0, (const ggml_fp16_t *)(x + i)); + uint16x8_t v_yd = vec_convert_from_fp16(v_x, 0); + float32x4_t v_yh = vec_extend_to_fp32_hi(v_yd, 0); + vec_xst(v_yh, 0, (float *)(y + i)); + } #endif + for (; i < n; ++i) { - y[i] = GGML_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP16_TO_FP32(x[i]); } } @@ -3369,6 +3403,14 @@ int ggml_cpu_has_vxe(void) { #endif } +int ggml_cpu_has_nnpa(void) { +#if defined(GGML_NNPA) + return 1; +#else + return 0; +#endif +} + int ggml_cpu_has_neon(void) { #if defined(__ARM_ARCH) && defined(__ARM_NEON) return 1; @@ -3418,7 +3460,7 @@ int ggml_cpu_has_sme(void) { } void ggml_cpu_init(void) { - // needed to initialize f16 tables + // needed to initialize ggml_time { struct ggml_init_params params = { 0, NULL, false }; struct ggml_context * ctx = ggml_init(params); @@ -3439,9 +3481,10 @@ void ggml_cpu_init(void) { uint16_t u16; ggml_fp16_t fp16; } u = {i}; - float f = GGML_FP16_TO_FP32(u.fp16); - ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f)); - ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f)); + float f = GGML_COMPUTE_FP16_TO_FP32(u.fp16); + ggml_table_f32_f16[i] = f; + ggml_table_gelu_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_f32(f)); + ggml_table_gelu_quick_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_quick_f32(f)); } const uint64_t t_end = ggml_time_us(); UNUSED(t_end); diff --git a/ggml/src/ggml-cpu/ggml-cpu.cpp b/ggml/src/ggml-cpu/ggml-cpu.cpp index 735ef3f015c13..a98866a2d8052 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.cpp +++ b/ggml/src/ggml-cpu/ggml-cpu.cpp @@ -578,6 +578,9 @@ static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t r if (ggml_cpu_has_vxe()) { features.push_back({ "VXE", "1" }); } + if (ggml_cpu_has_nnpa()) { + features.push_back({ "NNPA", "1" }); + } if (ggml_cpu_has_wasm_simd()) { features.push_back({ "WASM_SIMD", "1" }); } diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp index 7ed3874afb87a..ed61869a5508a 100644 --- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp +++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp @@ -52,6 +52,7 @@ #include "ggml-impl.h" #include "ggml-cpu-impl.h" #include "ggml-quants.h" +#include "simd-mappings.h" #include #include @@ -73,7 +74,7 @@ namespace { inline float unhalf(ggml_fp16_t d) { - return GGML_FP16_TO_FP32(d); + return GGML_CPU_FP16_TO_FP32(d); } //////////////////////////////////////////////////////////////////////////////////////////////////// @@ -252,7 +253,7 @@ template <> inline float32x4_t load(const ggml_fp16_t * p) { float tmp[4]; for (int i = 0; i < 4; i++) { - tmp[i] = GGML_FP16_TO_FP32(p[i]); + tmp[i] = GGML_CPU_FP16_TO_FP32(p[i]); } return vec_xl(0, (const float *)(tmp)); diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index eff4a53e3442b..8531baf6c57fb 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -108,7 +108,7 @@ static void ggml_compute_forward_dup_f16( for (int i01 = ir0; i01 < ir1; i01++) { const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); for (int i00 = 0; i00 < ne00; i00++) { - dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]); + dst_ptr[id] = GGML_CPU_FP16_TO_FP32(src0_ptr[i00]); id++; } } @@ -130,7 +130,7 @@ static void ggml_compute_forward_dup_f16( const ggml_fp16_t * 
src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); for (int i00 = 0; i00 < ne00; i00++) { - src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]); + src0_f32[i00] = GGML_CPU_FP16_TO_FP32(src0_ptr[i00]); } quantize_row_q(src0_f32, dst_ptr + id, ne00); @@ -156,7 +156,7 @@ static void ggml_compute_forward_dup_f16( for (int i00 = 0; i00 < ne00; i00++) { const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr); + dst_ptr[id] = GGML_CPU_FP16_TO_FP32(*src0_ptr); id++; } } @@ -267,7 +267,7 @@ static void ggml_compute_forward_dup_f16( const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr); + *(float *) dst_ptr = GGML_CPU_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr); if (++i10 == ne0) { i10 = 0; @@ -372,7 +372,7 @@ static void ggml_compute_forward_dup_bf16( for (int i01 = ir0; i01 < ir1; i01++) { const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); for (int i00 = 0; i00 < ne00; i00++) { - dst_ptr[id] = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(src0_ptr[i00])); + dst_ptr[id] = GGML_CPU_FP32_TO_FP16(GGML_BF16_TO_FP32(src0_ptr[i00])); id++; } } @@ -473,7 +473,7 @@ static void ggml_compute_forward_dup_bf16( for (int i00 = 0; i00 < ne00; i00++) { const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - dst_ptr[id] = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(*src0_ptr)); + dst_ptr[id] = GGML_CPU_FP32_TO_FP16(GGML_BF16_TO_FP32(*src0_ptr)); id++; } } @@ -566,7 +566,7 @@ static void ggml_compute_forward_dup_bf16( const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(*(const ggml_bf16_t *) src0_ptr)); + *(ggml_fp16_t *) dst_ptr = GGML_CPU_FP32_TO_FP16(GGML_BF16_TO_FP32(*(const ggml_bf16_t *) src0_ptr)); if (++i10 == ne0) { i10 = 0; @@ -765,7 +765,7 @@ static void ggml_compute_forward_dup_f32( for (int i00 = 0; i00 < ne00; i00++) { const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr); + dst_ptr[id] = GGML_CPU_FP32_TO_FP16(*src0_ptr); id++; } } @@ -878,7 +878,7 @@ static void ggml_compute_forward_dup_f32( const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr); + *(ggml_fp16_t *) dst_ptr = GGML_CPU_FP32_TO_FP16(*(const float *) src0_ptr); if (++i10 == ne0) { i10 = 0; @@ -1419,7 +1419,7 @@ static void ggml_compute_forward_add1_f16_f32( ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v); + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); } } } @@ -1435,7 +1435,7 @@ static void ggml_compute_forward_add1_f16_f16( GGML_ASSERT(ggml_is_scalar(src1)); // scalar to add - const float v = 
GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data); + const float v = GGML_CPU_FP16_TO_FP32(*(ggml_fp16_t *) src1->data); const int ith = params->ith; const int nth = params->nth; @@ -1467,7 +1467,7 @@ static void ggml_compute_forward_add1_f16_f16( ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v); + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); } } } @@ -1889,7 +1889,7 @@ static void ggml_compute_forward_sum_f16( } } } - ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum); + ((ggml_fp16_t *) dst->data)[0] = GGML_CPU_FP32_TO_FP16(sum); } static void ggml_compute_forward_sum_bf16( @@ -2660,7 +2660,7 @@ static void ggml_compute_forward_gelu_f16( #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); + const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); @@ -2763,7 +2763,7 @@ static void ggml_compute_forward_gelu_erf_f16( #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); + const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); @@ -2866,7 +2866,7 @@ static void ggml_compute_forward_gelu_quick_f16( #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); + const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); @@ -2969,7 +2969,7 @@ static void ggml_compute_forward_silu_f16( #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); + const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); @@ -3163,7 +3163,7 @@ static void ggml_compute_forward_silu_back_f16( #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); + const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); @@ -4500,7 +4500,7 @@ static void ggml_compute_forward_get_rows_back_f32_f16( for (int j = 0; j < nc; ++j) { ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j]; - ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v); + ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_CPU_FP16_TO_FP32(v); } } } @@ -4792,7 +4792,7 @@ static void ggml_compute_forward_soft_max_f32( if (mp_f32) { if (use_f16) { for (int i = 0; i < nc; ++i) { - wp[i] += slope*GGML_FP16_TO_FP32(mp_f16[i]); + wp[i] += slope*GGML_CPU_FP16_TO_FP32(mp_f16[i]); } } else { for (int i = 0; i < nc; ++i) { @@ -5018,8 +5018,8 @@ static void ggml_compute_forward_clamp_f16( ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j*nb01); for (int i = 0; i < nc; i++) { - float v = GGML_FP16_TO_FP32(src0_ptr[i]); - dst_ptr[i] = GGML_FP32_TO_FP16(MAX(MIN(v, max), min)); + float v = GGML_CPU_FP16_TO_FP32(src0_ptr[i]); + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(MAX(MIN(v, max), min)); } } } @@ -5476,11 +5476,11 @@ 
static void ggml_compute_forward_rope_f16( const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); - const float x0 = GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[n_dims]); + const float x0 = GGML_CPU_FP16_TO_FP32(src[0]); + const float x1 = GGML_CPU_FP16_TO_FP32(src[n_dims]); - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[n_dims] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); + dst_data[0] = GGML_CPU_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); + dst_data[n_dims] = GGML_CPU_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); } } else { for (int64_t i0 = 0; i0 < n_dims; i0 += 2) { @@ -5492,11 +5492,11 @@ static void ggml_compute_forward_rope_f16( const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); - const float x0 = GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]); + const float x0 = GGML_CPU_FP16_TO_FP32(src[0]); + const float x1 = GGML_CPU_FP16_TO_FP32(src[n_dims/2]); - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); + dst_data[0] = GGML_CPU_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); + dst_data[n_dims/2] = GGML_CPU_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); } } } else { @@ -5507,11 +5507,11 @@ static void ggml_compute_forward_rope_f16( const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - const float x0 = GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[1]); + const float x0 = GGML_CPU_FP16_TO_FP32(src[0]); + const float x1 = GGML_CPU_FP16_TO_FP32(src[1]); - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); + dst_data[0] = GGML_CPU_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); + dst_data[1] = GGML_CPU_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); } } @@ -5525,11 +5525,11 @@ static void ggml_compute_forward_rope_f16( const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); - const float x0 = GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[n_dims]); + const float x0 = GGML_CPU_FP16_TO_FP32(src[0]); + const float x1 = GGML_CPU_FP16_TO_FP32(src[n_dims]); - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[n_dims] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); + dst_data[0] = GGML_CPU_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); + dst_data[n_dims] = GGML_CPU_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); } } else { for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) { @@ -5640,7 +5640,7 @@ static void ggml_compute_forward_conv_transpose_1d_f16_f32( for (int64_t i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i11*nb11); for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]); + dst_data[i10*ne11 + i11] = GGML_CPU_FP32_TO_FP16(src[i10]); } } } @@ -5933,7 +5933,7 @@ static void ggml_compute_forward_im2col_f16( if (iih < 0 || iih >= IH 
|| iiw < 0 || iiw >= IW) { dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0; } else { - dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]); + dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_CPU_FP32_TO_FP16(src_data[iih*IW + iiw]); } } } @@ -6109,7 +6109,7 @@ void ggml_compute_forward_conv_transpose_2d( const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11); ggml_fp16_t * dst_data = wdata + i11*ne10*ne12; for (int i10 = 0; i10 < ne10; i10++) { - dst_data[i10*ne12 + i12] = GGML_FP32_TO_FP16(src[i10]); + dst_data[i10*ne12 + i12] = GGML_CPU_FP32_TO_FP16(src[i10]); } } } @@ -6358,7 +6358,7 @@ static void ggml_compute_forward_pool_1d_sk_p0( case GGML_OP_POOL_COUNT: GGML_ABORT("fatal error"); } for (int ki = 0; ki < k; ++ki) { - const float srow_j = (src->type == GGML_TYPE_F32) ? ((const float*)srow)[j] : GGML_FP16_TO_FP32(((const ggml_fp16_t*)srow)[j]); + const float srow_j = (src->type == GGML_TYPE_F32) ? ((const float*)srow)[j] : GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t*)srow)[j]); switch (op) { case GGML_OP_POOL_AVG: drow[i] += srow_j; break; case GGML_OP_POOL_MAX: if (srow_j > drow[i]) drow[i] = srow_j; break; @@ -6450,7 +6450,7 @@ void ggml_compute_forward_pool_2d( for (int kx = 0; kx < k0; ++kx) { int j = ix + kx; if (j < 0 || j >= src->ne[0]) continue; - const float srow_j = (src->type == GGML_TYPE_F32) ? ((const float*)srow)[j] : GGML_FP16_TO_FP32(((const ggml_fp16_t*)srow)[j]); + const float srow_j = (src->type == GGML_TYPE_F32) ? ((const float*)srow)[j] : GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t*)srow)[j]); switch (op) { case GGML_OP_POOL_AVG: *out += srow_j; break; case GGML_OP_POOL_MAX: if (srow_j > *out) *out = srow_j; break; @@ -6538,7 +6538,7 @@ void ggml_compute_forward_pool_2d_back( } const float val = dst->type == GGML_TYPE_F32 ? - ((const float *) drowf)[j] : GGML_FP16_TO_FP32(((const ggml_fp16_t *) drowf)[j]); + ((const float *) drowf)[j] : GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drowf)[j]); if (val <= maxval) { continue; } @@ -6558,7 +6558,7 @@ void ggml_compute_forward_pool_2d_back( if (dst->type == GGML_TYPE_F32) { ((float *) drow)[j] += grad0; } else { - ((ggml_fp16_t *) drow)[j] = GGML_FP32_TO_FP16(grad0 + GGML_FP16_TO_FP32(((const ggml_fp16_t *) drow)[j])); + ((ggml_fp16_t *) drow)[j] = GGML_CPU_FP32_TO_FP16(grad0 + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drow)[j])); } } else if (op == GGML_OP_POOL_AVG) { const float grad = grad0 / ka; @@ -6577,7 +6577,7 @@ void ggml_compute_forward_pool_2d_back( if (dst->type == GGML_TYPE_F32) { ((float *) drow)[j] += grad; } else { - ((ggml_fp16_t *) drow)[j] += GGML_FP32_TO_FP16(grad); + ((ggml_fp16_t *) drow)[j] += GGML_CPU_FP32_TO_FP16(grad); } } } @@ -7142,7 +7142,7 @@ static void ggml_compute_forward_flash_attn_ext_f16( // loop over n_kv and n_head_kv // ref: https://arxiv.org/pdf/2112.05682.pdf for (int64_t ic = 0; ic < nek1; ++ic) { - const float mv = mp ? slope*GGML_FP16_TO_FP32(mp[ic]) : 0.0f; + const float mv = mp ? 
slope*GGML_CPU_FP16_TO_FP32(mp[ic]) : 0.0f; if (mv == -INFINITY) { continue; } @@ -7210,7 +7210,7 @@ static void ggml_compute_forward_flash_attn_ext_f16( if (v->type == GGML_TYPE_F16) { for (int64_t d = 0; d < DV; ++d) { - VKQ32[d] = GGML_FP16_TO_FP32(VKQ16[d]); + VKQ32[d] = GGML_CPU_FP16_TO_FP32(VKQ16[d]); } } diff --git a/ggml/src/ggml-cpu/quants.c b/ggml/src/ggml-cpu/quants.c index d2e705f287af5..ee35ab42fda07 100644 --- a/ggml/src/ggml-cpu/quants.c +++ b/ggml/src/ggml-cpu/quants.c @@ -2,6 +2,7 @@ #include "ggml-common.h" #include "ggml-cpu-impl.h" +#include "simd-mappings.h" #include "ggml-quants.h" #include "quants.h" @@ -137,7 +138,7 @@ void ggml_vec_dot_q4_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, c } int sumi = sumi0 + sumi1; - sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d); + sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); } *s = sumf; @@ -174,7 +175,7 @@ void ggml_vec_dot_q4_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, c } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -217,7 +218,7 @@ void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, c } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; } *s = sumf; @@ -260,7 +261,7 @@ void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, c } int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); } *s = sumf; @@ -290,7 +291,7 @@ void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, c sumi += x[ib].qs[j]*y[ib].qs[j]; } - sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); + sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); } *s = sumf; @@ -342,7 +343,7 @@ void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, } } - sumf += (float) sum * (GGML_FP16_TO_FP32(x[i].d) * y[i].d); + sumf += (float) sum * (GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d); } *s = sumf; @@ -372,7 +373,7 @@ void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, } } - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); sumf += (float) sumi * d; } @@ -405,8 +406,8 @@ void ggml_vec_dot_q2_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c summs += y[i].bsums[j] * (sc[j] >> 4); } - const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); int isum = 0; int is = 0; @@ -504,7 +505,7 @@ void ggml_vec_dot_q3_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for 
(int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -577,9 +578,9 @@ void ggml_vec_dot_q4_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -657,9 +658,9 @@ void ggml_vec_dot_q5_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -714,7 +715,7 @@ void ggml_vec_dot_q6_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, c for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -739,7 +740,7 @@ void ggml_vec_dot_iq2_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; int32_t bsum = 0; @@ -778,7 +779,7 @@ void ggml_vec_dot_iq2_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint16_t * GGML_RESTRICT q2 = x[i].qs; const uint8_t * GGML_RESTRICT sc = x[i].scales; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -829,7 +830,7 @@ void ggml_vec_dot_iq2_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, float sumf = 0; for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const int8_t * q8 = y[i].qs; const uint8_t * qs = x[i].qs; const uint8_t * qh = x[i].qh; @@ -882,7 +883,7 @@ void ggml_vec_dot_iq3_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT q3 = x[i].qs; const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; const int8_t * GGML_RESTRICT q8 = y[i].qs; @@ -924,7 +925,7 @@ void ggml_vec_dot_iq3_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, float sumf = 0.f; for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * GGML_RESTRICT qs = x[i].qs; const uint8_t * GGML_RESTRICT qh = x[i].qh; const uint8_t * GGML_RESTRICT signs = x[i].signs; @@ -1002,7 +1003,7 @@ void ggml_vec_dot_iq1_s_q8_K_generic(int n, float * 
GGML_RESTRICT s, size_t bs, qs += 4; } - sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); + sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); } *s = sumf; @@ -1063,7 +1064,7 @@ void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, qh += 2; } - sumf += GGML_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); + sumf += GGML_CPU_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); } *s = sumf; @@ -1087,7 +1088,7 @@ void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, float sumf = 0; for (; ib < nb; ++ib) { - const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); + const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); int sumi1 = 0, sumi2 = 0; for (int j = 0; j < QK4_NL/2; ++j) { sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; @@ -1113,7 +1114,7 @@ void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, float sumf = 0; for (int ibl = 0; ibl < nb; ++ibl) { - const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d; uint16_t h = x[ibl].scales_h; const uint8_t * qs = x[ibl].qs; const int8_t * q8 = y[ibl].qs; diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp index 692c53e01c08e..72ee93a5abc7c 100644 --- a/ggml/src/ggml-cpu/repack.cpp +++ b/ggml/src/ggml-cpu/repack.cpp @@ -6,6 +6,7 @@ #include "ggml-impl.h" #include "ggml-cpu.h" #include "ggml-cpu-impl.h" +#include "simd-mappings.h" #include "traits.h" #include "arch-fallback.h" @@ -72,7 +73,7 @@ void ggml_quantize_mat_q8_0_4x4_generic(const float * GGML_RESTRICT x, void * GG const float d = amax / ((1 << 7) - 1); id[row_iter] = d ? 1.0f / d : 0.0f; - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); } for (int j = 0; j < QK8_0 * 4; j++) { @@ -110,7 +111,7 @@ void ggml_quantize_mat_q8_0_4x8_generic(const float * GGML_RESTRICT x, void * GG const float d = amax / ((1 << 7) - 1); id[row_iter] = d ? 
1.0f / d : 0.0f; - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); + y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); } for (int j = 0; j < QK8_0 * 4; j++) { @@ -236,7 +237,7 @@ void ggml_gemv_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } @@ -280,7 +281,7 @@ void ggml_gemv_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } @@ -325,7 +326,7 @@ void ggml_gemv_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } @@ -396,13 +397,13 @@ void ggml_gemv_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, sumi2 = sumi2 * scales_1[j]; sumi += sumi1 + sumi2; } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d; + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d; } } for (int sb = 0; sb < 8; sb++) { uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16; for (int j = 0; j < ncols_interleaved; j++) { - sum_minf[j] += mins[j] * (a_ptr[l].bsums[sb * 2] + a_ptr[l].bsums[sb * 2 + 1]) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d; + sum_minf[j] += mins[j] * (a_ptr[l].bsums[sb * 2] + a_ptr[l].bsums[sb * 2 + 1]) * GGML_CPU_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d; } } } @@ -449,7 +450,7 @@ void ggml_gemv_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])); } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); } } } @@ -500,7 +501,7 @@ void ggml_gemm_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } @@ -555,7 +556,7 @@ void ggml_gemm_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * 
GGML_FP16_TO_FP32(a_ptr[l].d[m]); + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } @@ -609,7 +610,7 @@ void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } @@ -688,7 +689,7 @@ void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, sumi2 = sumi2 * scales_1[j]; sumi += sumi1 + sumi2; } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m]; + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m]; } } } @@ -697,7 +698,7 @@ void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, for(int m = 0; m < 4; m++) { const int16_t *bsums = a_ptr[l].bsums + (sb * 8) + (m * 4) - ((sb % 2) * 6); for(int j = 0; j < ncols_interleaved; j++) { - sum_minf[m][j] += mins[j] * (bsums[0] + bsums[1]) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m]; + sum_minf[m][j] += mins[j] * (bsums[0] + bsums[1]) * GGML_CPU_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m]; } } } @@ -753,7 +754,7 @@ void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])); } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); } } } diff --git a/ggml/src/ggml-cpu/simd-mappings.h b/ggml/src/ggml-cpu/simd-mappings.h index e42364c59aa10..b68ac0dd68b40 100644 --- a/ggml/src/ggml-cpu/simd-mappings.h +++ b/ggml/src/ggml-cpu/simd-mappings.h @@ -2,10 +2,167 @@ #include "ggml-cpu-impl.h" +#ifdef __ARM_FEATURE_SVE +#include +#endif // __ARM_FEATURE_SVE + +#if defined(__ARM_NEON) && !defined(__CUDACC__) && !defined(__MUSACC__) +// if YCM cannot find , make a symbolic link to it, for example: +// +// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/ +// +#include +#endif + +#if defined(__F16C__) +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + // // simd mappings // +// FP16 to FP32 conversion + +// 16-bit float +// on Arm, we use __fp16 +// on x86, we use uint16_t +// +// for old CUDA compilers (<= 11), we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/10616 +// for MUSA compilers , we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/11843 +// +#if defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__) + #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) neon_compute_fp16_to_fp32(x) + #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) neon_compute_fp32_to_fp16(x) + + #define GGML_CPU_FP16_TO_FP32(x) GGML_CPU_COMPUTE_FP16_TO_FP32(x) + + static inline float neon_compute_fp16_to_fp32(ggml_fp16_t h) { + __fp16 tmp; + memcpy(&tmp, &h, sizeof(ggml_fp16_t)); + return (float)tmp; + } + + static inline ggml_fp16_t neon_compute_fp32_to_fp16(float f) { + ggml_fp16_t res; + __fp16 tmp = f; + memcpy(&res, &tmp, sizeof(ggml_fp16_t)); + return res; + } +#elif defined(__F16C__) + #ifdef _MSC_VER + #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) 
_mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x))) + #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0) + #else + #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) + #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) + #endif +#elif defined(__POWER9_VECTOR__) + #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) power_compute_fp16_to_fp32(x) + #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) power_compute_fp32_to_fp16(x) + /* the inline asm below is about 12% faster than the lookup method */ + #define GGML_CPU_FP16_TO_FP32(x) GGML_CPU_COMPUTE_FP16_TO_FP32(x) + #define GGML_CPU_FP32_TO_FP16(x) GGML_CPU_COMPUTE_FP32_TO_FP16(x) + + static inline float power_compute_fp16_to_fp32(ggml_fp16_t h) { + float f; + double d; + __asm__( + "mtfprd %0,%2\n" + "xscvhpdp %0,%0\n" + "frsp %1,%0\n" : + /* temp */ "=d"(d), + /* out */ "=f"(f): + /* in */ "r"(h)); + return f; + } + + static inline ggml_fp16_t power_compute_fp32_to_fp16(float f) { + double d; + ggml_fp16_t r; + __asm__( /* xscvdphp can work on double or single precision */ + "xscvdphp %0,%2\n" + "mffprd %1,%0\n" : + /* temp */ "=d"(d), + /* out */ "=r"(r): + /* in */ "f"(f)); + return r; + } +#elif defined(__riscv) && defined(__riscv_zfhmin) + static inline float riscv_compute_fp16_to_fp32(ggml_fp16_t h) { + float f; + __asm__( + "fmv.h.x %[f], %[h]\n\t" + "fcvt.s.h %[f], %[f]" + : [f] "=&f" (f) + : [h] "r" (h) + ); + return f; + } + + static inline ggml_fp16_t riscv_compute_fp32_to_fp16(float f) { + ggml_fp16_t res; + __asm__( + "fcvt.h.s %[f], %[f]\n\t" + "fmv.x.h %[h], %[f]" + : [h] "=&r" (res) + : [f] "f" (f) + ); + return res; + } + + #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) riscv_compute_fp16_to_fp32(x) + #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) riscv_compute_fp32_to_fp16(x) + #define GGML_CPU_FP16_TO_FP32(x) GGML_CPU_COMPUTE_FP16_TO_FP32(x) + #define GGML_CPU_FP32_TO_FP16(x) GGML_CPU_COMPUTE_FP32_TO_FP16(x) +#elif defined(__NNPA__) + #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) nnpa_compute_fp16_to_fp32(x) + #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) nnpa_compute_fp32_to_fp16(x) + + #define GGML_CPU_FP16_TO_FP32(x) GGML_CPU_COMPUTE_FP16_TO_FP32(x) + #define GGML_CPU_FP32_TO_FP16(x) GGML_CPU_COMPUTE_FP32_TO_FP16(x) + + static inline float nnpa_compute_fp16_to_fp32(ggml_fp16_t h) { + uint16x8_t v_h = vec_splats(h); + uint16x8_t v_hd = vec_convert_from_fp16(v_h, 0); + return vec_extend_to_fp32_hi(v_hd, 0)[0]; + } + + static inline ggml_fp16_t nnpa_compute_fp32_to_fp16(float f) { + float32x4_t v_f = vec_splats(f); + float32x4_t v_zero = vec_splats(0.0f); + uint16x8_t v_hd = vec_round_from_fp32(v_f, v_zero, 0); + uint16x8_t v_h = vec_convert_to_fp16(v_hd, 0); + return vec_extract(v_h, 0); + } +#endif + +// precomputed f32 table for f16 (256 KB) +// defined in ggml-cpu.c, initialized in ggml_cpu_init() +extern float ggml_table_f32_f16[1 << 16]; + +// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32, +// so we define GGML_CPU_FP16_TO_FP32 and GGML_CPU_FP32_TO_FP16 elsewhere for NEON. +// This is also true for POWER9. 
+#if !defined(GGML_CPU_FP16_TO_FP32) +inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) { + uint16_t s; + memcpy(&s, &f, sizeof(uint16_t)); + return ggml_table_f32_f16[s]; +} + +#define GGML_CPU_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x) +#endif + +#if !defined(GGML_CPU_FP32_TO_FP16) +#define GGML_CPU_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) +#endif + + // we define a common set of C macros which map to specific intrinsics based on the current architecture // we then implement the fundamental computation operations below using only these macros // adding support for new architectures requires to define the corresponding SIMD macros @@ -415,7 +572,7 @@ static inline __m256 __avx_f32cx8_load(const ggml_fp16_t * x) { float tmp[8]; for (int i = 0; i < 8; i++) { - tmp[i] = GGML_FP16_TO_FP32(x[i]); + tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]); } return _mm256_loadu_ps(tmp); @@ -426,7 +583,7 @@ static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { _mm256_storeu_ps(arr, y); for (int i = 0; i < 8; i++) - x[i] = GGML_FP32_TO_FP16(arr[i]); + x[i] = GGML_CPU_FP32_TO_FP16(arr[i]); } #define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x) #define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y) @@ -574,10 +731,10 @@ static inline unsigned char ggml_endian_byte(int i) { inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) { float tmp[4]; - tmp[0] = GGML_FP16_TO_FP32(p[0]); - tmp[1] = GGML_FP16_TO_FP32(p[1]); - tmp[2] = GGML_FP16_TO_FP32(p[2]); - tmp[3] = GGML_FP16_TO_FP32(p[3]); + tmp[0] = GGML_CPU_FP16_TO_FP32(p[0]); + tmp[1] = GGML_CPU_FP16_TO_FP32(p[1]); + tmp[2] = GGML_CPU_FP16_TO_FP32(p[2]); + tmp[3] = GGML_CPU_FP16_TO_FP32(p[3]); return wasm_v128_load(tmp); } @@ -587,10 +744,10 @@ inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { wasm_v128_store(tmp, x); - p[0] = GGML_FP32_TO_FP16(tmp[0]); - p[1] = GGML_FP32_TO_FP16(tmp[1]); - p[2] = GGML_FP32_TO_FP16(tmp[2]); - p[3] = GGML_FP32_TO_FP16(tmp[3]); + p[0] = GGML_CPU_FP32_TO_FP16(tmp[0]); + p[1] = GGML_CPU_FP32_TO_FP16(tmp[1]); + p[2] = GGML_CPU_FP32_TO_FP16(tmp[2]); + p[3] = GGML_CPU_FP32_TO_FP16(tmp[3]); } #define GGML_F16x4 v128_t @@ -690,10 +847,10 @@ inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { static inline __m128 __sse_f16x4_load(const ggml_fp16_t * x) { float tmp[4]; - tmp[0] = GGML_FP16_TO_FP32(x[0]); - tmp[1] = GGML_FP16_TO_FP32(x[1]); - tmp[2] = GGML_FP16_TO_FP32(x[2]); - tmp[3] = GGML_FP16_TO_FP32(x[3]); + tmp[0] = GGML_CPU_FP16_TO_FP32(x[0]); + tmp[1] = GGML_CPU_FP16_TO_FP32(x[1]); + tmp[2] = GGML_CPU_FP16_TO_FP32(x[2]); + tmp[3] = GGML_CPU_FP16_TO_FP32(x[3]); return _mm_loadu_ps(tmp); } @@ -703,10 +860,10 @@ static inline void __sse_f16x4_store(ggml_fp16_t * x, __m128 y) { _mm_storeu_ps(arr, y); - x[0] = GGML_FP32_TO_FP16(arr[0]); - x[1] = GGML_FP32_TO_FP16(arr[1]); - x[2] = GGML_FP32_TO_FP16(arr[2]); - x[3] = GGML_FP32_TO_FP16(arr[3]); + x[0] = GGML_CPU_FP32_TO_FP16(arr[0]); + x[1] = GGML_CPU_FP32_TO_FP16(arr[1]); + x[2] = GGML_CPU_FP32_TO_FP16(arr[2]); + x[3] = GGML_CPU_FP32_TO_FP16(arr[3]); } #define GGML_F32Cx4 __m128 @@ -828,7 +985,7 @@ static inline void __lasx_f32cx8_store(ggml_fp16_t * x, __m256 y) { #define GGML_F32x4_ZERO __lsx_vldi(0) #define GGML_F32x4_SET1(x) __lsx_vinsgr2vr_w(__lsx_vldi(0),(x), 0) #define GGML_F32x4_LOAD(x) __lsx_vld((x), 0) -#define GGML_F32x4_STORE((x),(y)) __lsx_vst((y), (x), 0) +#define GGML_F32x4_STORE(x, y) __lsx_vst(y, x, 0) #define GGML_F32x4_FMA(a, b, c) __lsx_vfmadd_s(b, c, a) #define GGML_F32x4_ADD __lsx_vfadd_s #define GGML_F32x4_MUL 
__lsx_vfmul_s @@ -874,10 +1031,10 @@ static inline void __lasx_f32cx8_store(ggml_fp16_t * x, __m256 y) { static inline __m128 __lsx_f16x4_load(const ggml_fp16_t * x) { float tmp[4]; - tmp[0] = GGML_FP16_TO_FP32(x[0]); - tmp[1] = GGML_FP16_TO_FP32(x[1]); - tmp[2] = GGML_FP16_TO_FP32(x[2]); - tmp[3] = GGML_FP16_TO_FP32(x[3]); + tmp[0] = GGML_CPU_FP16_TO_FP32(x[0]); + tmp[1] = GGML_CPU_FP16_TO_FP32(x[1]); + tmp[2] = GGML_CPU_FP16_TO_FP32(x[2]); + tmp[3] = GGML_CPU_FP16_TO_FP32(x[3]); return __lsx_vld(tmp, 0); } @@ -887,10 +1044,10 @@ static inline void __lsx_f16x4_store(ggml_fp16_t * x, __m128 y) { __lsx_vst(y, arr, 0); - x[0] = GGML_FP32_TO_FP16(arr[0]); - x[1] = GGML_FP32_TO_FP16(arr[1]); - x[2] = GGML_FP32_TO_FP16(arr[2]); - x[3] = GGML_FP32_TO_FP16(arr[3]); + x[0] = GGML_CPU_FP32_TO_FP16(arr[0]); + x[1] = GGML_CPU_FP32_TO_FP16(arr[1]); + x[2] = GGML_CPU_FP32_TO_FP16(arr[2]); + x[3] = GGML_CPU_FP32_TO_FP16(arr[3]); } #define GGML_F32Cx4 __m128 @@ -922,7 +1079,7 @@ static inline void __lsx_f16x4_store(ggml_fp16_t * x, __m128 y) { #define GGML_F32_STEP 32 #define GGML_F32_EPR 4 -#define GGML_F32x4 __vector float +#define GGML_F32x4 float32x4_t #define GGML_F32x4_ZERO vec_splats(0.0f) #define GGML_F32x4_SET1 vec_splats #define GGML_F32x4_LOAD(p) vec_xl(0, p) @@ -962,28 +1119,45 @@ static inline void __lsx_f16x4_store(ggml_fp16_t * x, __m128 y) { #define GGML_F16_STEP GGML_F32_STEP #define GGML_F16_EPR GGML_F32_EPR -static inline __vector float __lzs_f16cx4_load(const ggml_fp16_t * x) { +static inline float32x4_t __lzs_f16cx4_load(const ggml_fp16_t * x) { +#if defined(__NNPA__) + uint16x8_t v_x = vec_xl(0, (const ggml_fp16_t *)x); + uint16x8_t v_xd = vec_convert_from_fp16(v_x, 0); + return vec_extend_to_fp32_hi(v_xd, 0); +#else float tmp[4]; for (int i = 0; i < 4; i++) { - tmp[i] = GGML_FP16_TO_FP32(x[i]); + tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]); } // note: keep type-cast here to prevent compiler bugs // see: https://github.com/ggml-org/llama.cpp/issues/12846 return vec_xl(0, (const float *)(tmp)); +#endif } -static inline void __lzs_f16cx4_store(ggml_fp16_t * x, __vector float y) { +static inline void __lzs_f16cx4_store(ggml_fp16_t * x, float32x4_t v_y) { +#if defined(__NNPA__) + float32x4_t v_zero = vec_splats(0.0f); + uint16x8_t v_xd = vec_round_from_fp32(v_y, v_zero, 0); + uint16x8_t v_x = vec_convert_to_fp16(v_xd, 0); + + x[0] = vec_extract(v_x, 0); + x[1] = vec_extract(v_x, 1); + x[2] = vec_extract(v_x, 2); + x[3] = vec_extract(v_x, 3); +#else float arr[4]; // note: keep type-cast here to prevent compiler bugs // see: https://github.com/ggml-org/llama.cpp/issues/12846 - vec_xst(y, 0, (float *)(arr)); + vec_xst(v_y, 0, (float *)(arr)); for (int i = 0; i < 4; i++) { - x[i] = GGML_FP32_TO_FP16(arr[i]); + x[i] = GGML_CPU_FP32_TO_FP16(arr[i]); } +#endif } #define GGML_F16_VEC GGML_F32x4 @@ -1004,3 +1178,7 @@ static inline void __lzs_f16cx4_store(ggml_fp16_t * x, __vector float y) { #define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR) #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR) #endif + +#ifdef __cplusplus +} +#endif diff --git a/ggml/src/ggml-cpu/vec.cpp b/ggml/src/ggml-cpu/vec.cpp index f7614568ea388..5e34d79a1695f 100644 --- a/ggml/src/ggml-cpu/vec.cpp +++ b/ggml/src/ggml-cpu/vec.cpp @@ -219,11 +219,11 @@ void ggml_vec_dot_f16(int n, float * GGML_RESTRICT s, size_t bs, ggml_fp16_t * G // leftovers for (int i = np; i < n; ++i) { - sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i])); + sumf += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[i])*GGML_CPU_FP16_TO_FP32(y[i])); } #else 
for (int i = 0; i < n; ++i) { - sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i])); + sumf += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[i])*GGML_CPU_FP16_TO_FP32(y[i])); } #endif diff --git a/ggml/src/ggml-cpu/vec.h b/ggml/src/ggml-cpu/vec.h index 09dbade2179fb..84f6c0e6d26c4 100644 --- a/ggml/src/ggml-cpu/vec.h +++ b/ggml/src/ggml-cpu/vec.h @@ -58,7 +58,7 @@ inline static void ggml_vec_set_bf16(const int n, ggml_bf16_t * x, const ggml_bf inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; } inline static void ggml_vec_add_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) { for (int i = 0; i < n; ++i) { - z[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(x[i]) + GGML_FP16_TO_FP32(y[i])); + z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) + GGML_CPU_FP16_TO_FP32(y[i])); } } inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; } @@ -67,7 +67,7 @@ inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; } inline static void ggml_vec_sub_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) { for (int i = 0; i < n; ++i) { - z[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(x[i]) - GGML_FP16_TO_FP32(y[i])); + z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) - GGML_CPU_FP16_TO_FP32(y[i])); } } inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; } @@ -75,20 +75,20 @@ inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; } inline static void ggml_vec_neg_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(-GGML_FP16_TO_FP32(x[i])); + y[i] = GGML_CPU_FP32_TO_FP16(-GGML_CPU_FP16_TO_FP32(x[i])); } } inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; } inline static void ggml_vec_mul_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) { for (int i = 0; i < n; ++i) { - z[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(x[i]) * GGML_FP16_TO_FP32(y[i])); + z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) * GGML_CPU_FP16_TO_FP32(y[i])); } } inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; } inline static void ggml_vec_div_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) { for (int i = 0; i < n; ++i) { - z[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(x[i]) / GGML_FP16_TO_FP32(y[i])); + z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) / GGML_CPU_FP16_TO_FP32(y[i])); } } @@ -131,13 +131,13 @@ inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * GG // leftovers for (int i = np; i < n; ++i) { for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) { - sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i])); + sumf[j] += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[j][i])*GGML_CPU_FP16_TO_FP32(y[i])); } } #else for (int i = 0; i < n; ++i) { for (int j = 0; j 
< GGML_VEC_DOT_UNROLL; ++j) { - sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i])); + sumf[j] += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[j][i])*GGML_CPU_FP16_TO_FP32(y[i])); } } #endif @@ -280,12 +280,12 @@ inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * GGML_RESTRICT y, // leftovers for (int i = np; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v); + y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i]) + GGML_CPU_FP16_TO_FP32(x[i])*v); } #else // scalar for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v); + y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i]) + GGML_CPU_FP16_TO_FP32(x[i])*v); } #endif } @@ -430,12 +430,12 @@ inline static void ggml_vec_scale_f16(const int n, ggml_fp16_t * y, const float // leftovers for (int i = np; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i])*v); + y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i])*v); } #else // scalar for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i])*v); + y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i])*v); } #endif } @@ -444,103 +444,103 @@ inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; } inline static void ggml_vec_sqr_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(x[i]); - y[i] = GGML_FP32_TO_FP16(v*v); + float v = GGML_CPU_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16(v*v); } } inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); } inline static void ggml_vec_sqrt_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(sqrtf(GGML_FP16_TO_FP32(x[i]))); + y[i] = GGML_CPU_FP32_TO_FP16(sqrtf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_log_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); } inline static void ggml_vec_log_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(logf(GGML_FP16_TO_FP32(x[i]))); + y[i] = GGML_CPU_FP32_TO_FP16(logf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_sin_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sinf(x[i]); } inline static void ggml_vec_sin_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(sinf(GGML_FP16_TO_FP32(x[i]))); + y[i] = GGML_CPU_FP32_TO_FP16(sinf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_cos_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = cosf(x[i]); } inline static void ggml_vec_cos_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(cosf(GGML_FP16_TO_FP32(x[i]))); + y[i] = GGML_CPU_FP32_TO_FP16(cosf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); } inline static void ggml_vec_abs_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(fabsf(GGML_FP16_TO_FP32(x[i]))); + 
y[i] = GGML_CPU_FP32_TO_FP16(fabsf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); } inline static void ggml_vec_sgn_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(x[i]); - y[i] = GGML_FP32_TO_FP16((v > 0.f) ? 1.f : ((v < 0.f) ? -1.f : 0.f)); + float v = GGML_CPU_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? 1.f : ((v < 0.f) ? -1.f : 0.f)); } } inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; } inline static void ggml_vec_step_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16((GGML_FP16_TO_FP32(x[i]) > 0.f) ? 1.f : 0.f); + y[i] = GGML_CPU_FP32_TO_FP16((GGML_CPU_FP16_TO_FP32(x[i]) > 0.f) ? 1.f : 0.f); } } inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); } inline static void ggml_vec_tanh_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(tanhf(GGML_FP16_TO_FP32(x[i]))); + y[i] = GGML_CPU_FP32_TO_FP16(tanhf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expm1f(x[i]); } inline static void ggml_vec_elu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(expm1f(GGML_FP16_TO_FP32(x[i]))); + y[i] = GGML_CPU_FP32_TO_FP16(expm1f(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; } inline static void ggml_vec_relu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(x[i]); - y[i] = GGML_FP32_TO_FP16((v > 0.f) ? v : 0.f); + float v = GGML_CPU_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? v : 0.f); } } inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); } inline static void ggml_vec_leaky_relu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const float ns) { for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(x[i]); - y[i] = GGML_FP32_TO_FP16(((v > 0.f) ? v : 0.f) + ns * ((v < 0.0f) ? v : 0.f)); + float v = GGML_CPU_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16(((v > 0.f) ? v : 0.f) + ns * ((v < 0.0f) ? 
v : 0.f)); } } inline static void ggml_vec_sigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = 1.f / (1.f + expf(-x[i])); } inline static void ggml_vec_sigmoid_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(1.f / (1.f + expf(-GGML_FP16_TO_FP32(x[i])))); + y[i] = GGML_CPU_FP32_TO_FP16(1.f / (1.f + expf(-GGML_CPU_FP16_TO_FP32(x[i])))); } } // TODO: optimize performance inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); } inline static void ggml_vec_hardswish_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(x[i]); - y[i] = GGML_FP32_TO_FP16(v * fminf(1.0f, fmaxf(0.0f, (v + 3.0f) / 6.0f))); + float v = GGML_CPU_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16(v * fminf(1.0f, fmaxf(0.0f, (v + 3.0f) / 6.0f))); } } inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); } inline static void ggml_vec_hardsigmoid_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(fminf(1.0f, fmaxf(0.0f, (GGML_FP16_TO_FP32(x[i]) + 3.0f) / 6.0f))); + y[i] = GGML_CPU_FP32_TO_FP16(fminf(1.0f, fmaxf(0.0f, (GGML_CPU_FP16_TO_FP32(x[i]) + 3.0f) / 6.0f))); } } inline static void ggml_vec_exp_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = expf(x[i]); } inline static void ggml_vec_exp_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(expf(GGML_FP16_TO_FP32(x[i]))); + y[i] = GGML_CPU_FP32_TO_FP16(expf(GGML_CPU_FP16_TO_FP32(x[i]))); } } @@ -562,9 +562,9 @@ inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp inline static void ggml_vec_gelu_erf_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - float xi = GGML_FP16_TO_FP32(x[i]); + float xi = GGML_CPU_FP16_TO_FP32(x[i]); float res = 0.5f*xi*(1.0f + erff(xi*SQRT_2_INV)); - y[i] = GGML_FP32_TO_FP16(res); + y[i] = GGML_CPU_FP32_TO_FP16(res); } } @@ -577,9 +577,9 @@ inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { } else if (x[i] >= 10.0f) { y[i] = x[i]; } else { - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); + ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]); + y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_f16[t]); } } } @@ -613,9 +613,9 @@ inline static float ggml_gelu_quick_f32(float x) { inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) { uint16_t t; for (int i = 0; i < n; ++i) { - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); + ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]); + y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]); } } #else @@ -628,8 +628,8 @@ inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(x[i]); - y[i] = GGML_FP32_TO_FP16(v*(1.0f/(1.0f+expf(GELU_QUICK_COEF*v)))); + float v = 
GGML_CPU_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16(v*(1.0f/(1.0f+expf(GELU_QUICK_COEF*v)))); } } @@ -638,8 +638,8 @@ inline static float ggml_silu_f32(float x) { return x/(1.0f + expf(-x)); } inline static ggml_fp16_t ggml_silu_f16(ggml_fp16_t x) { - float v = GGML_FP16_TO_FP32(x); - return GGML_FP32_TO_FP16(v/(1.0f + expf(-v))); + float v = GGML_CPU_FP16_TO_FP32(x); + return GGML_CPU_FP32_TO_FP16(v/(1.0f + expf(-v))); } #if __FINITE_MATH_ONLY__ @@ -888,9 +888,9 @@ inline static float ggml_silu_backward_f32(float x, float dy) { } inline static ggml_fp16_t ggml_silu_backward_f16(ggml_fp16_t x, ggml_fp16_t dy) { - const float v = GGML_FP16_TO_FP32(x); + const float v = GGML_CPU_FP16_TO_FP32(x); const float s = 1.0f/(1.0f + expf(-v)); - return GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(dy)*s*(1.0f + v*(1.0f - s))); + return GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(dy)*s*(1.0f + v*(1.0f - s))); } inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) { @@ -928,7 +928,7 @@ inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) { float sum = 0.0f; for (int i = 0; i < n; ++i) { - sum += GGML_FP16_TO_FP32(x[i]); + sum += GGML_CPU_FP16_TO_FP32(x[i]); } *s = sum; } diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h index 6dc5ce0d92fd8..57761644f431a 100644 --- a/ggml/src/ggml-impl.h +++ b/ggml/src/ggml-impl.h @@ -317,203 +317,81 @@ struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph, int i0, int i1); GGML_API void * ggml_aligned_malloc(size_t size); GGML_API void ggml_aligned_free(void * ptr, size_t size); -// FP16 to FP32 conversion +// FP16 <-> FP32 +// ref: https://github.com/Maratyszcza/FP16 -// 16-bit float -// on Arm, we use __fp16 -// on x86, we use uint16_t -// -// for old CUDA compilers (<= 11), we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/10616 -// for MUSA compilers , we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/11843 -// -#if defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__) - #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) - #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) - - #define GGML_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) - - static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { - __fp16 tmp; - memcpy(&tmp, &h, sizeof(ggml_fp16_t)); - return (float)tmp; - } - - static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { - ggml_fp16_t res; - __fp16 tmp = f; - memcpy(&res, &tmp, sizeof(ggml_fp16_t)); - return res; - } - -#elif defined(__F16C__) - - #ifdef _MSC_VER - #define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x))) - #define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0) - #else - #define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) - #define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) - #endif - -#elif defined(__POWER9_VECTOR__) - - #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) - #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) - /* the inline asm below is about 12% faster than the lookup method */ - #define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x) - #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) - - static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { - float f; - double d; - __asm__( - "mtfprd %0,%2\n" - "xscvhpdp %0,%0\n" - 
"frsp %1,%0\n" : - /* temp */ "=d"(d), - /* out */ "=f"(f): - /* in */ "r"(h)); - return f; - } - - static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { - double d; - ggml_fp16_t r; - __asm__( /* xscvdphp can work on double or single precision */ - "xscvdphp %0,%2\n" - "mffprd %1,%0\n" : - /* temp */ "=d"(d), - /* out */ "=r"(r): - /* in */ "f"(f)); - return r; - } - -#elif defined(__riscv) && defined(__riscv_zfhmin) - - static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { - float f; - __asm__( - "fmv.h.x %[f], %[h]\n\t" - "fcvt.s.h %[f], %[f]" - : [f] "=&f" (f) - : [h] "r" (h) - ); - return f; - } +static inline float fp32_from_bits(uint32_t w) { + union { + uint32_t as_bits; + float as_value; + } fp32; + fp32.as_bits = w; + return fp32.as_value; +} - static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { - ggml_fp16_t res; - __asm__( - "fcvt.h.s %[f], %[f]\n\t" - "fmv.x.h %[h], %[f]" - : [h] "=&r" (res) - : [f] "f" (f) - ); - return res; - } +static inline uint32_t fp32_to_bits(float f) { + union { + float as_value; + uint32_t as_bits; + } fp32; + fp32.as_value = f; + return fp32.as_bits; +} - #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) - #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) - #define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x) - #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) +static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { + const uint32_t w = (uint32_t) h << 16; + const uint32_t sign = w & UINT32_C(0x80000000); + const uint32_t two_w = w + w; + const uint32_t exp_offset = UINT32_C(0xE0) << 23; +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L) + const float exp_scale = 0x1.0p-112f; #else + const float exp_scale = fp32_from_bits(UINT32_C(0x7800000)); +#endif + const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale; - // FP16 <-> FP32 - // ref: https://github.com/Maratyszcza/FP16 - - static inline float fp32_from_bits(uint32_t w) { - union { - uint32_t as_bits; - float as_value; - } fp32; - fp32.as_bits = w; - return fp32.as_value; - } - - static inline uint32_t fp32_to_bits(float f) { - union { - float as_value; - uint32_t as_bits; - } fp32; - fp32.as_value = f; - return fp32.as_bits; - } - - static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { - const uint32_t w = (uint32_t) h << 16; - const uint32_t sign = w & UINT32_C(0x80000000); - const uint32_t two_w = w + w; - - const uint32_t exp_offset = UINT32_C(0xE0) << 23; - #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L) - const float exp_scale = 0x1.0p-112f; - #else - const float exp_scale = fp32_from_bits(UINT32_C(0x7800000)); - #endif - const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale; - - const uint32_t magic_mask = UINT32_C(126) << 23; - const float magic_bias = 0.5f; - const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias; + const uint32_t magic_mask = UINT32_C(126) << 23; + const float magic_bias = 0.5f; + const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias; - const uint32_t denormalized_cutoff = UINT32_C(1) << 27; - const uint32_t result = sign | - (two_w < denormalized_cutoff ? 
fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value)); - return fp32_from_bits(result); - } - - static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { - #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L) - const float scale_to_inf = 0x1.0p+112f; - const float scale_to_zero = 0x1.0p-110f; - #else - const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000)); - const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000)); - #endif - float base = (fabsf(f) * scale_to_inf) * scale_to_zero; - - const uint32_t w = fp32_to_bits(f); - const uint32_t shl1_w = w + w; - const uint32_t sign = w & UINT32_C(0x80000000); - uint32_t bias = shl1_w & UINT32_C(0xFF000000); - if (bias < UINT32_C(0x71000000)) { - bias = UINT32_C(0x71000000); - } + const uint32_t denormalized_cutoff = UINT32_C(1) << 27; + const uint32_t result = sign | + (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value)); + return fp32_from_bits(result); +} - base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base; - const uint32_t bits = fp32_to_bits(base); - const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00); - const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF); - const uint32_t nonsign = exp_bits + mantissa_bits; - return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign); +static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L) + const float scale_to_inf = 0x1.0p+112f; + const float scale_to_zero = 0x1.0p-110f; +#else + const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000)); + const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000)); +#endif + float base = (fabsf(f) * scale_to_inf) * scale_to_zero; + + const uint32_t w = fp32_to_bits(f); + const uint32_t shl1_w = w + w; + const uint32_t sign = w & UINT32_C(0x80000000); + uint32_t bias = shl1_w & UINT32_C(0xFF000000); + if (bias < UINT32_C(0x71000000)) { + bias = UINT32_C(0x71000000); } - #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) - #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) - -#endif // defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__) - -// precomputed f32 table for f16 (256 KB) -// defined in ggml.c, initialized in ggml_init() -GGML_API float ggml_table_f32_f16[1 << 16]; - -// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32, -// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON. -// This is also true for POWER9. -#if !defined(GGML_FP16_TO_FP32) -inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) { - uint16_t s; - memcpy(&s, &f, sizeof(uint16_t)); - return ggml_table_f32_f16[s]; + base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base; + const uint32_t bits = fp32_to_bits(base); + const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00); + const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF); + const uint32_t nonsign = exp_bits + mantissa_bits; + return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? 
UINT16_C(0x7E00) : nonsign); } -#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x) -#endif +#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) +#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) -#if !defined(GGML_FP32_TO_FP16) +#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x) #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) -#endif /** * Converts brain16 to float32. diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index f8e7c595bce15..ee605977f3a2c 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -61,9 +61,6 @@ #define m512i(p) (__m512i)(p) #endif -// precomputed f32 table for f16 (256 KB) (ggml-impl.h) -float ggml_table_f32_f16[1 << 16]; - #if defined(__linux__) || \ defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \ (defined(__APPLE__) && !TARGET_OS_TV && !TARGET_OS_WATCH) @@ -1422,14 +1419,6 @@ struct ggml_context * ggml_init(struct ggml_init_params params) { // initialize time system (required on Windows) ggml_time_init(); - for (int i = 0; i < (1 << 16); ++i) { - union { - uint16_t u16; - ggml_fp16_t fp16; - } u = {i}; - ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(u.fp16); - } - is_first_call = false; } From 41e8618c1e4115450dcbf84f1795d7e25cd55f0d Mon Sep 17 00:00:00 2001 From: R0CKSTAR Date: Thu, 26 Jun 2025 12:11:59 +0800 Subject: [PATCH 151/192] musa: enable fp16 mma (all) and cublas on qy2 (#13842) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * musa: enable fp16 mma (all) and cublas on qy2 Signed-off-by: Xiaodong Ye * Update ggml/src/ggml-cuda/ggml-cuda.cu Co-authored-by: Johannes Gäßler * Address review comments Signed-off-by: Xiaodong Ye * Address review comments Signed-off-by: Xiaodong Ye * musa: disable MUL_MAT_ID (q2_k × f32) due to precision issues Signed-off-by: Xiaodong Ye --------- Signed-off-by: Xiaodong Ye Co-authored-by: Johannes Gäßler --- ggml/src/ggml-cuda/common.cuh | 25 +++++++++++++------------ ggml/src/ggml-cuda/fattn-wmma-f16.cu | 4 ++++ ggml/src/ggml-cuda/ggml-cuda.cu | 25 +++++++++++++++---------- ggml/src/ggml-musa/mudnn.cuh | 4 ++-- 4 files changed, 34 insertions(+), 24 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index f6127aeee425a..ea20355023825 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -76,11 +76,9 @@ #define GGML_CUDA_CC_IS_CDNA(cc) (cc >= GGML_CUDA_CC_CDNA && cc < GGML_CUDA_CC_RDNA1) // Moore Threads -#define GGML_CUDA_MUSA_ARCH_IS_QY1 (__MUSA_ARCH__ <= 210) - -#define GGML_CUDA_CC_QY1 (GGML_CUDA_CC_OFFSET_MTHREADS + 0x210) // MTT S80, MTT S3000 -#define GGML_CUDA_CC_QY2 (GGML_CUDA_CC_OFFSET_MTHREADS + 0x220) // MTT S4000 -#define GGML_CUDA_CC_NG (GGML_CUDA_CC_OFFSET_MTHREADS + 0x310) // TBD +#define GGML_CUDA_CC_QY1 (GGML_CUDA_CC_OFFSET_MTHREADS + 0x210) // MTT S80, MTT S3000 +#define GGML_CUDA_CC_QY2 (GGML_CUDA_CC_OFFSET_MTHREADS + 0x220) // MTT S4000 +#define GGML_CUDA_CC_NG (GGML_CUDA_CC_OFFSET_MTHREADS + 0x310) // TBD #define GGML_CUDA_CC_IS_MTHREADS(cc) (cc >= GGML_CUDA_CC_OFFSET_MTHREADS && cc < GGML_CUDA_CC_OFFSET_AMD) #define GGML_CUDA_CC_IS_QY1(cc) (cc >= GGML_CUDA_CC_QY1 && cc < GGML_CUDA_CC_QY2) @@ -203,9 +201,9 @@ typedef float2 dfloat2; #define FAST_FP16_AVAILABLE #endif // defined(FP16_AVAILABLE) && __CUDA_ARCH__ != 610 -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA +#if (!defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA) || defined(GGML_USE_MUSA) #define 
FP16_MMA_AVAILABLE -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA +#endif // (!defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA) || defined(GGML_USE_MUSA) #if defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || (defined(GGML_HIP_ROCWMMA_FATTN_GFX12) && defined(RDNA4))) #define FP16_MMA_AVAILABLE @@ -219,9 +217,9 @@ typedef float2 dfloat2; #define CP_ASYNC_AVAILABLE #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE -#if !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && GGML_CUDA_MUSA_ARCH_IS_QY1) +#if !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ < 220) #define FLASH_ATTN_AVAILABLE -#endif // !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && GGML_CUDA_MUSA_ARCH_IS_QY1) +#endif // !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ < 220) static bool fp16_available(const int cc) { return ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_PASCAL; @@ -233,7 +231,8 @@ static bool fast_fp16_available(const int cc) { // To be used for feature selection of external libraries, e.g. cuBLAS. static bool fast_fp16_hardware_available(const int cc) { - return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_PASCAL && cc != 610) || GGML_CUDA_CC_IS_AMD(cc); + return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_PASCAL && cc != 610) || GGML_CUDA_CC_IS_AMD(cc) || + (GGML_CUDA_CC_IS_MTHREADS(cc) && cc >= GGML_CUDA_CC_QY2); } // Any FP16 tensor core instructions are available for ggml code. @@ -242,7 +241,8 @@ static bool fp16_mma_available(const int cc) { return false; #else if ((GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA) || - GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc)) { + GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || + GGML_CUDA_CC_IS_MTHREADS(cc)) { return true; } else if (GGML_CUDA_CC_IS_RDNA4(cc)) { #if defined(GGML_HIP_ROCWMMA_FATTN) && defined(GGML_HIP_ROCWMMA_FATTN_GFX12) @@ -259,7 +259,8 @@ static bool fp16_mma_available(const int cc) { // To be used for feature selection of external libraries, e.g. cuBLAS. 
static bool fp16_mma_hardware_available(const int cc) { return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_VOLTA) || - GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc); + GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc) || + (GGML_CUDA_CC_IS_MTHREADS(cc) && cc >= GGML_CUDA_CC_QY2); } static bool bf16_mma_hardware_available(const int cc) { diff --git a/ggml/src/ggml-cuda/fattn-wmma-f16.cu b/ggml/src/ggml-cuda/fattn-wmma-f16.cu index c5668adb152b2..f3b794c3644c8 100644 --- a/ggml/src/ggml-cuda/fattn-wmma-f16.cu +++ b/ggml/src/ggml-cuda/fattn-wmma-f16.cu @@ -9,7 +9,11 @@ #ifdef FP16_MMA_AVAILABLE #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) #include +#ifdef GGML_USE_MUSA +namespace wmma = mtmusa::wmma; +#else // GGML_USE_MUSA namespace wmma = nvcuda::wmma; +#endif // GGML_USE_MUSA #elif defined(GGML_HIP_ROCWMMA_FATTN) && defined(FP16_MMA_AVAILABLE) #undef HIP_ENABLE_WARP_SYNC_BUILTINS // conflicts with rocWMMA headers #include diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index b3e6833c396fd..b30c13c62f25c 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -1227,9 +1227,12 @@ static void ggml_cuda_op_mul_mat_cublas( const int cc = ggml_cuda_info().devices[id].cc; + const bool supports_bf16 = GGML_CUDA_CC_IS_NVIDIA(cc) || GGML_CUDA_CC_IS_AMD(cc) || + (GGML_CUDA_CC_IS_MTHREADS(cc) && cc >= GGML_CUDA_CC_QY2); + const bool use_fp16 = (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT; - if (src0->type == GGML_TYPE_BF16 && ggml_is_contiguous(src0) && row_diff == src0->ne[1]) { + if (supports_bf16 && src0->type == GGML_TYPE_BF16 && ggml_is_contiguous(src0) && row_diff == src0->ne[1]) { ggml_cuda_pool_alloc src1_as_bf16(ctx.pool(id)); if (src1->type != GGML_TYPE_BF16) { const to_bf16_cuda_t to_bf16_cuda = ggml_get_to_bf16_cuda(src1->type); @@ -1257,7 +1260,7 @@ static void ggml_cuda_op_mul_mat_cublas( const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_BF16); to_fp32_cuda(dst_bf16.get(), dst_dd_i, row_diff*src1_ncols, stream); - } else if (((GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_VOLTA) || GGML_CUDA_CC_IS_AMD(cc)) && use_fp16) { + } else if (fast_fp16_hardware_available(cc) && use_fp16) { // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32 ggml_cuda_pool_alloc src0_as_f16(ctx.pool(id)); if (src0->type != GGML_TYPE_F16) { @@ -3061,9 +3064,16 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g return false; } #ifdef GGML_USE_MUSA - if (b->type == GGML_TYPE_F16 && b->ne[2]*b->ne[3] > 1 && - !ggml_is_transposed(a) && !ggml_is_transposed(b)) { - return false; + const int cc = ggml_cuda_info().devices[dev_ctx->device].cc; + if (b->ne[2]*b->ne[3] > 1 && !ggml_is_transposed(a) && !ggml_is_transposed(b)) { + if (GGML_CUDA_CC_IS_QY1(cc) && op->op == GGML_OP_MUL_MAT && + a->type == GGML_TYPE_F16 && b->type == GGML_TYPE_F16) { + return false; + } + if (GGML_CUDA_CC_IS_QY2(cc) && op->op == GGML_OP_MUL_MAT_ID && + a->type == GGML_TYPE_Q2_K && b->type == GGML_TYPE_F32) { + return false; + } } #endif // GGML_USE_MUSA switch (a->type) { @@ -3090,11 +3100,6 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case GGML_TYPE_IQ4_NL: case GGML_TYPE_IQ4_XS: case GGML_TYPE_BF16: -#ifdef GGML_USE_MUSA - if (a->type == GGML_TYPE_Q3_K) { - return false; 
- } -#endif // GGML_USE_MUSA return true; default: return false; diff --git a/ggml/src/ggml-musa/mudnn.cuh b/ggml/src/ggml-musa/mudnn.cuh index a63be5755c79c..c30128561e810 100644 --- a/ggml/src/ggml-musa/mudnn.cuh +++ b/ggml/src/ggml-musa/mudnn.cuh @@ -1,7 +1,7 @@ #pragma once -#include "../include/ggml.h" -#include "../ggml-cuda/common.cuh" +#include "ggml-cuda/common.cuh" +#include "ggml.h" // Asynchronously copies data from src tensor to dst tensor using the provided context. // Returns a musaError_t indicating success or failure. From 3132b6ea7a0e1eefd86e3062309e93d5d3a22b83 Mon Sep 17 00:00:00 2001 From: Aaron Teo Date: Thu, 26 Jun 2025 18:41:41 +0800 Subject: [PATCH 152/192] docs: update s390x documentation + add faq (#14389) * docs: update s390x documentation + add faq Signed-off-by: Aaron Teo * docs: add s390x z17 build q&a Signed-off-by: Aaron Teo --------- Signed-off-by: Aaron Teo --- docs/build-s390x.md | 76 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 74 insertions(+), 2 deletions(-) diff --git a/docs/build-s390x.md b/docs/build-s390x.md index bb6eae784d6d0..4c9ebb271cee2 100644 --- a/docs/build-s390x.md +++ b/docs/build-s390x.md @@ -16,7 +16,7 @@ cd llama.cpp ## CPU Build with BLAS -Building llama.cpp with BLAS support is highly recommended as it has shown to provide performance improvements. +Building llama.cpp with BLAS support is highly recommended as it has shown to provide performance improvements. Make sure to have OpenBLAS installed in your environment. ```bash cmake -S . -B build \ @@ -82,12 +82,18 @@ All models need to be converted to Big-Endian. You can achieve this in three cas 1. **Use pre-converted models verified for use on IBM Z & LinuxONE (easiest)** + ![File Type - gguf](https://img.shields.io/badge/File_Type-gguf-fff) + You can find popular models pre-converted and verified at [s390x Ready Models](https://huggingface.co/collections/taronaeo/s390x-ready-models-672765393af438d0ccb72a08). - These models and their respective tokenizers are verified to run correctly on IBM Z & LinuxONE. + These models have already been converted from `safetensors` to `GGUF Big-Endian` and their respective tokenizers verified to run correctly on IBM z15 and later system. 2. **Convert safetensors model to GGUF Big-Endian directly (recommended)** + ![File Type - safetensors](https://img.shields.io/badge/File_Type-safetensors-da1e28) + + The model you are trying to convert must be in `safetensors` file format (for example [IBM Granite 3.3 2B](https://huggingface.co/ibm-granite/granite-3.3-2b-instruct)). Make sure you have downloaded the model repository for this case. + ```bash python3 convert_hf_to_gguf.py \ --outfile model-name-be.f16.gguf \ @@ -108,6 +114,10 @@ All models need to be converted to Big-Endian. You can achieve this in three cas 3. **Convert existing GGUF Little-Endian model to Big-Endian** + ![File Type - gguf](https://img.shields.io/badge/File_Type-gguf-fff) + + The model you are trying to convert must be in `gguf` file format (for example [IBM Granite 3.3 2B](https://huggingface.co/ibm-granite/granite-3.3-2b-instruct-GGUF)). Make sure you have downloaded the model file for this case. + ```bash python3 gguf-py/gguf/scripts/gguf_convert_endian.py model-name.f16.gguf BIG ``` @@ -163,6 +173,22 @@ It is strongly recommended to disable SMT via the kernel boot parameters as it n IBM VXE/VXE2 SIMD acceleration depends on the BLAS implementation. It is strongly recommended to use BLAS. +## Frequently Asked Questions (FAQ) + +1. 
I'm getting the following error message while trying to load a model: `gguf_init_from_file_impl: failed to load model: this GGUF file version 50331648 is extremely large, is there a mismatch between the host and model endianness?` + + Answer: Please ensure that the model you have downloaded/converted is GGUFv3 Big-Endian. These models are usually denoted with the `-be` suffix, i.e., `granite-3.3-2b-instruct-be.F16.gguf`. + + You may refer to the [Getting GGUF Models](#getting-gguf-models) section to manually convert a `safetensors` model to `GGUF` Big Endian. + +2. I'm getting extremely poor performance when running inference on a model + + Answer: Please refer to the [Appendix B: SIMD Support Matrix](#appendix-b-simd-support-matrix) to check if your model quantization is supported by SIMD acceleration. + +3. I'm building on IBM z17 and getting the following error messages: `invalid switch -march=z17` + + Answer: Please ensure that your GCC compiler is of minimum GCC 15.1.0 version, and have `binutils` updated to the latest version. If this does not fix the problem, kindly open an issue. + ## Getting Help on IBM Z & LinuxONE 1. **Bugs, Feature Requests** @@ -172,3 +198,49 @@ IBM VXE/VXE2 SIMD acceleration depends on the BLAS implementation. It is strongl 2. **Other Questions** Please reach out directly to [aionz@us.ibm.com](mailto:aionz@us.ibm.com). + +## Appendix A: Hardware Support Matrix + +| | Support | Minimum Compiler Version | +| ------- | ------- | ------------------------ | +| IBM z15 | ✅ | | +| IBM z16 | ✅ | | +| IBM z17 | ✅ | GCC 15.1.0 | + +- ✅ - supported and verified to run as intended +- 🚫 - unsupported, we are unlikely able to provide support + +## Appendix B: SIMD Support Matrix + +| | VX/VXE/VXE2 | NNPA | zDNN | Spyre | +| ---------- | ----------- | ---- | ---- | ----- | +| FP32 | ✅ | ✅ | ❓ | ❓ | +| FP16 | ✅ | ✅ | ❓ | ❓ | +| BF16 | 🚫 | 🚫 | ❓ | ❓ | +| Q4_0 | ✅ | ✅ | ❓ | ❓ | +| Q4_1 | ✅ | ✅ | ❓ | ❓ | +| Q5_0 | 🚫 | 🚫 | ❓ | ❓ | +| Q5_1 | 🚫 | 🚫 | ❓ | ❓ | +| Q8_0 | ✅ | ✅ | ❓ | ❓ | +| Q2_K | 🚫 | 🚫 | ❓ | ❓ | +| Q3_K | ✅ | ✅ | ❓ | ❓ | +| Q4_K | ✅ | ✅ | ❓ | ❓ | +| Q5_K | ✅ | ✅ | ❓ | ❓ | +| Q6_K | ✅ | ✅ | ❓ | ❓ | +| TQ1_0 | 🚫 | 🚫 | ❓ | ❓ | +| TQ2_0 | 🚫 | 🚫 | ❓ | ❓ | +| IQ2_XXS | 🚫 | 🚫 | ❓ | ❓ | +| IQ2_XS | 🚫 | 🚫 | ❓ | ❓ | +| IQ2_S | 🚫 | 🚫 | ❓ | ❓ | +| IQ3_XXS | 🚫 | 🚫 | ❓ | ❓ | +| IQ3_S | 🚫 | 🚫 | ❓ | ❓ | +| IQ1_S | 🚫 | 🚫 | ❓ | ❓ | +| IQ1_M | 🚫 | 🚫 | ❓ | ❓ | +| IQ4_NL | ✅ | ✅ | ❓ | ❓ | +| IQ4_XS | ✅ | ✅ | ❓ | ❓ | +| FP32->FP16 | 🚫 | ✅ | ❓ | ❓ | +| FP16->FP32 | 🚫 | ✅ | ❓ | ❓ | + +- ✅ - acceleration available +- 🚫 - acceleration unavailable, will still run using scalar implementation +- ❓ - acceleration unknown, please contribute if you can test it yourself From 22575ad6009eccdf937e9f4f302945fc07b1f19b Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 26 Jun 2025 15:50:15 +0300 Subject: [PATCH 153/192] metal : batch rows copy in a single threadgroup (#14384) * metal : batch rows copy in a single threadgroup ggml-ci * metal : handle some edge cases when threadgroup size is not a power of 2 ggml-ci --- ggml/src/ggml-metal/ggml-metal.m | 43 ++++++++++++++++++++++++---- ggml/src/ggml-metal/ggml-metal.metal | 11 +++++-- 2 files changed, 45 insertions(+), 9 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 19f4d59e59747..248fa378ef0f1 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -2450,6 +2450,7 @@ static bool ggml_metal_encode_node( nth *= 2; } + nth = MIN(nth, (int) 
pipeline.maxTotalThreadsPerThreadgroup); nth = MIN(nth, ne00); ggml_metal_kargs_sum_rows args = { @@ -3780,6 +3781,7 @@ static bool ggml_metal_encode_node( nth *= 2; } + nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup); nth = MIN(nth, ne00/4); ggml_metal_kargs_rms_norm args = { @@ -3816,6 +3818,7 @@ static bool ggml_metal_encode_node( nth *= 2; } + nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup); nth = MIN(nth, ne00/4); ggml_metal_kargs_l2_norm args = { @@ -3888,6 +3891,7 @@ static bool ggml_metal_encode_node( nth *= 2; } + nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup); nth = MIN(nth, ne00/4); ggml_metal_kargs_norm args = { @@ -4974,8 +4978,39 @@ static bool ggml_metal_encode_node( default: GGML_ABORT("not implemented"); } + GGML_ASSERT(ne00 % ggml_blck_size(src0->type) == 0); + + // TODO: support + //const int32_t nk00 = ne00/ggml_blck_size(dst->type); + const int32_t nk00 = ne00; + + int nth = 32; // SIMD width + + while (nth < nk00 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) { + nth *= 2; + } + + nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup); + + // when rows are small, we can batch them together in a single threadgroup + int nrptg = 1; + + // TODO: relax this constraint in the future + if (ggml_blck_size(src0->type) == 1 && ggml_blck_size(dst->type) == 1) { + if (nth > nk00) { + nrptg = (nth + nk00 - 1)/nk00; + nth = nk00; + + if (nrptg*nth > (int) pipeline.maxTotalThreadsPerThreadgroup) { + nrptg--; + } + } + } + + nth = MIN(nth, nk00); + ggml_metal_kargs_cpy args = { - /*.ne00 =*/ ne00, + /*.ne00 =*/ nk00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, @@ -4998,11 +5033,7 @@ static bool ggml_metal_encode_node( [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; - GGML_ASSERT(ne00 % ggml_blck_size(src0->type) == 0); - int nth = MIN(1024, ne00/ggml_blck_size(src0->type)); - - [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; - + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nrptg - 1)/nrptg, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, nrptg, 1)]; } break; case GGML_OP_SET: { diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index 3da19879b4b36..f028276068ef4 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -4306,11 +4306,16 @@ kernel void kernel_cpy( device const char * src0, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], + uint tiitg[[thread_index_in_threadgroup]], ushort3 tpitg[[thread_position_in_threadgroup]], - ushort3 ntg[[threads_per_threadgroup]]) { + ushort3 tptg[[threads_per_threadgroup]]) { const int i03 = tgpig[2]; const int i02 = tgpig[1]; - const int i01 = tgpig[0]; + const int i01 = tgpig[0]*tptg.y + tiitg/tptg.x; + + if (i01 >= args.ne01) { + return; + } const int64_t n = i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00; @@ -4321,7 +4326,7 @@ kernel void kernel_cpy( device T1 * dst_data = (device T1 *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); - for (int64_t i00 = tpitg.x; i00 < args.ne00; i00 += ntg.x) { + for (int64_t i00 = tiitg%tptg.x; i00 < args.ne00; i00 += tptg.x) { device const T0 * src = (device T0 *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); dst_data[i00] = (T1) src[0]; } From 7b2938bc18a5bf089bdc3d8297087eb63fb97469 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 26 Jun 2025 
15:51:19 +0300 Subject: [PATCH 154/192] metal : add special-case mat-vec mul for ne00 == 4 (#14385) ggml-ci --- ggml/src/ggml-metal/ggml-metal.m | 25 ++++++++-- ggml/src/ggml-metal/ggml-metal.metal | 64 +++++++++++++++++++++++++ tests/test-backend-ops.cpp | 72 +++++++++++++++------------- 3 files changed, 125 insertions(+), 36 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 248fa378ef0f1..d8d30cc0b41ca 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -211,11 +211,14 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_RWKV_WKV6_F32, GGML_METAL_KERNEL_TYPE_RWKV_WKV7_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, + GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32_C4, GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32, + GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_C4, GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW, GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4, GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16, GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32, + GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_C4, GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_1ROW, GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_L4, GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_BF16, @@ -1175,11 +1178,14 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RWKV_WKV6_F32, rwkv_wkv6_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RWKV_WKV7_F32, rwkv_wkv7_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, mul_mv_f32_f32, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32_C4, mul_mv_f32_f32_c4, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32, mul_mv_bf16_f32, has_simdgroup_reduction && use_bfloat); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_C4, mul_mv_bf16_f32_c4, use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_1ROW, mul_mv_bf16_f32_1row, has_simdgroup_reduction && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_L4, mul_mv_bf16_f32_l4, has_simdgroup_reduction && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_BF16, mul_mv_bf16_bf16, has_simdgroup_reduction && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32, mul_mv_f16_f32, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_C4, mul_mv_f16_f32_c4, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW, mul_mv_f16_f32_1row, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4, mul_mv_f16_f32_l4, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16, mul_mv_f16_f16, has_simdgroup_reduction); @@ -3111,14 +3117,23 @@ static bool ggml_metal_encode_node( nsg = 1; nr0 = 1; nr1 = 4; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32].pipeline; + if (ne00 == 4) { + nr0 = 32; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32_C4].pipeline; + } else { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32].pipeline; + } } break; case GGML_TYPE_F16: { nsg = 1; nr0 = 1; if (src1t == GGML_TYPE_F32) { - if (ne11 * ne12 < 4) { + if (ne00 == 4) { + nr0 = 32; + nr1 = 4; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_C4].pipeline; + } else if (ne11 * ne12 < 4) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW].pipeline; } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) { pipeline = 
ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4].pipeline; @@ -3137,7 +3152,11 @@ static bool ggml_metal_encode_node( nsg = 1; nr0 = 1; if (src1t == GGML_TYPE_F32) { - if (ne11 * ne12 < 4) { + if (ne00 == 4) { + nr0 = 32; + nr1 = 4; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_C4].pipeline; + } else if (ne11 * ne12 < 4) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_1ROW].pipeline; } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_L4].pipeline; diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index f028276068ef4..5f004a856bde6 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -2532,6 +2532,70 @@ template [[host_name("kernel_mul_mv_bf16_f32")]] kernel mul_mv_t kernel_mul_mv< template [[host_name("kernel_mul_mv_bf16_bf16")]] kernel mul_mv_t kernel_mul_mv; #endif +template +void kernel_mul_mv_c4_impl( + args_t args, + device const char * src0, + device const char * src1, + device char * dst, + uint3 tgpig, + ushort tiisg) { + const int r0 = tgpig.x*32 + tiisg; + const int rb = tgpig.y*N_MV_T_T; + const int im = tgpig.z; + + if (r0 >= args.ne01) { + return; + } + + const uint i12 = im%args.ne12; + const uint i13 = im/args.ne12; + + const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; + + device const T04 * x = (device const T04 *) (src0 + offset0); + + device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1; + + for (int row = 0; row < N_MV_T_T; ++row) { + int r1 = rb + row; + if (r1 >= args.ne11) { + break; + } + + const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; + + device const T14 * y = (device const T14 *) (src1 + offset1); + + dst_f32[(uint64_t)r1*args.ne0 + r0] = dot((float4) x[0], (float4) y[0]); + } +} + +template +kernel void kernel_mul_mv_c4( + constant ggml_metal_kargs_mul_mv & args, + device const char * src0, + device const char * src1, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + ushort tiisg[[thread_index_in_simdgroup]]) { + kernel_mul_mv_c4_impl( + args, + src0, + src1, + dst, + tgpig, + tiisg); +} + +typedef decltype(kernel_mul_mv_c4) mul_mv_c4_t; + +template [[host_name("kernel_mul_mv_f32_f32_c4")]] kernel mul_mv_c4_t kernel_mul_mv_c4; +template [[host_name("kernel_mul_mv_f16_f32_c4")]] kernel mul_mv_c4_t kernel_mul_mv_c4; +#if defined(GGML_METAL_USE_BF16) +template [[host_name("kernel_mul_mv_bf16_f32_c4")]] kernel mul_mv_c4_t kernel_mul_mv_c4; +#endif + template kernel void kernel_mul_mv_1row( constant ggml_metal_kargs_mul_mv & args, diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 7be7f2205fa04..615c2dc008a8d 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -4252,39 +4252,45 @@ static std::vector> make_test_cases_eval() { #if 1 for (ggml_type type_a : base_types) { for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) { - // test cases without permutation - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {1, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {2, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {1, 2})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 1}, {1, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 1}, {2, 1})); - 
test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 2}, {1, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 2}, {2, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 2}, {1, 2})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 2}, {2, 2})); - - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {1, 1}, {1, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {1, 1}, {2, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {1, 1}, {1, 2})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 1}, {1, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 1}, {2, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 2}, {1, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 2}, {2, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 2}, {1, 2})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 2}, {2, 2})); - - // test cases with permutation - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {2, 3}, {1, 1}, {0, 2, 1, 3})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {2, 3}, {1, 1}, {0, 1, 3, 2})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {2, 3}, {1, 1}, {0, 3, 2, 1})); - - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 256, {2, 3}, {1, 1}, {0, 2, 1, 3})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 256, {2, 3}, {1, 1}, {0, 1, 3, 2})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 256, {2, 3}, {1, 1}, {0, 3, 2, 1})); - - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {2, 3}, {1, 1}, {0, 2, 1, 3})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {2, 3}, {1, 1}, {0, 1, 3, 2})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {2, 3}, {1, 1}, {0, 3, 2, 1})); + std::vector ks = { 256 }; + if (ggml_blck_size(type_a) == 1) { + ks.push_back(4); + } + for (auto k : ks) { + // test cases without permutation + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, k, {1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, k, {1, 1}, {2, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, k, {1, 1}, {1, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, k, {3, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, k, {3, 1}, {2, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, k, {3, 2}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, k, {3, 2}, {2, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, k, {3, 2}, {1, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, k, {3, 2}, {2, 2})); + + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {1, 1}, {2, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {1, 1}, {1, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {3, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {3, 1}, {2, 1})); + test_cases.emplace_back(new 
test_mul_mat(type_a, type_b, 16, 16, k, {3, 2}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {3, 2}, {2, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {3, 2}, {1, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {3, 2}, {2, 2})); + + // test cases with permutation + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, k, {2, 3}, {1, 1}, {0, 2, 1, 3})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, k, {2, 3}, {1, 1}, {0, 1, 3, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, k, {2, 3}, {1, 1}, {0, 3, 2, 1})); + + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, k, {2, 3}, {1, 1}, {0, 2, 1, 3})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, k, {2, 3}, {1, 1}, {0, 1, 3, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, k, {2, 3}, {1, 1}, {0, 3, 2, 1})); + + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {2, 3}, {1, 1}, {0, 2, 1, 3})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {2, 3}, {1, 1}, {0, 1, 3, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, k, {2, 3}, {1, 1}, {0, 3, 2, 1})); + } // test cases with large ne00/ne10 to cover stream-k fixup test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 1024, {3, 2}, {1, 1})); From ef9d687787269e7fc4690a9ab86fa791fcecd6b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Thu, 26 Jun 2025 15:01:14 +0200 Subject: [PATCH 155/192] llama : return mistral-v7-tekken as default template only (#14390) --- src/llama-model.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 9b19da984081e..c2835ce67a88d 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -14377,7 +14377,7 @@ const char * llama_model_chat_template(const llama_model * model, const char * n // do not extend this list unless absolutely necessary // Mistral-Small-2503 does not have built-in chat template llama_vocab_pre_type pre_type = model->vocab.get_pre_type(); - if (pre_type == LLAMA_VOCAB_PRE_TYPE_TEKKEN && model->layers.size() == 40) { + if (!name && pre_type == LLAMA_VOCAB_PRE_TYPE_TEKKEN && model->layers.size() == 40) { return "mistral-v7-tekken"; } From 20e6e61609b72476de376a756df2de878d457c4d Mon Sep 17 00:00:00 2001 From: bandoti <141645996+bandoti@users.noreply.github.com> Date: Thu, 26 Jun 2025 13:46:53 -0300 Subject: [PATCH 156/192] cmake: regen vulkan shaders when shaders-gen sources change (#14398) * Add shaders-gen sources as target deps --- ggml/src/ggml-vulkan/CMakeLists.txt | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-vulkan/CMakeLists.txt b/ggml/src/ggml-vulkan/CMakeLists.txt index 39f022f33d856..0bf4cb14f88c7 100644 --- a/ggml/src/ggml-vulkan/CMakeLists.txt +++ b/ggml/src/ggml-vulkan/CMakeLists.txt @@ -143,7 +143,8 @@ if (Vulkan_FOUND) -DCMAKE_BUILD_TYPE=$ ${VULKAN_SHADER_GEN_CMAKE_ARGS} - BUILD_COMMAND ${CMAKE_COMMAND} --build . --config $ + BUILD_COMMAND ${CMAKE_COMMAND} --build . 
--config $ + BUILD_ALWAYS TRUE # NOTE: When DESTDIR is set using Makefile generators and # "make install" triggers the build step, vulkan-shaders-gen @@ -164,6 +165,14 @@ if (Vulkan_FOUND) file(GLOB _ggml_vk_shader_files CONFIGURE_DEPENDS "${_ggml_vk_input_dir}/*.comp") + # Because external projects do not provide source-level tracking, + # the vulkan-shaders-gen sources need to be explicitly added to + # ensure that changes will cascade into shader re-generation. + + file(GLOB _ggml_vk_shaders_gen_sources + CONFIGURE_DEPENDS "${_ggml_vk_input_dir}/*.cpp" + "${_ggml_vk_input_dir}/*.h") + add_custom_command( OUTPUT ${_ggml_vk_header} ${_ggml_vk_source} @@ -177,6 +186,7 @@ if (Vulkan_FOUND) --no-clean DEPENDS ${_ggml_vk_shader_files} + ${_ggml_vk_shaders_gen_sources} vulkan-shaders-gen COMMENT "Generate vulkan shaders" From 71e38879796c142bd7b05c46d22612753e66fc8b Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Thu, 26 Jun 2025 19:34:02 +0200 Subject: [PATCH 157/192] model : gemma3n text-only (#14400) * gemma3n * add llm_graph_input_one --- convert_hf_to_gguf.py | 124 +++++++- gguf-py/gguf/constants.py | 75 +++++ gguf-py/gguf/gguf_writer.py | 18 ++ gguf-py/gguf/tensor_mapping.py | 64 ++++ src/llama-arch.cpp | 54 ++++ src/llama-arch.h | 17 ++ src/llama-graph.cpp | 23 +- src/llama-graph.h | 16 +- src/llama-hparams.h | 6 + src/llama-kv-cache-unified.cpp | 30 +- src/llama-model.cpp | 517 +++++++++++++++++++++++++++++++++ src/llama-model.h | 22 ++ src/llama-quant.cpp | 9 +- 13 files changed, 960 insertions(+), 15 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index bbf8b30ff5324..4f2339a02a13c 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -310,6 +310,8 @@ def prepare_tensors(self): gguf.MODEL_TENSOR.POSNET_NORM2, gguf.MODEL_TENSOR.V_ENC_EMBD_POS, gguf.MODEL_TENSOR.A_ENC_EMBD_POS, + gguf.MODEL_TENSOR.ALTUP_CORRECT_COEF, + gguf.MODEL_TENSOR.ALTUP_PREDICT_COEF, ) ) or not new_name.endswith(".weight") @@ -320,7 +322,11 @@ def prepare_tensors(self): self.match_model_tensor_name(new_name, key, bid) for key in ( gguf.MODEL_TENSOR.TOKEN_EMBD, + gguf.MODEL_TENSOR.PER_LAYER_TOKEN_EMBD, gguf.MODEL_TENSOR.OUTPUT, + gguf.MODEL_TENSOR.ALTUP_ROUTER, + gguf.MODEL_TENSOR.LAUREL_L, + gguf.MODEL_TENSOR.LAUREL_R, ) ): if self.ftype in ( @@ -921,13 +927,16 @@ def _create_vocab_sentencepiece(self): tokenizer = SentencePieceProcessor() tokenizer.LoadFromFile(str(tokenizer_path)) - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) + vocab_size = self.find_hparam([ + "vocab_size_per_layer_input", # gemma3n + "vocab_size", + ], optional=True) or tokenizer.vocab_size() tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)] scores: list[float] = [-10000.0] * vocab_size toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size - for token_id in range(tokenizer.vocab_size()): + for token_id in range(vocab_size): piece = tokenizer.IdToPiece(token_id) text = piece.encode("utf-8") score = tokenizer.GetScore(token_id) @@ -942,6 +951,10 @@ def _create_vocab_sentencepiece(self): elif tokenizer.IsByte(token_id): toktype = SentencePieceTokenTypes.BYTE + if token_id >= vocab_size: + logger.warning(f'ignore tokens from {token_id}: id is out of range, max={vocab_size - 1}') + break + tokens[token_id] = text scores[token_id] = score toktypes[token_id] = toktype @@ -4217,6 +4230,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter @ModelBase.register("Gemma3ForCausalLM", "Gemma3ForConditionalGeneration") 
class Gemma3Model(TextModel): model_arch = gguf.MODEL_ARCH.GEMMA3 + norm_shift = 1.0 # Gemma3RMSNorm adds 1.0 to the norm value def set_vocab(self): self._set_vocab_sentencepiece() @@ -4238,9 +4252,8 @@ def set_gguf_parameters(self): self.gguf_writer.add_value_length(hparams.get("head_dim", 256)) self.gguf_writer.add_file_type(self.ftype) self.gguf_writer.add_rope_freq_base(hparams.get("rope_theta", 1_000_000.0)) # for global layers - # both attn_logit_softcapping and final_logit_softcapping are removed in Gemma3 + # attn_logit_softcapping is removed in Gemma3 assert hparams.get("attn_logit_softcapping") is None - assert hparams.get("final_logit_softcapping") is None self.gguf_writer.add_sliding_window(hparams["sliding_window"]) self.gguf_writer.add_head_count_kv(hparams.get("num_key_value_heads", 4)) if hparams.get("rope_scaling") is not None: @@ -4252,7 +4265,7 @@ def set_gguf_parameters(self): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: del bid # unused - if name.startswith("language_model."): + if "language_model." in name: name = name.replace("language_model.", "") elif name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \ @@ -4267,8 +4280,9 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter # ref code in Gemma3RMSNorm # output = output * (1.0 + self.weight.float()) + # note: this is not the case on gemma3n if name.endswith("norm.weight"): - data_torch = data_torch + 1 + data_torch = data_torch + self.norm_shift return [(self.map_tensor_name(name), data_torch)] @@ -4325,6 +4339,104 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter return [] # skip other tensors +@ModelBase.register("Gemma3nForConditionalGeneration") +class Gemma3NModel(Gemma3Model): + model_arch = gguf.MODEL_ARCH.GEMMA3N + norm_shift = 0.0 # same value with Gemma3p5RMSNorm scale_shift on python code + + _altup_proj: list[Tensor] = [] + _altup_unembd: list[Tensor] = [] + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + assert self.hparams["altup_num_inputs"] == 4, "Current conversion only supports 4 altup inputs" + self._altup_proj = [ + torch.Tensor(), # to be replaced + torch.Tensor(), # to be replaced + torch.Tensor(), # to be replaced + ] + self._altup_unembd = [ + torch.Tensor(), # to be replaced + torch.Tensor(), # to be replaced + torch.Tensor(), # to be replaced + ] + + def set_vocab(self): + with open(self.dir_model / "chat_template.jinja") as f: + # quick hack to make sure chat template is added + self.gguf_writer.add_chat_template(f.read()) + super().set_vocab() + + def set_gguf_parameters(self): + super().set_gguf_parameters() + self.gguf_writer.add_altup_active_idx(self.hparams["altup_active_idx"]) + self.gguf_writer.add_altup_num_inputs(self.hparams["altup_num_inputs"]) + self.gguf_writer.add_embedding_length_per_layer_input(self.hparams["hidden_size_per_layer_input"]) + self.gguf_writer.add_shared_kv_layers(self.hparams["num_kv_shared_layers"]) + + activation_sparsity_scale = [] + for s in self.hparams["activation_sparsity_pattern"]: + normal_dist = torch.distributions.normal.Normal(0, 1) + std_multiplier = normal_dist.icdf(torch.tensor(s, dtype=torch.float32)) + activation_sparsity_scale.append(std_multiplier.item()) + self.gguf_writer.add_activation_sparsity_scale(activation_sparsity_scale) + + sliding_window_pattern = [] + for t in self.hparams["layer_types"]: + sliding_window_pattern.append(t == "sliding_attention") + 
self.gguf_writer.add_sliding_window_pattern(sliding_window_pattern) + + def _stack_matrices(self, matrices: list[Tensor]) -> Tensor | None: + has_all = all(m.numel() > 0 for m in matrices) + if not has_all: + return None + else: + return torch.stack(matrices, dim=0) + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + if name.endswith("_scale"): + name = name + ".weight" + + # TODO: implement self.prediction_coefs.weight.clamp_(...) + + if "language_model." not in name: + return [] # skip non-language model tensors + + if "altup_unembed_projections" in name: + data_torch = data_torch.to(device="cpu") + if ".0." in name: + self._altup_unembd[0] = data_torch + elif ".1." in name: + self._altup_unembd[1] = data_torch + elif ".2." in name: + self._altup_unembd[2] = data_torch + else: + raise ValueError(f"Unknown name: {name}") + out = self._stack_matrices(self._altup_unembd) + if out is not None: + return [(self.map_tensor_name("model.altup_unembed_projections.weight"), out)] + else: + return [] + + if "altup_projections" in name: + data_torch = data_torch.to(device="cpu") + if ".0." in name: + self._altup_proj[0] = data_torch + elif ".1." in name: + self._altup_proj[1] = data_torch + elif ".2." in name: + self._altup_proj[2] = data_torch + else: + raise ValueError(f"Unknown name: {name}") + out = self._stack_matrices(self._altup_proj) + if out is not None: + return [(self.map_tensor_name("model.altup_projections.weight"), out)] + else: + return [] + + return super().modify_tensors(data_torch, name, bid) + + @ModelBase.register("Starcoder2ForCausalLM") class StarCoder2Model(TextModel): model_arch = gguf.MODEL_ARCH.STARCODER2 diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 0429b0aaf135d..fb75143b0b545 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -118,6 +118,10 @@ class LLM: EMBEDDING_SCALE = "{arch}.embedding_scale" TOKEN_SHIFT_COUNT = "{arch}.token_shift_count" INTERLEAVE_MOE_LAYER_STEP = "{arch}.interleave_moe_layer_step" + ACTIVATION_SPARSITY_SCALE = "{arch}.activation_sparsity_scale" + ALTUP_ACTIVE_IDX = "{arch}.altup.active_idx" + ALTUP_NUM_INPUTS = "{arch}.altup.num_inputs" + EMBD_LENGTH_PER_LAYER_INP = "{arch}.embedding_length_per_layer_input" class Attention: HEAD_COUNT = "{arch}.attention.head_count" @@ -142,6 +146,8 @@ class Attention: SCALE = "{arch}.attention.scale" KEY_LENGTH_MLA = "{arch}.attention.key_length_mla" VALUE_LENGTH_MLA = "{arch}.attention.value_length_mla" + SHARED_KV_LAYERS = "{arch}.attention.shared_kv_layers" + SLIDING_WINDOW_PATTERN = "{arch}.attention.sliding_window_pattern" class Rope: DIMENSION_COUNT = "{arch}.rope.dimension_count" @@ -314,6 +320,7 @@ class MODEL_ARCH(IntEnum): GEMMA = auto() GEMMA2 = auto() GEMMA3 = auto() + GEMMA3N = auto() STARCODER2 = auto() RWKV6 = auto() RWKV6QWEN2 = auto() @@ -399,6 +406,22 @@ class MODEL_TENSOR(IntEnum): ATTN_Q_NORM = auto() ATTN_K_NORM = auto() LAYER_OUT_NORM = auto() + PER_LAYER_TOKEN_EMBD = auto() # gemma3n + PER_LAYER_MODEL_PROJ = auto() # gemma3n + PER_LAYER_INP_GATE = auto() # gemma3n + PER_LAYER_PROJ = auto() # gemma3n + PER_LAYER_PROJ_NORM = auto() # gemma3n + PER_LAYER_POST_NORM = auto() # gemma3n + ALTUP_PROJ = auto() # gemma3n + ALTUP_UNEMBD_PROJ = auto() # gemma3n + ALTUP_CORRECT_COEF = auto() # gemma3n + ALTUP_CORRECT_SCALE = auto() # gemma3n + ALTUP_PREDICT_COEF = auto() # gemma3n + ALTUP_ROUTER = auto() # gemma3n + ALTUP_ROUTER_NORM = auto() # gemma3n + LAUREL_L = auto() # gemma3n + LAUREL_R = 
auto() # gemma3n + LAUREL_POST_NORM = auto() # gemma3n SSM_IN = auto() SSM_CONV1D = auto() SSM_X = auto() @@ -597,6 +620,7 @@ class MODEL_TENSOR(IntEnum): MODEL_ARCH.GEMMA: "gemma", MODEL_ARCH.GEMMA2: "gemma2", MODEL_ARCH.GEMMA3: "gemma3", + MODEL_ARCH.GEMMA3N: "gemma3n", MODEL_ARCH.STARCODER2: "starcoder2", MODEL_ARCH.RWKV6: "rwkv6", MODEL_ARCH.RWKV6QWEN2: "rwkv6qwen2", @@ -682,6 +706,22 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_UP_EXP: "blk.{bid}.ffn_up_exps", MODEL_TENSOR.FFN_EXP_PROBS_B: "blk.{bid}.exp_probs_b", MODEL_TENSOR.LAYER_OUT_NORM: "blk.{bid}.layer_output_norm", + MODEL_TENSOR.PER_LAYER_TOKEN_EMBD: "per_layer_token_embd", # gemma3n + MODEL_TENSOR.PER_LAYER_MODEL_PROJ: "per_layer_model_proj", # gemma3n + MODEL_TENSOR.PER_LAYER_PROJ_NORM: "per_layer_proj_norm", # gemma3n + MODEL_TENSOR.ALTUP_UNEMBD_PROJ: "altup_unembd_proj", # gemma3n + MODEL_TENSOR.ALTUP_PROJ: "altup_proj", # gemma3n + MODEL_TENSOR.PER_LAYER_INP_GATE: "blk.{bid}.inp_gate", # gemma3n + MODEL_TENSOR.PER_LAYER_PROJ: "blk.{bid}.proj", # gemma3n + MODEL_TENSOR.PER_LAYER_POST_NORM: "blk.{bid}.post_norm", # gemma3n + MODEL_TENSOR.ALTUP_CORRECT_COEF: "blk.{bid}.altup_correct_coef", # gemma3n + MODEL_TENSOR.ALTUP_CORRECT_SCALE: "blk.{bid}.altup_correct_scale", # gemma3n + MODEL_TENSOR.ALTUP_PREDICT_COEF: "blk.{bid}.altup_predict_coef", # gemma3n + MODEL_TENSOR.ALTUP_ROUTER: "blk.{bid}.altup_router", # gemma3n + MODEL_TENSOR.ALTUP_ROUTER_NORM: "blk.{bid}.altup_router_norm", # gemma3n + MODEL_TENSOR.LAUREL_L: "blk.{bid}.laurel_l", # gemma3n + MODEL_TENSOR.LAUREL_R: "blk.{bid}.laurel_r", # gemma3n + MODEL_TENSOR.LAUREL_POST_NORM: "blk.{bid}.laurel_post_norm", # gemma3n MODEL_TENSOR.SSM_IN: "blk.{bid}.ssm_in", MODEL_TENSOR.SSM_CONV1D: "blk.{bid}.ssm_conv1d", MODEL_TENSOR.SSM_X: "blk.{bid}.ssm_x", @@ -1486,6 +1526,41 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_PRE_NORM, MODEL_TENSOR.FFN_POST_NORM, ], + MODEL_ARCH.GEMMA3N: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_Q_NORM, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_K_NORM, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_POST_NORM, + MODEL_TENSOR.FFN_PRE_NORM, + MODEL_TENSOR.FFN_POST_NORM, + # altup / laurel + MODEL_TENSOR.PER_LAYER_TOKEN_EMBD, + MODEL_TENSOR.PER_LAYER_MODEL_PROJ, + MODEL_TENSOR.PER_LAYER_INP_GATE, + MODEL_TENSOR.PER_LAYER_PROJ, + MODEL_TENSOR.PER_LAYER_PROJ_NORM, + MODEL_TENSOR.PER_LAYER_POST_NORM, + MODEL_TENSOR.ALTUP_PROJ, + MODEL_TENSOR.ALTUP_UNEMBD_PROJ, + MODEL_TENSOR.ALTUP_CORRECT_COEF, + MODEL_TENSOR.ALTUP_CORRECT_SCALE, + MODEL_TENSOR.ALTUP_PREDICT_COEF, + MODEL_TENSOR.ALTUP_ROUTER, + MODEL_TENSOR.ALTUP_ROUTER_NORM, + MODEL_TENSOR.LAUREL_L, + MODEL_TENSOR.LAUREL_R, + MODEL_TENSOR.LAUREL_POST_NORM, + ], MODEL_ARCH.STARCODER2: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.OUTPUT_NORM, diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index b9b63d052624d..d32cd479adb17 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -672,6 +672,18 @@ def add_parallel_residual(self, use: bool) -> None: def add_decoder_start_token_id(self, id: int) -> None: self.add_uint32(Keys.LLM.DECODER_START_TOKEN_ID.format(arch=self.arch), id) + def add_embedding_length_per_layer_input(self, value: int) -> None: + self.add_uint32(Keys.LLM.EMBD_LENGTH_PER_LAYER_INP.format(arch=self.arch), value) + + def add_altup_active_idx(self, val: 
int) -> None: + self.add_uint32(Keys.LLM.ALTUP_ACTIVE_IDX.format(arch=self.arch), val) + + def add_altup_num_inputs(self, val: int) -> None: + self.add_uint32(Keys.LLM.ALTUP_NUM_INPUTS.format(arch=self.arch), val) + + def add_activation_sparsity_scale(self, values: Sequence[float]) -> None: + self.add_array(Keys.LLM.ACTIVATION_SPARSITY_SCALE.format(arch=self.arch), values) + def add_head_count(self, count: int | Sequence[int]) -> None: if isinstance(count, int): self.add_uint32(Keys.Attention.HEAD_COUNT.format(arch=self.arch), count) @@ -702,6 +714,12 @@ def add_max_alibi_bias(self, bias: float) -> None: def add_clamp_kqv(self, value: float) -> None: self.add_float32(Keys.Attention.CLAMP_KQV.format(arch=self.arch), value) + def add_shared_kv_layers(self, value: float) -> None: + self.add_float32(Keys.Attention.SHARED_KV_LAYERS.format(arch=self.arch), value) + + def add_sliding_window_pattern(self, value: Sequence[bool]) -> None: + self.add_array(Keys.Attention.SLIDING_WINDOW_PATTERN.format(arch=self.arch), value) + def add_logit_scale(self, value: float) -> None: self.add_float32(Keys.LLM.LOGIT_SCALE.format(arch=self.arch), value) diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 79f044d2a5945..b30f77dbe3be7 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -480,6 +480,70 @@ class TensorNameMap: "encoder.layer.{bid}.layer_norm_2" # jina-v2-code ), + MODEL_TENSOR.PER_LAYER_TOKEN_EMBD: ( + "model.embed_tokens_per_layer", # gemma3n + ), + + MODEL_TENSOR.PER_LAYER_MODEL_PROJ: ( + "model.per_layer_model_projection", # gemma3n + ), + + MODEL_TENSOR.PER_LAYER_PROJ_NORM: ( + "model.per_layer_projection_norm", # gemma3n + ), + + MODEL_TENSOR.ALTUP_PROJ: ( + "model.altup_projections", # gemma3n + ), + + MODEL_TENSOR.ALTUP_UNEMBD_PROJ: ( + "model.altup_unembed_projections", # gemma3n + ), + + MODEL_TENSOR.PER_LAYER_INP_GATE: ( + "model.layers.{bid}.per_layer_input_gate", # gemma3n + ), + + MODEL_TENSOR.PER_LAYER_PROJ: ( + "model.layers.{bid}.per_layer_projection", # gemma3n + ), + + MODEL_TENSOR.PER_LAYER_POST_NORM: ( + "model.layers.{bid}.post_per_layer_input_norm", # gemma3n + ), + + MODEL_TENSOR.ALTUP_CORRECT_COEF: ( + "model.layers.{bid}.altup.correction_coefs", # gemma3n + ), + + MODEL_TENSOR.ALTUP_CORRECT_SCALE: ( + "model.layers.{bid}.altup.correct_output_scale", # gemma3n + ), + + MODEL_TENSOR.ALTUP_PREDICT_COEF: ( + "model.layers.{bid}.altup.prediction_coefs", # gemma3n + ), + + MODEL_TENSOR.ALTUP_ROUTER: ( + "model.layers.{bid}.altup.modality_router", # gemma3n + ), + + MODEL_TENSOR.ALTUP_ROUTER_NORM: ( + "model.layers.{bid}.altup.router_norm", # gemma3n + ), + + MODEL_TENSOR.LAUREL_L: ( + "model.layers.{bid}.laurel.linear_left", # gemma3n + ), + + MODEL_TENSOR.LAUREL_R: ( + "model.layers.{bid}.laurel.linear_right", # gemma3n + ), + + MODEL_TENSOR.LAUREL_POST_NORM: ( + "model.layers.{bid}.laurel.post_laurel_norm", # gemma3n + ), + MODEL_TENSOR.SSM_IN: ( "model.layers.{bid}.in_proj", "backbone.layers.{bid}.mixer.in_proj", diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 8dadef204f9d7..435e3b9ba3db8 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -42,6 +42,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_GEMMA, "gemma" }, { LLM_ARCH_GEMMA2, "gemma2" }, { LLM_ARCH_GEMMA3, "gemma3" }, + { LLM_ARCH_GEMMA3N, "gemma3n" }, { LLM_ARCH_STARCODER2, "starcoder2" }, { LLM_ARCH_MAMBA, "mamba" }, { LLM_ARCH_XVERSE, "xverse" }, @@ -932,6 +933,42 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_POST_NORM, 
"blk.%d.post_ffw_norm" }, }, }, + { + LLM_ARCH_GEMMA3N, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, + { LLM_TENSOR_PER_LAYER_TOKEN_EMBD, "per_layer_token_embd" }, + { LLM_TENSOR_PER_LAYER_MODEL_PROJ, "per_layer_model_proj" }, + { LLM_TENSOR_PER_LAYER_PROJ_NORM, "per_layer_proj_norm" }, + { LLM_TENSOR_ALTUP_UNEMBD_PROJ, "altup_unembd_proj" }, + { LLM_TENSOR_ALTUP_PROJ, "altup_proj" }, + { LLM_TENSOR_PER_LAYER_INP_GATE, "blk.%d.inp_gate" }, + { LLM_TENSOR_PER_LAYER_PROJ, "blk.%d.proj" }, + { LLM_TENSOR_PER_LAYER_POST_NORM, "blk.%d.post_norm" }, + { LLM_TENSOR_ALTUP_CORRECT_COEF, "blk.%d.altup_correct_coef" }, + { LLM_TENSOR_ALTUP_CORRECT_SCALE, "blk.%d.altup_correct_scale" }, + { LLM_TENSOR_ALTUP_PREDICT_COEF, "blk.%d.altup_predict_coef" }, + { LLM_TENSOR_ALTUP_ROUTER, "blk.%d.altup_router" }, + { LLM_TENSOR_ALTUP_ROUTER_NORM, "blk.%d.altup_router_norm" }, + { LLM_TENSOR_LAUREL_L, "blk.%d.laurel_l" }, + { LLM_TENSOR_LAUREL_R, "blk.%d.laurel_r" }, + { LLM_TENSOR_LAUREL_POST_NORM, "blk.%d.laurel_post_norm" }, + }, + }, { LLM_ARCH_STARCODER2, { @@ -1749,6 +1786,23 @@ static const std::map LLM_TENSOR_INFOS = { {LLM_TENSOR_FFN_GATE_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}}, {LLM_TENSOR_FFN_UP_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}}, {LLM_TENSOR_FFN_EXP_PROBS_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + // altup / laurel (gemma 3n) + {LLM_TENSOR_PER_LAYER_TOKEN_EMBD, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_GET_ROWS}}, + {LLM_TENSOR_PER_LAYER_MODEL_PROJ, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_PER_LAYER_PROJ_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}}, + {LLM_TENSOR_ALTUP_PROJ, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ALTUP_UNEMBD_PROJ, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_PER_LAYER_INP_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_PER_LAYER_PROJ, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_PER_LAYER_POST_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ALTUP_CORRECT_COEF, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ALTUP_CORRECT_SCALE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ALTUP_PREDICT_COEF, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ALTUP_ROUTER, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ALTUP_ROUTER_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_LAUREL_L, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_LAUREL_R, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_LAUREL_POST_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, // this tensor is loaded for T5, but never used {LLM_TENSOR_DEC_CROSS_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}}, {LLM_TENSOR_CONV1D, {LLM_TENSOR_LAYER_INPUT, GGML_OP_IM2COL}}, diff --git a/src/llama-arch.h b/src/llama-arch.h index 
5b0230c150678..9181ad053f6b3 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -46,6 +46,7 @@ enum llm_arch { LLM_ARCH_GEMMA, LLM_ARCH_GEMMA2, LLM_ARCH_GEMMA3, + LLM_ARCH_GEMMA3N, LLM_ARCH_STARCODER2, LLM_ARCH_MAMBA, LLM_ARCH_XVERSE, @@ -269,6 +270,22 @@ enum llm_tensor { LLM_TENSOR_LAYER_OUT_NORM, LLM_TENSOR_POST_ATTN_NORM, LLM_TENSOR_POST_MLP_NORM, + LLM_TENSOR_PER_LAYER_TOKEN_EMBD, // gemma3n + LLM_TENSOR_PER_LAYER_MODEL_PROJ, // gemma3n + LLM_TENSOR_PER_LAYER_INP_GATE, // gemma3n + LLM_TENSOR_PER_LAYER_PROJ, // gemma3n + LLM_TENSOR_PER_LAYER_PROJ_NORM, // gemma3n + LLM_TENSOR_PER_LAYER_POST_NORM, // gemma3n + LLM_TENSOR_ALTUP_PROJ, // gemma3n + LLM_TENSOR_ALTUP_UNEMBD_PROJ, // gemma3n + LLM_TENSOR_ALTUP_CORRECT_COEF, // gemma3n + LLM_TENSOR_ALTUP_CORRECT_SCALE, // gemma3n + LLM_TENSOR_ALTUP_PREDICT_COEF, // gemma3n + LLM_TENSOR_ALTUP_ROUTER, // gemma3n + LLM_TENSOR_ALTUP_ROUTER_NORM, // gemma3n + LLM_TENSOR_LAUREL_L, // gemma3n + LLM_TENSOR_LAUREL_R, // gemma3n + LLM_TENSOR_LAUREL_POST_NORM, // gemma3n LLM_TENSOR_SSM_IN, LLM_TENSOR_SSM_CONV1D, LLM_TENSOR_SSM_X, diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 48589a50ab24d..71ee431a977ba 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -350,6 +350,12 @@ void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) { } } +void llm_graph_input_one::set_input(const llama_ubatch *) { + GGML_ASSERT(one && ggml_nelements(one) == 1); + float f_one = 1.0f; + ggml_backend_tensor_set(one, &f_one, 0, sizeof(float)); +} + // // llm_graph_context // @@ -1267,8 +1273,14 @@ ggml_tensor * llm_graph_context::build_attn( // these nodes are added to the graph together so that they are not reordered // by doing so, the number of splits in the graph is reduced ggml_build_forward_expand(gf, q_cur); - ggml_build_forward_expand(gf, k_cur); - ggml_build_forward_expand(gf, v_cur); + + if (k_cur) { + ggml_build_forward_expand(gf, k_cur); + } + + if (v_cur) { + ggml_build_forward_expand(gf, v_cur); + } const auto * mctx_iswa = static_cast(mctx); @@ -1276,9 +1288,12 @@ ggml_tensor * llm_graph_context::build_attn( const auto * mctx_cur = is_swa ? 
mctx_iswa->get_swa() : mctx_iswa->get_base(); - // store to KV cache - { + // optionally store to KV cache + if (k_cur) { ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, il)); + } + + if (v_cur) { ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, il)); } diff --git a/src/llama-graph.h b/src/llama-graph.h index b433f266d1b29..4b1ec354dfc30 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -329,6 +329,17 @@ class llm_graph_input_mem_hybrid : public llm_graph_input_i { const llama_memory_hybrid_context * mctx; }; +// TODO: remove this when ggml_scale_add is implemented +class llm_graph_input_one : public llm_graph_input_i { +public: + llm_graph_input_one() {} + virtual ~llm_graph_input_one() = default; + + void set_input(const llama_ubatch *) override; + + ggml_tensor * one = nullptr; // F32 +}; + // // llm_graph_result // @@ -589,14 +600,15 @@ struct llm_graph_context { llm_graph_input_attn_kv_unified_iswa * build_attn_inp_kv_unified_iswa() const; + // note: if k_cur or v_cur are not provided, they will not be stored in the memory ggml_tensor * build_attn( llm_graph_input_attn_kv_unified_iswa * inp, ggml_cgraph * gf, ggml_tensor * wo, ggml_tensor * wo_b, ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens] - ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens] - ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens] + ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens] optional + ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens] optional ggml_tensor * kq_b, ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v] float kq_scale, diff --git a/src/llama-hparams.h b/src/llama-hparams.h index 7b315a9a74b1d..e85afe145a922 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -143,6 +143,12 @@ struct llama_hparams { uint32_t n_attn_temp_floor_scale = 8192; float f_attn_temp_scale = 0.1; + // gemma3n altup + uint32_t n_altup = 4; // altup_num_inputs + uint32_t i_altup_act = 0; // altup_active_idx + uint32_t laurel_rank = 64; + uint32_t n_embd_altup = 256; + // needed by encoder-decoder models (e.g. 
T5, FLAN-T5) // ref: https://github.com/ggerganov/llama.cpp/pull/8141 llama_token dec_start_token_id = LLAMA_TOKEN_NULL; diff --git a/src/llama-kv-cache-unified.cpp b/src/llama-kv-cache-unified.cpp index b506d32ed4d06..8517b722a9f80 100644 --- a/src/llama-kv-cache-unified.cpp +++ b/src/llama-kv-cache-unified.cpp @@ -33,13 +33,19 @@ llama_kv_cache_unified::llama_kv_cache_unified( GGML_ASSERT(kv_size % n_pad == 0); + // TODO: this is temporary until we support passing reuse layer filters [KV_REUSE] + auto n_layer_cache = hparams.n_layer; + if (model.arch == LLM_ARCH_GEMMA3N) { + n_layer_cache = 20; + } + // create a context for each buffer type std::map ctx_map; auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { auto it = ctx_map.find(buft); if (it == ctx_map.end()) { ggml_init_params params = { - /*.mem_size =*/ size_t(2u*hparams.n_layer*ggml_tensor_overhead()), + /*.mem_size =*/ size_t(2u*n_layer_cache*ggml_tensor_overhead()), /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; @@ -62,7 +68,7 @@ llama_kv_cache_unified::llama_kv_cache_unified( cells.resize(kv_size); - for (uint32_t il = 0; il < hparams.n_layer; il++) { + for (uint32_t il = 0; il < n_layer_cache; il++) { if (filter && !filter(il)) { LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, il); continue; @@ -102,6 +108,26 @@ llama_kv_cache_unified::llama_kv_cache_unified( layers.push_back({ il, k, v }); } + // TODO: this is temporary until we support passing reuse layer filters [KV_REUSE] + if (model.arch == LLM_ARCH_GEMMA3N) { + LLAMA_LOG_DEBUG("%s: GEMMA3N: reuse layers [%d, %d]\n", __func__, n_layer_cache, hparams.n_layer - 1); + + for (uint32_t il = n_layer_cache; il < hparams.n_layer; il++) { + if (filter && !filter(il)) { + LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, il); + continue; + } + + const bool is_swa = hparams.is_swa(il); + const uint32_t il_reuse = n_layer_cache - (is_swa ? 2 : 1); + + GGML_ASSERT(map_layer_ids.find(il_reuse) != map_layer_ids.end()); + map_layer_ids[il] = map_layer_ids[il_reuse]; + + LLAMA_LOG_DEBUG("%s: layer %3d: reuse layer %d, isw = %d\n", __func__, il, il_reuse, is_swa); + } + } + // allocate tensors and initialize the buffers to avoid NaNs in the padding for (auto it : ctx_map) { auto * buft = it.first; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index c2835ce67a88d..fc39195ed5177 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -103,6 +103,8 @@ const char * llm_type_name(llm_type type) { case LLM_TYPE_17B_128E: return "17Bx128E (Maverick)"; case LLM_TYPE_30B_A3B: return "30B.A3B"; case LLM_TYPE_235B_A22B: return "235B.A22B"; + case LLM_TYPE_E2B: return "E2B"; + case LLM_TYPE_E4B: return "E4B"; default: return "?B"; } } @@ -1017,6 +1019,24 @@ void llama_model::load_hparams(llama_model_loader & ml) { ? 
1.0f / std::sqrt(float(hparams.n_embd / hparams.n_head(0))) : 1.0f / std::sqrt(float(hparams.n_embd_head_k)); } break; + case LLM_ARCH_GEMMA3N: + { + hparams.swa_type = LLAMA_SWA_TYPE_STANDARD; + hparams.set_swa_pattern(5); + + hparams.rope_freq_base_train_swa = 10000.0f; + hparams.rope_freq_scale_train_swa = 1.0f; + hparams.f_attention_scale = 1.0f; + + ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 30: type = LLM_TYPE_E2B; break; + case 35: type = LLM_TYPE_E4B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; case LLM_ARCH_STARCODER2: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); @@ -2950,6 +2970,62 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0); } } break; + case LLM_ARCH_GEMMA3N: + { + const int64_t n_altup = hparams.n_altup; + const int64_t laurel_rank = hparams.laurel_rank; + const int64_t n_embd_altup = hparams.n_embd_altup; + + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } + + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + tok_embd_per_layer = create_tensor(tn(LLM_TENSOR_PER_LAYER_TOKEN_EMBD, "weight"), {n_embd_altup * n_layer, n_vocab}, 0); + + altup_proj = create_tensor(tn(LLM_TENSOR_ALTUP_PROJ, "weight"), {n_embd, n_embd, n_altup - 1}, 0); + altup_unembd_proj = create_tensor(tn(LLM_TENSOR_ALTUP_UNEMBD_PROJ, "weight"), {n_embd, n_embd, n_altup - 1}, 0); + per_layer_model_proj = create_tensor(tn(LLM_TENSOR_PER_LAYER_MODEL_PROJ, "weight"), {n_embd, n_embd_altup * n_layer}, 0); + per_layer_proj_norm = create_tensor(tn(LLM_TENSOR_PER_LAYER_PROJ_NORM, "weight"), {n_embd_altup}, 0); + + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); + + layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0); + layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0); + layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0); + + // altup & laurel + layer.per_layer_inp_gate = create_tensor(tn(LLM_TENSOR_PER_LAYER_INP_GATE, "weight", i), {n_embd, 
n_embd_altup}, 0); + layer.per_layer_proj = create_tensor(tn(LLM_TENSOR_PER_LAYER_PROJ, "weight", i), {n_embd_altup, n_embd}, 0); + layer.per_layer_post_norm = create_tensor(tn(LLM_TENSOR_PER_LAYER_POST_NORM, "weight", i), {n_embd}, 0); + layer.altup_correct_coef = create_tensor(tn(LLM_TENSOR_ALTUP_CORRECT_COEF, "weight", i), {n_altup, n_altup}, 0); + layer.altup_correct_scale = create_tensor(tn(LLM_TENSOR_ALTUP_CORRECT_SCALE, "weight", i), {n_embd}, 0); + layer.altup_predict_coef = create_tensor(tn(LLM_TENSOR_ALTUP_PREDICT_COEF, "weight", i), {n_altup, n_altup * n_altup}, 0); + layer.altup_router = create_tensor(tn(LLM_TENSOR_ALTUP_ROUTER, "weight", i), {n_embd, n_altup}, 0); + layer.altup_router_norm = create_tensor(tn(LLM_TENSOR_ALTUP_ROUTER_NORM, "weight", i), {n_embd}, 0); + layer.laurel_l = create_tensor(tn(LLM_TENSOR_LAUREL_L, "weight", i), {n_embd, laurel_rank}, 0); + layer.laurel_r = create_tensor(tn(LLM_TENSOR_LAUREL_R, "weight", i), {laurel_rank, n_embd}, 0); + layer.laurel_post_norm = create_tensor(tn(LLM_TENSOR_LAUREL_POST_NORM, "weight", i), {n_embd}, 0); + } + } break; case LLM_ARCH_STARCODER2: { tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); @@ -8980,6 +9056,442 @@ struct llm_build_gemma3_iswa : public llm_graph_context { } }; +struct llm_build_gemma3n_iswa : public llm_graph_context { + const llama_model & model; + ggml_cgraph * gf; + + const int64_t n_embd_head; + const int64_t n_embd_altup; + const int64_t n_altup; + const int i_altup_act; + const int n_layer_kv = 20; // number of layers having KV [KV_REUSE] + const int n_layer_sparsity = 10; // number of layers using activation sparsity + const float f_sparsity_std_mul = 1.6448533535003662f; // std_multiplier = normal_dist.icdf(0.95) + + ggml_tensor * one; // containing single element 1.0f + + llm_build_gemma3n_iswa(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) + : llm_graph_context(params), + model(model), + gf(gf), + n_embd_head(model.hparams.n_embd_head_k), + n_embd_altup(model.hparams.n_embd_altup), + n_altup(model.hparams.n_altup), + i_altup_act(model.hparams.i_altup_act) { + ggml_tensor * cur; + ggml_tensor * inpL; + + // TODO: remove this when ggml_scale_add is implemented + one = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + { + auto inp = std::make_unique(); + inp->one = one; + res->add_input(std::move(inp)); + } + + inpL = build_inp_embd(model.tok_embd); + + // important: do not normalize weights for raw embeddings input (i.e. encoded image emdeddings) + if (ubatch.token) { + inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd)); + cb(inpL, "inp_scaled", -1); + } + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + // TODO: is causal == true correct? 
might need some changes + auto * inp_attn = build_attn_inp_kv_unified_iswa(); + + // inp_per_layer shape: [n_embd_altup, n_tokens, n_layer] + ggml_tensor * inp_per_layer = project_per_layer_inputs(inpL, get_per_layer_inputs()); + + // inpL now has only 1 altup, project it to the rest of the altups + // these "added" altups will be concat to the last dim of inpL + { + ggml_tensor * target_magnitude = calc_magnitude(inpL); + ggml_tensor * inp_repeated = ggml_repeat_4d(ctx0, inpL, n_embd, n_tokens, n_altup - 1, 1); + ggml_tensor * altup_added = ggml_mul_mat(ctx0, model.altup_proj, inp_repeated); // shape: [n_embd, n_tokens, n_altup - 1] + ggml_tensor * new_magnitude = calc_magnitude(altup_added); + altup_added = ggml_div(ctx0, + ggml_mul(ctx0, altup_added, target_magnitude), + new_magnitude); + inpL = ggml_concat(ctx0, inpL, altup_added, 2); // shape: [n_embd, n_tokens, n_altup] + cb(inpL, "inp_stacked", -1); + } + + // inpL now has shape: [n_embd, n_tokens, n_altup] + // inp_per_layer now has shape: [n_embd_altup, n_tokens, n_layer] + + for (int il = 0; il < n_layer; ++il) { + // this block is made to be closely resemble Gemma3p5DecoderLayer on python code + const bool has_kv = (il < n_layer_kv); + + const float freq_base_l = model.get_rope_freq_base (cparams, il); + const float freq_scale_l = model.get_rope_freq_scale(cparams, il); + + ggml_tensor * cur = inpL; // [n_embd, n_tokens, n_altup] + ggml_tensor * predictions = altup_predict(cur, il); // [n_embd, n_tokens, n_altup] + + // predicted value will go through self-attention and laurel + ggml_tensor * active_prediction = view_2d_slice(predictions, i_altup_act); // [n_embd, n_tokens] + cur = active_prediction; + cb(cur, "active_prediction", il); + + // norm + cur = build_norm(cur, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // laurel + ggml_tensor * laurel_out = laurel(cur, il); // [n_embd, n_tokens] + + // self-attention + if (has_kv) { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); + Vcur = ggml_rms_norm(ctx0, Vcur, hparams.f_norm_rms_eps); + + cb(Qcur, "Qcur_normed", il); + cb(Kcur, "Kcur_normed", il); + cb(Vcur, "Vcur_normed", il); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, + ext_factor, attn_factor, beta_fast, beta_slow); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, + ext_factor, attn_factor, beta_fast, beta_slow); + + cb(Qcur, "Qcur_pos", il); + cb(Kcur, "Kcur_pos", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, nullptr, hparams.f_attention_scale, il); + } else { + // no KV layers + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + + Qcur = build_norm(Qcur, 
model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + cb(Qcur, "Qcur_normed", il); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Qcur, "Qcur_pos", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, nullptr, nullptr, nullptr, nullptr, hparams.f_attention_scale, il); + } + + cur = build_norm(cur, + model.layers[il].attn_post_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_post_norm", il); + + cur = ggml_add(ctx0, cur, active_prediction); // [n_embd, n_tokens] + cb(cur, "attn_gated", il); + + ggml_tensor * attn_laurel = ggml_scale(ctx0, + ggml_add(ctx0, cur, laurel_out), + 1.0f / sqrtf(2.0f)); // [n_embd, n_tokens] + cb(attn_laurel, "attn_laurel", il); + + cur = build_norm(attn_laurel, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // feed-forward network + { + ggml_tensor * up_proj = build_lora_mm(model.layers[il].ffn_up, cur); + ggml_tensor * gate_proj = build_lora_mm(model.layers[il].ffn_gate, cur); + + if (il < n_layer_sparsity) { + // apply activation sparsity + gate_proj = gaussian_topk(gate_proj); + } + gate_proj = ggml_gelu(ctx0, gate_proj); + + cur = ggml_mul(ctx0, up_proj, gate_proj); + cur = build_lora_mm(model.layers[il].ffn_down, cur); + cb(cur, "ffn_out", il); + } + + cur = build_norm(cur, + model.layers[il].ffn_post_norm, NULL, + LLM_NORM_RMS, -1); + cb(cur, "ffn_post_norm", il); + + ggml_tensor * attn_ffw_laurel_gated = ggml_add(ctx0, cur, attn_laurel); // [n_embd, n_tokens] + cb(attn_ffw_laurel_gated, "attn_ffw_laurel_gated", il); + + ggml_tensor * corrected = altup_correct(predictions, attn_ffw_laurel_gated, il); // [n_embd, n_tokens, n_altup] + + ggml_tensor * first_prediction; // [n_embd, n_tokens] + { + first_prediction = view_2d_slice(corrected, i_altup_act); // [n_embd, n_tokens] + first_prediction = ggml_mul(ctx0, first_prediction, model.layers[il].altup_correct_scale); + first_prediction = build_lora_mm(model.layers[il].per_layer_inp_gate, first_prediction); + first_prediction = ggml_gelu(ctx0, first_prediction); // [n_embd_altup, n_tokens] + cb(first_prediction, "first_prediction_gated", il); + ggml_tensor * inp_this_layer = view_2d_slice(inp_per_layer, il); // [n_embd_altup, n_tokens] + first_prediction = ggml_mul(ctx0, first_prediction, inp_this_layer); // [n_embd_altup, n_tokens] + cb(first_prediction, "first_prediction_scaled", il); + + first_prediction = build_lora_mm(model.layers[il].per_layer_proj, first_prediction); // [n_embd, n_tokens] + first_prediction = build_norm(first_prediction, + model.layers[il].per_layer_post_norm, NULL, + LLM_NORM_RMS, il); + cb(first_prediction, "first_prediction_out", il); + } + + // equivalent to python code: corrected_predictions[1:] += first_prediction + { + ggml_tensor * slice_first = view_2d_slice(corrected, 0); + ggml_tensor * slice_rest = ggml_view_3d(ctx0, corrected, n_embd, n_tokens, n_altup - 1, + ggml_row_size(corrected->type, n_embd), + ggml_row_size(corrected->type, n_embd*n_tokens), + n_embd*n_tokens*ggml_element_size(corrected)); + ggml_tensor * tmp = ggml_add(ctx0, slice_rest, first_prediction); // [n_embd, n_tokens, n_altup - 1] + corrected = ggml_concat(ctx0, slice_first, tmp, 2); // [n_embd, n_tokens, n_altup] + } + + cur = corrected; // [n_embd, n_tokens, n_altup] + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; // [n_embd, n_tokens, n_altup] + + // 
cur now has multiple altup(s), we want to merge them back to 1 altup + { + ggml_tensor * target_magnitude = calc_magnitude(view_2d_slice(cur, i_altup_act)); // [n_embd, n_tokens] + // do a view to skip the first slice (active altup) + ggml_tensor * alt_slice = ggml_view_3d(ctx0, cur, n_embd, n_tokens, n_altup - 1, + ggml_row_size(cur->type, n_embd), + ggml_row_size(cur->type, n_embd*n_tokens), + n_embd*n_tokens*ggml_element_size(cur)); + ggml_tensor * altup_unembd = ggml_mul_mat(ctx0, model.altup_unembd_proj, alt_slice); // shape: [n_embd, n_tokens, n_altup - 1] + ggml_tensor * new_magnitude = calc_magnitude(altup_unembd); + altup_unembd = ggml_div(ctx0, + ggml_mul(ctx0, altup_unembd, target_magnitude), + new_magnitude); + cb(altup_unembd, "altup_unembd", -1); + + // equivalent to torch.mean(hidden_states, dim=0) + cur = view_2d_slice(cur, 0); // [n_embd, n_tokens] + for (int i = 0; i < n_altup - 1; ++i) { + cur = ggml_add(ctx0, cur, view_2d_slice(altup_unembd, i)); + } + cur = ggml_scale(ctx0, cur, 1.0f / float(n_altup)); // [n_embd, n_tokens] + cb(cur, "unembd_merged", -1); + } + + // cur now has shape: [n_embd, n_tokens] + + // TODO: move this to right after the last KV layer + { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + } + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + { + // final logit soft-capping + cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_final_logit_softcapping); + cur = ggml_tanh(ctx0, cur); + cur = ggml_scale(ctx0, cur, hparams.f_final_logit_softcapping); + } + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } + + ggml_tensor * calc_magnitude(ggml_tensor * x) { + return ggml_sqrt(ctx0, ggml_sum_rows(ctx0, ggml_sqr(ctx0, x))); + } + + // get 2D slice view from a 3D tensor, the idx corresponds to the 3rd dim + ggml_tensor * view_2d_slice(ggml_tensor * x, int idx) { + GGML_ASSERT(idx < (int)x->ne[2]); + return ggml_view_2d(ctx0, x, x->ne[0], x->ne[1], + ggml_row_size(x->type, x->ne[0]), + idx * x->ne[0] * x->ne[1] * ggml_element_size(x)); + } + + // equivalent to get_per_layer_inputs() in python code + // output shape: [n_embd_altup, n_layer, n_tokens] + ggml_tensor * get_per_layer_inputs() { + auto inp = std::make_unique(); + ggml_tensor * inp_per_layer; + if (ubatch.token) { + inp->tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_tokens); + ggml_set_input(inp->tokens); + res->t_tokens = inp->tokens; + inp_per_layer = ggml_get_rows(ctx0, model.tok_embd_per_layer, inp->tokens); + inp_per_layer = ggml_reshape_3d(ctx0, inp_per_layer, n_embd_altup, n_layer, n_tokens); + inp_per_layer = ggml_scale(ctx0, inp_per_layer, sqrtf((float)n_embd_altup)); + cb(inp_per_layer, "inp_per_layer_selected", -1); + } else { + GGML_ABORT("TODO: support embd input"); + } + res->add_input(std::move(inp)); + return inp_per_layer; + } + + // equivalent to project_per_layer_inputs() in python code + // this calculates the per-layer inputs, so the final tensor shape will have n_layer as the last dim + // output shape: [n_embd_altup, n_tokens, n_layer] + ggml_tensor * project_per_layer_inputs(ggml_tensor * inputs_embeds, ggml_tensor * inp_per_layer) { + const float per_layer_projection_scale = 1.0f / sqrtf((float)n_embd); + const float per_layer_input_scale = 1.0f / sqrtf(2.0f); + + ggml_tensor * per_layer_proj = 
ggml_mul_mat(ctx0, model.per_layer_model_proj, inputs_embeds); + per_layer_proj = ggml_scale(ctx0, per_layer_proj, per_layer_projection_scale); + per_layer_proj = ggml_reshape_3d(ctx0, per_layer_proj, n_embd_altup, n_layer, n_tokens); + per_layer_proj = build_norm(per_layer_proj, + model.per_layer_proj_norm, NULL, + LLM_NORM_RMS, -1); // [n_embd_altup, n_layer, n_tokens] + cb(per_layer_proj, "per_layer_proj", -1); + + inp_per_layer = ggml_add(ctx0, inp_per_layer, per_layer_proj); + inp_per_layer = ggml_scale(ctx0, inp_per_layer, per_layer_input_scale); + cb(inp_per_layer, "inp_per_layer", -1); + + // permute to shape: [n_embd_altup, n_tokens, n_layer] + inp_per_layer = ggml_cont(ctx0, ggml_permute(ctx0, inp_per_layer, 0, 2, 1, 3)); + return inp_per_layer; + } + + // input cur shape: [n_altup, n_tokens] + // output shape: [n_altup, n_tokens] + ggml_tensor * laurel(ggml_tensor * cur, int il) { + ggml_tensor * tmp = cur; + tmp = build_lora_mm(model.layers[il].laurel_l, tmp); + tmp = build_lora_mm(model.layers[il].laurel_r, tmp); + tmp = build_norm(tmp, model.layers[il].laurel_post_norm, NULL, LLM_NORM_RMS, il); + tmp = ggml_add(ctx0, tmp, cur); + cb(tmp, "laurel_out", il); + return tmp; + } + + // input x shape: [n_embd, n_tokens] + // output shape: [n_embd, n_tokens] + ggml_tensor * gaussian_topk(ggml_tensor * x) { + ggml_tensor * mean = ggml_mean(ctx0, x); + ggml_tensor * std = ggml_sqrt(ctx0, ggml_scale(ctx0, + ggml_sum_rows(ctx0, ggml_sqr(ctx0, ggml_sub(ctx0, x, mean))), + 1.0f / (float)(x->ne[0] - 1) + )); + ggml_tensor * cutoff_x = ggml_add(ctx0, mean, ggml_scale(ctx0, std, f_sparsity_std_mul)); + return ggml_relu(ctx0, ggml_sub(ctx0, x, cutoff_x)); + } + + // + // altup functions + // + + // equivalent to compute_router_modalities() in python code + // input x shape: [n_embd, n_tokens] + // output shape: [n_altup, n_tokens] + ggml_tensor * altup_compute_router_modalities(ggml_tensor * x, int il) { + ggml_tensor * router_inputs = build_norm(x, + model.layers[il].altup_router_norm, NULL, + LLM_NORM_RMS, il); + + // router_input_scale + router_inputs = ggml_scale(ctx0, router_inputs, 1.0f / (float)n_embd); + + ggml_tensor * output = ggml_mul_mat(ctx0, model.layers[il].altup_router, router_inputs); + return ggml_tanh(ctx0, output); // [n_altup, n_tokens] + } + + // input cur shape: [n_embd, n_tokens, n_altup] + // output shape: [n_embd, n_tokens, n_altup] + ggml_tensor * altup_predict(ggml_tensor * cur, int il) { + ggml_tensor * activated = view_2d_slice(cur, i_altup_act); // [n_embd, n_tokens] + ggml_tensor * modalities = altup_compute_router_modalities(activated, il); // [n_altup, n_tokens] + cb(modalities, "modalities", il); + + ggml_tensor * all_coefs = build_lora_mm(model.layers[il].altup_predict_coef, modalities); + cb(all_coefs, "all_coefs", il); + // first dim now having n_altup^2 elements, we reshape it to 2D (so we end up with 3D tensor) + all_coefs = ggml_reshape_3d(ctx0, all_coefs, n_altup, n_altup, n_tokens); + + // permute to [n_altup, n_embd, n_tokens] + ggml_tensor * cur_permuted = ggml_cont(ctx0, ggml_permute(ctx0, cur, 1, 2, 0, 3)); + ggml_tensor * predictions = ggml_mul_mat(ctx0, cur_permuted, all_coefs); // [n_altup, n_embd, n_tokens] + + // final shape must be the same as cur: [n_embd, n_tokens, n_altup] + predictions = ggml_cont(ctx0, ggml_permute(ctx0, predictions, 0, 2, 1, 3)); + predictions = ggml_add(ctx0, predictions, cur); + cb(predictions, "predictions", il); + + return predictions; + } + + // input predictions shape: [n_embd, n_tokens, n_altup] + // input 
activated shape: [n_embd, n_tokens] + // output shape: [n_embd, n_tokens, n_altup] + ggml_tensor * altup_correct(ggml_tensor * predictions, ggml_tensor * activated, int il) { + ggml_tensor * modalities = altup_compute_router_modalities(activated, il); // [n_altup, n_tokens] + cb(modalities, "modalities", il); + + ggml_tensor * active_prediction = view_2d_slice(predictions, i_altup_act); + ggml_tensor * innovation = ggml_sub(ctx0, activated, active_prediction); // [n_embd, n_tokens] + cb(innovation, "innovation", il); + + ggml_tensor * all_coefs = build_lora_mm(model.layers[il].altup_correct_coef, modalities); // [n_altup, n_tokens] + all_coefs = ggml_add(ctx0, all_coefs, one); + cb(all_coefs, "all_coefs", il); + all_coefs = ggml_cont(ctx0, ggml_transpose(ctx0, all_coefs)); // [n_tokens, n_altup] + all_coefs = ggml_reshape_3d(ctx0, all_coefs, 1, n_tokens, n_altup); // [1, n_tokens, n_altup] + + innovation = ggml_repeat_4d(ctx0, innovation, n_embd, n_tokens, n_altup, 1); + ggml_tensor * corrected = ggml_mul(ctx0, innovation, all_coefs); // [n_embd, n_tokens, n_altup] + corrected = ggml_add(ctx0, corrected, predictions); // [n_embd, n_tokens, n_altup] + cb(corrected, "corrected", il); + + return corrected; + } +}; + // TODO: move up next to build_starcoder struct llm_build_starcoder2 : public llm_graph_context { llm_build_starcoder2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { @@ -13974,6 +14486,10 @@ llm_graph_result_ptr llama_model::build_graph( { llm = std::make_unique(*this, params, gf); } break; + case LLM_ARCH_GEMMA3N: + { + llm = std::make_unique(*this, params, gf); + } break; case LLM_ARCH_STARCODER2: { llm = std::make_unique(*this, params, gf); @@ -14295,6 +14811,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_GEMMA: case LLM_ARCH_GEMMA2: case LLM_ARCH_GEMMA3: + case LLM_ARCH_GEMMA3N: case LLM_ARCH_STARCODER2: case LLM_ARCH_OPENELM: case LLM_ARCH_GPTNEOX: diff --git a/src/llama-model.h b/src/llama-model.h index 06e6c687943cc..40063b790d434 100644 --- a/src/llama-model.h +++ b/src/llama-model.h @@ -95,6 +95,8 @@ enum llm_type { LLM_TYPE_17B_128E, // llama4 Maverick LLM_TYPE_30B_A3B, LLM_TYPE_235B_A22B, + LLM_TYPE_E2B, + LLM_TYPE_E4B, }; std::string llama_rope_scaling_type_name(llama_rope_scaling_type rope_scaling_type); @@ -316,6 +318,19 @@ struct llama_layer { struct ggml_tensor * ffn_up_scale = nullptr; struct ggml_tensor * ffn_down_scale = nullptr; + // altup & laurel + struct ggml_tensor * per_layer_inp_gate = nullptr; + struct ggml_tensor * per_layer_proj = nullptr; + struct ggml_tensor * per_layer_post_norm = nullptr; + struct ggml_tensor * altup_correct_coef = nullptr; + struct ggml_tensor * altup_correct_scale = nullptr; + struct ggml_tensor * altup_predict_coef = nullptr; + struct ggml_tensor * altup_router = nullptr; + struct ggml_tensor * altup_router_norm = nullptr; + struct ggml_tensor * laurel_l = nullptr; + struct ggml_tensor * laurel_r = nullptr; + struct ggml_tensor * laurel_post_norm = nullptr; + struct llama_layer_posnet posnet; struct llama_layer_convnext convnext; @@ -354,6 +369,13 @@ struct llama_model { struct ggml_tensor * conv1d = nullptr; struct ggml_tensor * conv1d_b = nullptr; + // gemma3n altup + struct ggml_tensor * tok_embd_per_layer = nullptr; + struct ggml_tensor * altup_proj = nullptr; + struct ggml_tensor * altup_unembd_proj = nullptr; + struct ggml_tensor * per_layer_model_proj = nullptr; + struct ggml_tensor * per_layer_proj_norm = nullptr; + 
std::vector layers; llama_model_params params; diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 43229e1938597..f4b5713d7dd9a 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -223,7 +223,7 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t new_type = GGML_TYPE_Q6_K; } } - } else if (name == "token_embd.weight") { + } else if (name == "token_embd.weight" || name == "per_layer_token_embd.weight") { if (qs.params->token_embedding_type < GGML_TYPE_COUNT) { new_type = qs.params->token_embedding_type; } else { @@ -830,6 +830,13 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // NOTE: can't use LLM_TN here because the layer number is not known quantize &= name.find("ffn_gate_inp.weight") == std::string::npos; + // these are very small (e.g. 4x4) + quantize &= name.find("altup") == std::string::npos; + quantize &= name.find("laurel") == std::string::npos; + + // these are not too big so keep them as it is + quantize &= name.find("per_layer_model_proj") == std::string::npos; + // do not quantize positional embeddings and token types (BERT) quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight"); quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_TOKEN_TYPES, "weight"); From 19101789b7fd3e7808d64a9b88f137ea0106c713 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Fri, 27 Jun 2025 10:42:19 +0200 Subject: [PATCH 158/192] convert : fix broken sentencepiece vocab (#14416) --- convert_hf_to_gguf.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 4f2339a02a13c..aed595e259ed5 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -936,7 +936,11 @@ def _create_vocab_sentencepiece(self): scores: list[float] = [-10000.0] * vocab_size toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size - for token_id in range(vocab_size): + for token_id in range(tokenizer.vocab_size()): + if token_id >= vocab_size: + logger.warning(f'ignore tokens from {token_id}: id is out of range, max={vocab_size - 1}') + break + piece = tokenizer.IdToPiece(token_id) text = piece.encode("utf-8") score = tokenizer.GetScore(token_id) @@ -951,10 +955,6 @@ def _create_vocab_sentencepiece(self): elif tokenizer.IsByte(token_id): toktype = SentencePieceTokenTypes.BYTE - if token_id >= vocab_size: - logger.warning(f'ignore tokens from {token_id}: id is out of range, max={vocab_size - 1}') - break - tokens[token_id] = text scores[token_id] = score toktypes[token_id] = toktype From 827dd5b34c40a522cfcacfd0088e586d74d993d1 Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Fri, 27 Jun 2025 16:41:40 +0300 Subject: [PATCH 159/192] ggml : add ggml_set_rows (#14274) * ggml : add ggml_set_rows Add ggml_set_rows(a, b, c) which copies rows from 'b' into 'a' using indices from 'c'. 
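A minimal usage sketch (buffer size, tensor shapes and thread count are illustrative assumptions; assumes the CPU backend via ggml_graph_compute_with_ctx):

    #include "ggml.h"
    #include "ggml-cpu.h"

    // scatter 4 source rows into a 16-row F32 destination
    struct ggml_init_params ip = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(ip);

    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 16); // destination
    struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8,  4); // source rows
    struct ggml_tensor * c = ggml_new_tensor_1d(ctx, GGML_TYPE_I64,     4); // row indices, values in [0, 16)

    // ... fill a, b and c with data here ...

    struct ggml_tensor * out = ggml_set_rows(ctx, a, b, c); // returns a view of 'a'

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, out);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads =*/ 1);

    ggml_free(ctx);
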
ref: #8366 * use I64 for indices * ggml : add repeat impl for i64 * ggml : add ggml_is_contiguous_rows * ggml : ggml_set_rows support broadcast * ggml : ggml_set_rows support quantized dst ggml-ci * ggml : support GGML_TYPE_F32 ".from_float" trait * ggml : ggml_set_rows update comment + better index name * tests : add ggml_set_rows * metal : add ggml_set_rows implementation ggml-ci * ggml : simplify forward_dup_f32 * ggml : fix supports_op * tests : add comment to set_rows * ggml : leave the repeat_i64 for a separate PR ggml-ci * ggml : set_rows use std::min instead of MIN * ggml : better error message for set_rows unsupported type * metal : perform op->type check only once * tests : more consistent implementation + more tests ggml-ci --------- Co-authored-by: Georgi Gerganov --- examples/eval-callback/eval-callback.cpp | 2 + ggml/include/ggml-cpu.h | 1 + ggml/include/ggml.h | 21 + ggml/src/ggml-cpu/ggml-cpu.c | 10 + ggml/src/ggml-cpu/ggml-cpu.cpp | 1 + ggml/src/ggml-cpu/ops.cpp | 96 ++++- ggml/src/ggml-cpu/ops.h | 1 + ggml/src/ggml-metal/ggml-metal-impl.h | 16 + ggml/src/ggml-metal/ggml-metal.m | 112 +++++- ggml/src/ggml-metal/ggml-metal.metal | 469 ++++++++++++++--------- ggml/src/ggml.c | 41 +- tests/test-backend-ops.cpp | 87 +++++ 12 files changed, 653 insertions(+), 204 deletions(-) diff --git a/examples/eval-callback/eval-callback.cpp b/examples/eval-callback/eval-callback.cpp index fb188f5a9e132..bbbec6a01a175 100644 --- a/examples/eval-callback/eval-callback.cpp +++ b/examples/eval-callback/eval-callback.cpp @@ -55,6 +55,8 @@ static void ggml_print_tensor(uint8_t * data, ggml_type type, const int64_t * ne v = ggml_fp16_to_fp32(*(ggml_fp16_t *) &data[i]); } else if (type == GGML_TYPE_F32) { v = *(float *) &data[i]; + } else if (type == GGML_TYPE_I64) { + v = (float) *(int64_t *) &data[i]; } else if (type == GGML_TYPE_I32) { v = (float) *(int32_t *) &data[i]; } else if (type == GGML_TYPE_I16) { diff --git a/ggml/include/ggml-cpu.h b/ggml/include/ggml-cpu.h index e3b79d09bb66f..be40b100979de 100644 --- a/ggml/include/ggml-cpu.h +++ b/ggml/include/ggml-cpu.h @@ -134,6 +134,7 @@ extern "C" { GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cpu_reg(void); + GGML_BACKEND_API void ggml_cpu_fp32_to_fp32(const float *, float *, int64_t); GGML_BACKEND_API void ggml_cpu_fp32_to_fp16(const float *, ggml_fp16_t *, int64_t); GGML_BACKEND_API void ggml_cpu_fp16_to_fp32(const ggml_fp16_t *, float *, int64_t); GGML_BACKEND_API void ggml_cpu_fp32_to_bf16(const float *, ggml_bf16_t *, int64_t); diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 9c4e24023b5ad..2b1bd6e0f48b9 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -470,6 +470,7 @@ extern "C" { GGML_OP_TRANSPOSE, GGML_OP_GET_ROWS, GGML_OP_GET_ROWS_BACK, + GGML_OP_SET_ROWS, GGML_OP_DIAG, GGML_OP_DIAG_MASK_INF, GGML_OP_DIAG_MASK_ZERO, @@ -687,6 +688,9 @@ extern "C" { // true for tensor that is stored in memory as CxWxHxN and has been permuted to WxHxCxN GGML_API bool ggml_is_contiguous_channels(const struct ggml_tensor * tensor); + // true if the elements in dimension 0 are contiguous, or there is just 1 block of elements + GGML_API bool ggml_is_contiguous_rows(const struct ggml_tensor * tensor); + GGML_API bool ggml_are_same_shape (const struct ggml_tensor * t0, const struct ggml_tensor * t1); GGML_API bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1); @@ -1375,6 +1379,23 @@ extern "C" { struct ggml_tensor * b, // row indices struct ggml_tensor * c); // data for ggml_get_rows, only used 
for its shape + // a TD [n_embd, ne1, ne2, ne3] + // b TS [n_embd, n_rows, ne02, ne03] | ne02 == ne2, ne03 == ne3 + // c I64 [n_rows, ne11, ne12, 1] | c[i] in [0, ne1) + // + // undefined behavior if destination rows overlap + // + // broadcast: + // ne2 % ne11 == 0 + // ne3 % ne12 == 0 + // + // return view(a) + GGML_API struct ggml_tensor * ggml_set_rows( + struct ggml_context * ctx, + struct ggml_tensor * a, // destination + struct ggml_tensor * b, // source + struct ggml_tensor * c); // row indices + GGML_API struct ggml_tensor * ggml_diag( struct ggml_context * ctx, struct ggml_tensor * a); diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index 7cae96f4b4885..2042ee71f1f80 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -195,6 +195,7 @@ typedef pthread_t ggml_thread_t; static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { [GGML_TYPE_F32] = { + .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_fp32, .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32, .vec_dot_type = GGML_TYPE_F32, .nrows = 1, @@ -1817,6 +1818,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_get_rows_back(params, tensor); } break; + case GGML_OP_SET_ROWS: + { + ggml_compute_forward_set_rows(params, tensor); + } break; case GGML_OP_DIAG: { ggml_compute_forward_diag(params, tensor); @@ -2170,6 +2175,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { n_tasks = n_threads; } break; case GGML_OP_GET_ROWS: + case GGML_OP_SET_ROWS: { // FIXME: get_rows can use additional threads, but the cost of launching additional threads // decreases performance with GPU offloading @@ -3124,6 +3130,10 @@ enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct g return ggml_graph_compute(cgraph, &cplan); } +void ggml_cpu_fp32_to_fp32(const float * x, float * y, int64_t n) { + memcpy(y, x, n * sizeof(float)); +} + void ggml_cpu_fp32_to_fp16(const float * x, ggml_fp16_t * y, int64_t n) { int64_t i = 0; #if defined(__F16C__) diff --git a/ggml/src/ggml-cpu/ggml-cpu.cpp b/ggml/src/ggml-cpu/ggml-cpu.cpp index a98866a2d8052..c9daa4c39e83e 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.cpp +++ b/ggml/src/ggml-cpu/ggml-cpu.cpp @@ -416,6 +416,7 @@ static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const st switch (op->op) { case GGML_OP_CPY: + case GGML_OP_SET_ROWS: return op->type != GGML_TYPE_IQ3_XXS && op->type != GGML_TYPE_IQ3_S && diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 8531baf6c57fb..9f17ea43c8553 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -696,24 +696,8 @@ static void ggml_compute_forward_dup_f32( if (ggml_is_contiguous(dst)) { // TODO: simplify if (nb00 == sizeof(float)) { - if (dst->type == GGML_TYPE_F32) { - size_t id = 0; - const size_t rs = ne00 * nb00; - char * dst_ptr = (char *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03; - memcpy(dst_ptr + id, src0_ptr, rs); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else if (ggml_get_type_traits_cpu(dst->type)->from_float) { - ggml_from_float_t const quantize_row_q = ggml_get_type_traits_cpu(dst->type)->from_float; + if (ggml_get_type_traits_cpu(dst->type)->from_float) { + ggml_from_float_t const from_float = 
ggml_get_type_traits_cpu(dst->type)->from_float; size_t id = 0; size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type)); @@ -724,7 +708,7 @@ static void ggml_compute_forward_dup_f32( id += rs * ir0; for (int i01 = ir0; i01 < ir1; i01++) { const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); - quantize_row_q(src0_ptr, dst_ptr + id, ne00); + from_float(src0_ptr, dst_ptr + id, ne00); id += rs; } id += rs * (ne01 - ir1); @@ -2300,6 +2284,12 @@ void ggml_compute_forward_repeat( { ggml_compute_forward_repeat_f32(params, dst); } break; + // TODO: templateify the implemenation and support for I64 + // ref https://github.com/ggml-org/llama.cpp/pull/14274#discussion_r2169492225 + //case GGML_TYPE_I64: + // { + // ggml_compute_forward_repeat_i64(params, dst); + // } break; default: { GGML_ABORT("fatal error"); @@ -4470,6 +4460,74 @@ void ggml_compute_forward_get_rows( //} } +static void ggml_compute_forward_set_rows_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ne01; + + assert(ne0 == nc); + assert(ne2 == ne02); + assert(ne3 == ne03); + assert(src0->type == GGML_TYPE_F32); + assert(ne02 % ne11 == 0); + assert(ne03 % ne12 == 0); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int64_t dr = (nr + nth - 1)/nth; + + // row range for this thread + const int64_t ir0 = dr*ith; + const int64_t ir1 = std::min(ir0 + dr, nr); + + ggml_from_float_t const from_float = ggml_get_type_traits_cpu(dst->type)->from_float; + + for (int64_t i03 = 0; i03 < ne03; ++i03) { + for (int64_t i02 = 0; i02 < ne02; ++i02) { + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i03%ne12; + const int64_t i11 = i02%ne11; + const int64_t i10 = i; + + const int64_t i1 = *(int64_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12); + + GGML_ASSERT(i1 >= 0 && i1 < ne1); + + from_float( + (const float *) ((char *) src0->data + i*nb01 + i02*nb02 + i03*nb03), + ((char *) dst->data + i1*nb1 + i02*nb2 + i03*nb3), nc); + } + } + } +} + +void ggml_compute_forward_set_rows( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_set_rows_f32(params, dst); + } break; + default: + { + GGML_ABORT("src0->type = %d (%s) not supported", src0->type, ggml_type_name(src0->type)); + } + } +} + // ggml_compute_forward_get_rows_back static void ggml_compute_forward_get_rows_back_f32_f16( diff --git a/ggml/src/ggml-cpu/ops.h b/ggml/src/ggml-cpu/ops.h index 2d8544d7d3d43..3a395fdcd9f04 100644 --- a/ggml/src/ggml-cpu/ops.h +++ b/ggml/src/ggml-cpu/ops.h @@ -53,6 +53,7 @@ void ggml_compute_forward_permute(const struct ggml_compute_params * params, str void ggml_compute_forward_transpose(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_get_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_get_rows_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); +void ggml_compute_forward_set_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_diag(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_diag_mask_inf(const struct ggml_compute_params * params, struct 
ggml_tensor * dst); void ggml_compute_forward_diag_mask_zero(const struct ggml_compute_params * params, struct ggml_tensor * dst); diff --git a/ggml/src/ggml-metal/ggml-metal-impl.h b/ggml/src/ggml-metal/ggml-metal-impl.h index 17eab976f3ad1..260440aedde00 100644 --- a/ggml/src/ggml-metal/ggml-metal-impl.h +++ b/ggml/src/ggml-metal/ggml-metal-impl.h @@ -521,6 +521,22 @@ typedef struct { uint64_t nb2; } ggml_metal_kargs_get_rows; +typedef struct { + int32_t nk0; + int32_t ne01; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + int32_t ne11; + int32_t ne12; + uint64_t nb10; + uint64_t nb11; + uint64_t nb12; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; +} ggml_metal_kargs_set_rows; + typedef struct { int64_t ne00; int64_t ne01; diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index d8d30cc0b41ca..349f0ff998ed1 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -202,6 +202,15 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL, GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, + GGML_METAL_KERNEL_TYPE_SET_ROWS_F32, + GGML_METAL_KERNEL_TYPE_SET_ROWS_F16, + GGML_METAL_KERNEL_TYPE_SET_ROWS_BF16, + GGML_METAL_KERNEL_TYPE_SET_ROWS_Q8_0, + GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_0, + GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_1, + GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_0, + GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_1, + GGML_METAL_KERNEL_TYPE_SET_ROWS_IQ4_NL, GGML_METAL_KERNEL_TYPE_RMS_NORM, GGML_METAL_KERNEL_TYPE_L2_NORM, GGML_METAL_KERNEL_TYPE_GROUP_NORM, @@ -1169,6 +1178,15 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL, get_rows_iq4_nl, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, get_rows_iq4_xs, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, get_rows_i32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_F32, set_rows_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_F16, set_rows_f16, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_BF16, set_rows_bf16, use_bfloat); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q8_0, set_rows_q8_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_0, set_rows_q4_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_1, set_rows_q4_1, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_0, set_rows_q5_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_1, set_rows_q5_1, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_IQ4_NL, set_rows_iq4_nl, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RMS_NORM, rms_norm, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_L2_NORM, l2_norm, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GROUP_NORM, group_norm, has_simdgroup_reduction); @@ -1635,6 +1653,10 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex const bool use_bfloat = ctx_dev->use_bfloat; if (!use_bfloat) { + if (op->type == GGML_TYPE_BF16) { + return false; + } + for (size_t i = 0, n = 3; i < n; ++i) { if (op->src[i] != NULL && op->src[i]->type == GGML_TYPE_BF16) { return false; @@ -1804,6 +1826,27 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex { return op->ne[3] == 1; } + case GGML_OP_SET_ROWS: + { + if (op->src[0]->type != GGML_TYPE_F32) { + return false; + } + + switch (op->type) { 
+ case GGML_TYPE_F32: + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_IQ4_NL: + return true; + default: + return false; + }; + } default: return false; } @@ -3777,13 +3820,74 @@ static bool ggml_metal_encode_node( }; [encoder setComputePipelineState:pipeline]; - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; - [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; - [encoder setBytes:&args length:sizeof(args) atIndex:3]; + [encoder setBytes:&args length:sizeof(args) atIndex:0]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; [encoder dispatchThreadgroups:MTLSizeMake(ne10, ne11, 1) threadsPerThreadgroup:MTLSizeMake(32, 1, 1)]; } break; + case GGML_OP_SET_ROWS: + { + id pipeline = nil; + + switch (dst->type) { + case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_F32 ].pipeline; break; + case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_F16 ].pipeline; break; + case GGML_TYPE_BF16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_BF16 ].pipeline; break; + case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q8_0 ].pipeline; break; + case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_0 ].pipeline; break; + case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_1 ].pipeline; break; + case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_0 ].pipeline; break; + case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_1 ].pipeline; break; + case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_IQ4_NL].pipeline; break; + default: GGML_ABORT("not implemented"); + } + + const int32_t nk0 = ne0/ggml_blck_size(dst->type); + + int nth = 32; // SIMD width + + while (nth < nk0 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) { + nth *= 2; + } + + int nrptg = 1; + if (nth > nk0) { + nrptg = (nth + nk0 - 1)/nk0; + nth = nk0; + + if (nrptg*nth > (int) pipeline.maxTotalThreadsPerThreadgroup) { + nrptg--; + } + } + + nth = MIN(nth, nk0); + + ggml_metal_kargs_set_rows args = { + /*.nk0 =*/ nk0, + /*.ne01 =*/ ne01, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.ne11 =*/ ne11, + /*.ne12 =*/ ne12, + /*.nb10 =*/ nb10, + /*.nb11 =*/ nb11, + /*.nb12 =*/ nb12, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + /*.nb3 =*/ nb3, + }; + + [encoder setComputePipelineState:pipeline]; + [encoder setBytes:&args length:sizeof(args) atIndex:0]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; + + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nrptg - 1)/nrptg, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, nrptg, 1)]; + } break; case GGML_OP_RMS_NORM: { GGML_ASSERT(ne00 % 4 == 0); diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index 5f004a856bde6..984a0ab503e7d 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -35,6 +35,17 @@ constexpr constant static float kvalues_iq4nl_f[16] = { -127.f, -104.f, -83.f, -65.f, -49.f, -35.f, -22.f, -10.f, 1.f, 13.f, 25.f, 38.f, 53.f, 69.f, 89.f, 113.f 
}; +static inline int best_index_int8(int n, constant float * val, float x) { + if (x <= val[0]) return 0; + if (x >= val[n-1]) return n-1; + int ml = 0, mu = n-1; + while (mu-ml > 1) { + int mav = (ml+mu)/2; + if (x < val[mav]) mu = mav; else ml = mav; + } + return x - val[mu-1] < val[mu] - x ? mu-1 : mu; +} + // NOTE: this is not dequantizing - we are simply fitting the template template void dequantize_f32(device const float4x4 * src, short il, thread type4x4 & reg) { @@ -97,6 +108,173 @@ void dequantize_q4_0_t4(device const block_q4_0 * xb, short il, thread type4 & r } } +void quantize_q4_0(device const float * src, device block_q4_0 & dst) { + float amax = 0.0f; // absolute max + float max = 0.0f; + + for (int j = 0; j < QK4_0; j++) { + const float v = src[j]; + if (amax < fabs(v)) { + amax = fabs(v); + max = v; + } + } + + const float d = max / -8; + const float id = d ? 1.0f/d : 0.0f; + + dst.d = d; + + for (int j = 0; j < QK4_0/2; ++j) { + const float x0 = src[0 + j]*id; + const float x1 = src[QK4_0/2 + j]*id; + + const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); + const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); + + dst.qs[j] = xi0; + dst.qs[j] |= xi1 << 4; + } +} + +void quantize_q4_1(device const float * src, device block_q4_1 & dst) { + float min = FLT_MAX; + float max = -FLT_MAX; + + for (int j = 0; j < QK4_1; j++) { + const float v = src[j]; + if (min > v) min = v; + if (max < v) max = v; + } + + const float d = (max - min) / ((1 << 4) - 1); + const float id = d ? 1.0f/d : 0.0f; + + dst.d = d; + dst.m = min; + + for (int j = 0; j < QK4_1/2; ++j) { + const float x0 = (src[0 + j] - min)*id; + const float x1 = (src[QK4_1/2 + j] - min)*id; + + const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); + const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); + + dst.qs[j] = xi0; + dst.qs[j] |= xi1 << 4; + } +} + +void quantize_q5_0(device const float * src, device block_q5_0 & dst) { + float amax = 0.0f; // absolute max + float max = 0.0f; + + for (int j = 0; j < QK5_0; j++) { + const float v = src[j]; + if (amax < fabs(v)) { + amax = fabs(v); + max = v; + } + } + + const float d = max / -16; + const float id = d ? 1.0f/d : 0.0f; + + dst.d = d; + + uint32_t qh = 0; + for (int j = 0; j < QK5_0/2; ++j) { + const float x0 = src[0 + j]*id; + const float x1 = src[QK5_0/2 + j]*id; + + const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f)); + const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f)); + + dst.qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); + qh |= ((xi0 & 0x10u) >> 4) << (j + 0); + qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); + } + + thread const uint8_t * qh8 = (thread const uint8_t *)&qh; + + for (int j = 0; j < 4; ++j) { + dst.qh[j] = qh8[j]; + } +} + +void quantize_q5_1(device const float * src, device block_q5_1 & dst) { + float max = src[0]; + float min = src[0]; + + for (int j = 1; j < QK5_1; j++) { + const float v = src[j]; + min = v < min ? v : min; + max = v > max ? v : max; + } + + const float d = (max - min) / 31; + const float id = d ? 
1.0f/d : 0.0f; + + dst.d = d; + dst.m = min; + + uint32_t qh = 0; + for (int j = 0; j < QK5_1/2; ++j) { + const float x0 = (src[0 + j] - min)*id; + const float x1 = (src[QK5_1/2 + j] - min)*id; + + const uint8_t xi0 = (uint8_t)(x0 + 0.5f); + const uint8_t xi1 = (uint8_t)(x1 + 0.5f); + + dst.qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); + qh |= ((xi0 & 0x10u) >> 4) << (j + 0); + qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2); + } + + thread const uint8_t * qh8 = (thread const uint8_t *)&qh; + + for (int j = 0; j < 4; ++j) { + dst.qh[j] = qh8[j]; + } +} + +void quantize_iq4_nl(device const float * src, device block_iq4_nl & dst) { + float amax = 0.0f; // absolute max + float max = 0.0f; + + for (int j = 0; j < QK4_NL; j++) { + const float v = src[j]; + if (amax < fabs(v)) { + amax = fabs(v); + max = v; + } + } + + const float d = max / kvalues_iq4nl_f[0]; + const float id = d ? 1.0f/d : 0.0f; + + float sumqx = 0, sumq2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + const float x0 = src[0 + j]*id; + const float x1 = src[QK4_NL/2 + j]*id; + + const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl_f, x0); + const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl_f, x1); + + dst.qs[j] = xi0 | (xi1 << 4); + + const float v0 = kvalues_iq4nl_f[xi0]; + const float v1 = kvalues_iq4nl_f[xi1]; + const float w0 = src[0 + j]*src[0 + j]; + const float w1 = src[QK4_NL/2 + j]*src[QK4_NL/2 + j]; + sumqx += w0*v0*src[j] + w1*v1*src[QK4_NL/2 + j]; + sumq2 += w0*v0*v0 + w1*v1*v1; + + } + + dst.d = sumq2 > 0 ? sumqx/sumq2 : d; +} + template void dequantize_q4_1(device const block_q4_1 * xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 2); @@ -279,6 +457,26 @@ void dequantize_q8_0_t4(device const block_q8_0 *xb, short il, thread type4 & re } } +void quantize_q8_0(device const float * src, device block_q8_0 & dst) { + float amax = 0.0f; // absolute max + + for (int j = 0; j < QK8_0; j++) { + const float v = src[j]; + amax = MAX(amax, fabs(v)); + } + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + dst.d = d; + + for (int j = 0; j < QK8_0; ++j) { + const float x0 = src[j]*id; + + dst.qs[j] = round(x0); + } +} + template void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) { const float d = xb->d; @@ -4410,6 +4608,7 @@ template [[host_name("kernel_cpy_bf16_f32")]] kernel kernel_cpy_t kernel_cpy; #endif +// TODO: templetify these kernels kernel void kernel_cpy_f32_q8_0( constant ggml_metal_kargs_cpy & args, device const char * src0, @@ -4433,23 +4632,7 @@ kernel void kernel_cpy_f32_q8_0( for (int64_t i00 = tpitg.x*QK8_0; i00 < args.ne00; i00 += ntg.x*QK8_0) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float amax = 0.0f; // absolute max - - for (int j = 0; j < QK8_0; j++) { - const float v = src[j]; - amax = MAX(amax, fabs(v)); - } - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - dst_data[i00/QK8_0].d = d; - - for (int j = 0; j < QK8_0; ++j) { - const float x0 = src[j]*id; - - dst_data[i00/QK8_0].qs[j] = round(x0); - } + quantize_q8_0(src, dst_data[i00/QK8_0]); } } @@ -4476,32 +4659,7 @@ kernel void kernel_cpy_f32_q4_0( for (int64_t i00 = tpitg.x*QK4_0; i00 < args.ne00; i00 += ntg.x*QK4_0) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < QK4_0; j++) { - const float v = src[j]; - if (amax < fabs(v)) { - amax = fabs(v); - max = v; - } - } - - const float d = max / -8; - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK4_0].d = d; - - for (int j = 0; j < QK4_0/2; ++j) { - const float x0 = src[0 + j]*id; - const float x1 = src[QK4_0/2 + j]*id; - - const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); - const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); - - dst_data[i00/QK4_0].qs[j] = xi0; - dst_data[i00/QK4_0].qs[j] |= xi1 << 4; - } + quantize_q4_0(src, dst_data[i00/QK4_0]); } } @@ -4528,31 +4686,7 @@ kernel void kernel_cpy_f32_q4_1( for (int64_t i00 = tpitg.x*QK4_1; i00 < args.ne00; i00 += ntg.x*QK4_1) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float min = FLT_MAX; - float max = -FLT_MAX; - - for (int j = 0; j < QK4_1; j++) { - const float v = src[j]; - if (min > v) min = v; - if (max < v) max = v; - } - - const float d = (max - min) / ((1 << 4) - 1); - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK4_1].d = d; - dst_data[i00/QK4_1].m = min; - - for (int j = 0; j < QK4_1/2; ++j) { - const float x0 = (src[0 + j] - min)*id; - const float x1 = (src[QK4_1/2 + j] - min)*id; - - const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); - const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); - - dst_data[i00/QK4_1].qs[j] = xi0; - dst_data[i00/QK4_1].qs[j] |= xi1 << 4; - } + quantize_q4_1(src, dst_data[i00/QK4_1]); } } @@ -4579,38 +4713,7 @@ kernel void kernel_cpy_f32_q5_0( for (int64_t i00 = tpitg.x*QK5_0; i00 < args.ne00; i00 += ntg.x*QK5_0) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < QK5_0; j++) { - const float v = src[j]; - if (amax < fabs(v)) { - amax = fabs(v); - max = v; - } - } - - const float d = max / -16; - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK5_0].d = d; - - uint32_t qh = 0; - for (int j = 0; j < QK5_0/2; ++j) { - const float x0 = src[0 + j]*id; - const float x1 = src[QK5_0/2 + j]*id; - - const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f)); - const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f)); - - dst_data[i00/QK5_0].qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); - qh |= ((xi0 & 0x10u) >> 4) << (j + 0); - qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); - } - thread const uint8_t * qh8 = (thread const uint8_t *)&qh; - for (int j = 0; j < 4; ++j) { - dst_data[i00/QK5_0].qh[j] = qh8[j]; - } + quantize_q5_0(src, dst_data[i00/QK5_0]); } } @@ -4637,49 +4740,8 @@ kernel void kernel_cpy_f32_q5_1( for (int64_t i00 = tpitg.x*QK5_1; i00 < args.ne00; i00 += ntg.x*QK5_1) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float max = src[0]; - float min = src[0]; - - for (int j = 1; j < QK5_1; j++) { - const float v = src[j]; - min = v < min ? v : min; - max = v > max ? 
v : max; - } - - const float d = (max - min) / 31; - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK5_1].d = d; - dst_data[i00/QK5_1].m = min; - - uint32_t qh = 0; - for (int j = 0; j < QK5_1/2; ++j) { - const float x0 = (src[0 + j] - min)*id; - const float x1 = (src[QK5_1/2 + j] - min)*id; - - const uint8_t xi0 = (uint8_t)(x0 + 0.5f); - const uint8_t xi1 = (uint8_t)(x1 + 0.5f); - - dst_data[i00/QK5_1].qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); - qh |= ((xi0 & 0x10u) >> 4) << (j + 0); - qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2); - } - thread const uint8_t * qh8 = (thread const uint8_t *)&qh; - for (int j = 0; j < 4; ++j) { - dst_data[i00/QK5_1].qh[j] = qh8[j]; - } - } -} - -static inline int best_index_int8(int n, constant float * val, float x) { - if (x <= val[0]) return 0; - if (x >= val[n-1]) return n-1; - int ml = 0, mu = n-1; - while (mu-ml > 1) { - int mav = (ml+mu)/2; - if (x < val[mav]) mu = mav; else ml = mav; + quantize_q5_1(src, dst_data[i00/QK5_1]); } - return x - val[mu-1] < val[mu] - x ? mu-1 : mu; } kernel void kernel_cpy_f32_iq4_nl( @@ -4705,40 +4767,7 @@ kernel void kernel_cpy_f32_iq4_nl( for (int64_t i00 = tpitg.x*QK4_NL; i00 < args.ne00; i00 += ntg.x*QK4_NL) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < QK4_NL; j++) { - const float v = src[j]; - if (amax < fabs(v)) { - amax = fabs(v); - max = v; - } - } - - const float d = max / kvalues_iq4nl_f[0]; - const float id = d ? 1.0f/d : 0.0f; - - float sumqx = 0, sumq2 = 0; - for (int j = 0; j < QK4_NL/2; ++j) { - const float x0 = src[0 + j]*id; - const float x1 = src[QK4_NL/2 + j]*id; - - const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl_f, x0); - const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl_f, x1); - - dst_data[i00/QK4_NL].qs[j] = xi0 | (xi1 << 4); - - const float v0 = kvalues_iq4nl_f[xi0]; - const float v1 = kvalues_iq4nl_f[xi1]; - const float w0 = src[0 + j]*src[0 + j]; - const float w1 = src[QK4_NL/2 + j]*src[QK4_NL/2 + j]; - sumqx += w0*v0*src[j] + w1*v1*src[QK4_NL/2 + j]; - sumq2 += w0*v0*v0 + w1*v1*v1; - - } - - dst_data[i00/QK4_NL].d = sumq2 > 0 ? 
sumqx/sumq2 : d; + quantize_iq4_nl(src, dst_data[i00/QK4_NL]); } } @@ -6419,10 +6448,10 @@ kernel void kernel_mul_mv_iq4_xs_f32( template kernel void kernel_get_rows_q( + constant ggml_metal_kargs_get_rows & args, device const void * src0, device const void * src1, device float * dst, - constant ggml_metal_kargs_get_rows & args, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { @@ -6442,10 +6471,10 @@ kernel void kernel_get_rows_q( template kernel void kernel_get_rows_f( + constant ggml_metal_kargs_get_rows & args, device const void * src0, device const void * src1, device float * dst, - constant ggml_metal_kargs_get_rows & args, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { @@ -6463,10 +6492,10 @@ kernel void kernel_get_rows_f( } kernel void kernel_get_rows_i32( + constant ggml_metal_kargs_get_rows & args, device const void * src0, device const void * src1, device int32_t * dst, - constant ggml_metal_kargs_get_rows & args, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { @@ -6483,6 +6512,67 @@ kernel void kernel_get_rows_i32( } } +template +kernel void kernel_set_rows_q32( + constant ggml_metal_kargs_set_rows & args, + device const void * src0, + device const void * src1, + device float * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + uint tiitg[[thread_index_in_threadgroup]], + uint3 tptg [[threads_per_threadgroup]]) { + const int32_t i03 = tgpig.z; + const int32_t i02 = tgpig.y; + + const int32_t i12 = i03%args.ne12; + const int32_t i11 = i02%args.ne11; + + const int32_t i01 = tgpig.x*tptg.y + tiitg/tptg.x; + if (i01 >= args.ne01) { + return; + } + + const int32_t i10 = i01; + const int64_t i1 = ((const device int64_t *) ((const device char *) src1 + i10*args.nb10 + i11*args.nb11 + i12*args.nb12))[0]; + + device block_q * dst_row = ( device block_q *) (( device char *) dst + i1*args.nb1 + i02*args.nb2 + i03*args.nb3); + const device float * src_row = (const device float *) ((const device char *) src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); + + for (int ind = tiitg%tptg.x; ind < args.nk0; ind += tptg.x) { + quantize_func(src_row + 32*ind, dst_row[ind]); + } +} + +template +kernel void kernel_set_rows_f( + constant ggml_metal_kargs_set_rows & args, + device const void * src0, + device const void * src1, + device float * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + uint tiitg[[thread_index_in_threadgroup]], + uint3 tptg [[threads_per_threadgroup]]) { + const int32_t i03 = tgpig.z; + const int32_t i02 = tgpig.y; + + const int32_t i12 = i03%args.ne12; + const int32_t i11 = i02%args.ne11; + + const int32_t i01 = tgpig.x*tptg.y + tiitg/tptg.x; + if (i01 >= args.ne01) { + return; + } + + const int32_t i10 = i01; + const int64_t i1 = ((const device int64_t *) ((const device char *) src1 + i10*args.nb10 + i11*args.nb11 + i12*args.nb12))[0]; + + device T * dst_row = ( device T *) (( device char *) dst + i1*args.nb1 + i02*args.nb2 + i03*args.nb3); + const device float * src_row = (const device float *) ((const device char *) src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); + + for (int ind = tiitg%tptg.x; ind < args.nk0; ind += tptg.x) { + dst_row[ind] = (T) src_row[ind]; + } +} #define BLOCK_SIZE_M 64 // 8 simdgroup matrices from matrix A #define BLOCK_SIZE_N 32 // 4 simdgroup matrices from matrix B @@ -6906,6 +6996,27 @@ 
template [[host_name("kernel_get_rows_iq1_m")]] kernel get_rows_q_t kernel_get template [[host_name("kernel_get_rows_iq4_nl")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_iq4_xs")]] kernel get_rows_q_t kernel_get_rows_q; +// +// set rows +// + +typedef decltype(kernel_set_rows_f) set_rows_f_t; + +template [[host_name("kernel_set_rows_f32")]] kernel set_rows_f_t kernel_set_rows_f; +template [[host_name("kernel_set_rows_f16")]] kernel set_rows_f_t kernel_set_rows_f; +#if defined(GGML_METAL_USE_BF16) +template [[host_name("kernel_set_rows_bf16")]] kernel set_rows_f_t kernel_set_rows_f; +#endif + +typedef decltype(kernel_set_rows_q32) set_rows_q32_t; + +template [[host_name("kernel_set_rows_q8_0")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_q4_0")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_q4_1")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_q5_0")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_q5_1")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_iq4_nl")]] kernel set_rows_q32_t kernel_set_rows_q32; + // // matrix-matrix multiplication // diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index ee605977f3a2c..3d04f80ef4f90 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -933,6 +933,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "TRANSPOSE", "GET_ROWS", "GET_ROWS_BACK", + "SET_ROWS", "DIAG", "DIAG_MASK_INF", "DIAG_MASK_ZERO", @@ -983,7 +984,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "OPT_STEP_ADAMW", }; -static_assert(GGML_OP_COUNT == 83, "GGML_OP_COUNT != 83"); +static_assert(GGML_OP_COUNT == 84, "GGML_OP_COUNT != 84"); static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "none", @@ -1029,6 +1030,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "transpose(x)", "get_rows(x)", "get_rows_back(x)", + "set_rows(x)", "diag(x)", "diag_mask_inf(x)", "diag_mask_zero(x)", @@ -1079,7 +1081,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "adamw(x)", }; -static_assert(GGML_OP_COUNT == 83, "GGML_OP_COUNT != 83"); +static_assert(GGML_OP_COUNT == 84, "GGML_OP_COUNT != 84"); static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2"); @@ -1348,6 +1350,12 @@ bool ggml_is_contiguous_channels(const struct ggml_tensor * tensor) { tensor->nb[2] == ggml_type_size(tensor->type); } +bool ggml_is_contiguous_rows(const struct ggml_tensor * tensor) { + return + tensor->ne[0] == ggml_blck_size(tensor->type) || + tensor->nb[0] == ggml_type_size(tensor->type); +} + static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) { static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); @@ -3384,6 +3392,35 @@ struct ggml_tensor * ggml_get_rows_back( return result; } +// ggml_set_rows + +struct ggml_tensor * ggml_set_rows( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + struct ggml_tensor * c) { + GGML_ASSERT(a->ne[0] == b->ne[0]); + GGML_ASSERT(a->ne[2] == b->ne[2]); + GGML_ASSERT(a->ne[3] == b->ne[3]); + GGML_ASSERT(b->ne[1] == c->ne[0]); + GGML_ASSERT(b->ne[2] % c->ne[1] == 0); + GGML_ASSERT(b->ne[3] % c->ne[2] == 0); + GGML_ASSERT(c->ne[3] == 1); + GGML_ASSERT(b->type == GGML_TYPE_F32); + GGML_ASSERT(c->type == GGML_TYPE_I64); + + GGML_ASSERT(ggml_is_contiguous_rows(a)); + GGML_ASSERT(ggml_is_contiguous_rows(b)); + + struct ggml_tensor * result = ggml_view_tensor(ctx, a); + + 
result->op = GGML_OP_SET_ROWS; + result->src[0] = b; + result->src[1] = c; + + return result; +} + // ggml_diag struct ggml_tensor * ggml_diag( diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 615c2dc008a8d..a233f1f2fd97a 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -1213,6 +1213,76 @@ struct test_get_rows_back : public test_case { } }; +// GGML_OP_SET_ROWS +struct test_set_rows : public test_case { + const ggml_type type; + const std::array ne; + const std::array nr23; // broadcast only dims 2 and 3 + const int r; // rows to set + const bool v; // view (non-contiguous src1) + + std::string vars() override { + return VARS_TO_STR5(type, ne, nr23, r, v); + } + + test_set_rows(ggml_type type, + std::array ne, + std::array nr23, + int r, bool v = false) + : type(type), ne(ne), nr23(nr23), r(r), v(v) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * dst = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2]*nr23[0], ne[3]*nr23[1]); + ggml_set_name(dst, "dst"); + + ggml_tensor * src = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, ne[0], r, ne[2]*nr23[0], ne[3]*nr23[1]); + ggml_set_name(src, "src"); + + ggml_tensor * row_idxs = ggml_new_tensor_3d(ctx, GGML_TYPE_I64, r, ne[2], ne[3]); + ggml_set_name(row_idxs, "row_idxs"); + + if (v) { + src = ggml_view_4d(ctx, src, ne[0], r/2, ne[2]*nr23[0], ne[3]*nr23[1], src->nb[1], src->nb[2], src->nb[3], 0); + row_idxs = ggml_view_3d(ctx, row_idxs, r/2, ne[2], ne[3], row_idxs->nb[1], row_idxs->nb[2], 0); + ggml_set_name(row_idxs, "view_of_rows"); + } + + ggml_tensor * out = ggml_set_rows(ctx, dst, src, row_idxs); + ggml_set_name(out, "out"); + + return out; + } + + void initialize_tensors(ggml_context * ctx) override { + std::random_device rd; + std::default_random_engine rng(rd()); + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + if (t->type == GGML_TYPE_I64) { + if (ggml_is_view_op(t->op)) { + continue; + } + + for (int i2 = 0; i2 < t->ne[2]; i2++) { + for (int i1 = 0; i1 < t->ne[1]; i1++) { + // generate a shuffled subset of row indices + std::vector data(ne[1]); + for (int i = 0; i < ne[1]; i++) { + data[i] = i; + } + std::shuffle(data.begin(), data.end(), rng); + data.resize(t->ne[0]); + + const size_t offs = i1*t->nb[1] + i2*t->nb[2]; + ggml_backend_tensor_set(t, data.data(), offs, t->ne[0]*sizeof(int64_t)); + } + } + } else { + init_tensor_uniform(t); + } + } + } +}; + // GGML_OP_ARGMAX struct test_argmax : public test_case { const ggml_type type; @@ -3984,6 +4054,23 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_get_rows_back(GGML_TYPE_I32, 256, 5, 4, 1, v)); } + test_cases.emplace_back(new test_set_rows(GGML_TYPE_F32, { 1, 8, 1, 3 }, { 1, 1 }, 2, false)); + for (ggml_type type : all_types) { + for (int b : {1, 7}) { + for (bool v : {false, true}) { + test_cases.emplace_back(new test_set_rows(type, { 256, 5, b, 3 }, { 1, 1, }, 1, v)); + test_cases.emplace_back(new test_set_rows(type, { 256, 11, 1, b }, { 2, 3, }, 7, v)); + + test_cases.emplace_back(new test_set_rows(type, { 3*ggml_blck_size(type), 3, b, 1 }, { 2, 3, }, 2, v)); + + if (ggml_blck_size(type) == 1) { + test_cases.emplace_back(new test_set_rows(type, { 31, 3, b, 1 }, { 2, 3, }, 2, v)); + test_cases.emplace_back(new test_set_rows(type, { 33, 5, 1, b }, { 2, 3, }, 1, v)); + } + } + } + } + for (ggml_type type_input : {GGML_TYPE_F32}) { for (ggml_op_pool pool_type : {GGML_OP_POOL_AVG, GGML_OP_POOL_MAX}) { for (int k0 : {1, 3}) { 
From 6d1bb393f52152d7af9106393ede0eebdd1f16b7 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 27 Jun 2025 17:55:45 +0300 Subject: [PATCH 160/192] recurrent : call balloc split_reset() in init_batch() (#14414) ggml-ci --- src/llama-memory-recurrent.cpp | 37 +++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/src/llama-memory-recurrent.cpp b/src/llama-memory-recurrent.cpp index 1b1e95d567a6c..e52156bf308b6 100644 --- a/src/llama-memory-recurrent.cpp +++ b/src/llama-memory-recurrent.cpp @@ -363,30 +363,35 @@ llama_pos llama_memory_recurrent::seq_pos_max(llama_seq_id seq_id) const { } llama_memory_context_ptr llama_memory_recurrent::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { - std::vector ubatches; + do { + balloc.split_reset(); - while (true) { - llama_ubatch ubatch; + std::vector ubatches; + while (true) { + llama_ubatch ubatch; - if (embd_all) { - // if all tokens are output, split by sequence - ubatch = balloc.split_seq(n_ubatch); - } else { - ubatch = balloc.split_equal(n_ubatch); + if (embd_all) { + // if all tokens are output, split by sequence + ubatch = balloc.split_seq(n_ubatch); + } else { + ubatch = balloc.split_equal(n_ubatch); + } + + if (ubatch.n_tokens == 0) { + break; + } + + ubatches.push_back(std::move(ubatch)); // NOLINT } - if (ubatch.n_tokens == 0) { + if (!prepare(ubatches)) { break; } - ubatches.push_back(std::move(ubatch)); // NOLINT - } - - if (!prepare(ubatches)) { - return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); - } + return std::make_unique(this, std::move(ubatches)); + } while (false); - return std::make_unique(this, std::move(ubatches)); + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); } llama_memory_context_ptr llama_memory_recurrent::init_full() { From d6681676da10e2418225ef8707453eba1a73d321 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 27 Jun 2025 21:42:02 +0300 Subject: [PATCH 161/192] graph : make llm_graph_context destructor virtual (#14410) ggml-ci --- src/llama-graph.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/llama-graph.h b/src/llama-graph.h index 4b1ec354dfc30..ee2197e892b5a 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -475,6 +475,7 @@ struct llm_graph_context { std::unique_ptr res; llm_graph_context(const llm_graph_params & params); + virtual ~llm_graph_context() = default; void cb(ggml_tensor * cur, const char * name, int il) const; From deae1bc5c1793215cb6ca437504b19280cc93f38 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Fri, 27 Jun 2025 22:35:30 -0500 Subject: [PATCH 162/192] vulkan: Fix GGML_VULKAN_SHADER_DEBUG_INFO (#14427) This setting needs to be passed through to vulkan-shaders-gen --- ggml/src/ggml-vulkan/CMakeLists.txt | 1 + ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/ggml/src/ggml-vulkan/CMakeLists.txt b/ggml/src/ggml-vulkan/CMakeLists.txt index 0bf4cb14f88c7..b97e7bf995504 100644 --- a/ggml/src/ggml-vulkan/CMakeLists.txt +++ b/ggml/src/ggml-vulkan/CMakeLists.txt @@ -99,6 +99,7 @@ if (Vulkan_FOUND) if (GGML_VULKAN_SHADER_DEBUG_INFO) add_compile_definitions(GGML_VULKAN_SHADER_DEBUG_INFO) + list(APPEND VULKAN_SHADER_GEN_CMAKE_ARGS -DGGML_VULKAN_SHADER_DEBUG_INFO=ON) endif() if (GGML_VULKAN_VALIDATE) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt b/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt index 14e9daaa01a25..e1f613fb4f683 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt +++ 
b/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt @@ -19,6 +19,10 @@ if (GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT) add_compile_definitions(GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT) message(STATUS "Enabling bfloat16 glslc support") endif() +if (GGML_VULKAN_SHADER_DEBUG_INFO) + add_compile_definitions(GGML_VULKAN_SHADER_DEBUG_INFO) + message(STATUS "Enabling shader debug info") +endif() set(TARGET vulkan-shaders-gen) add_executable(${TARGET} vulkan-shaders-gen.cpp) From a490434612aa198830568471cbd5b2b740c3e2dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Sat, 28 Jun 2025 09:57:07 +0200 Subject: [PATCH 163/192] ci : fix windows build and release (#14431) --- .github/workflows/build.yml | 18 +++++++++++++----- .github/workflows/release.yml | 12 ++++++------ 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4feccf21e9e3e..4ea8ea3c0428b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -664,7 +664,7 @@ jobs: ./build-xcframework.sh windows-msys2: - runs-on: windows-latest + runs-on: windows-2025 strategy: fail-fast: false @@ -714,7 +714,7 @@ jobs: cmake --build build --config ${{ matrix.build }} -j $(nproc) windows-latest-cmake: - runs-on: windows-latest + runs-on: windows-2025 env: OPENBLAS_VERSION: 0.3.23 @@ -725,16 +725,22 @@ jobs: matrix: include: - build: 'cpu-x64 (static)' + arch: 'x64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=OFF' - build: 'openblas-x64' + arch: 'x64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' - build: 'vulkan-x64' + arch: 'x64' defines: '-DCMAKE_BUILD_TYPE=Release -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_VULKAN=ON' - build: 'llvm-arm64' + arch: 'arm64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON' - build: 'llvm-arm64-opencl-adreno' + arch: 'arm64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON' # - build: 'kompute-x64' + # arch: 'x64' # defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF -DGGML_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON' steps: @@ -805,6 +811,8 @@ jobs: - name: libCURL id: get_libcurl uses: ./.github/actions/windows-setup-curl + with: + architecture: ${{ matrix.arch == 'x64' && 'win64' || 'win64a' }} - name: Build id: cmake_build @@ -825,7 +833,7 @@ jobs: - name: Test id: cmake_test - if: ${{ matrix.build != 'llvm-arm64' && matrix.build != 'llvm-arm64-opencl-adreno' }} + if: ${{ matrix.arch == 'x64' }} run: | cd build ctest -L main -C Release --verbose --timeout 900 @@ -930,7 +938,7 @@ jobs: cmake --build build --config Release windows-latest-cmake-sycl: - runs-on: windows-latest + runs-on: windows-2022 defaults: 
run: @@ -964,7 +972,7 @@ jobs: windows-latest-cmake-hip: if: ${{ github.event.inputs.create_release != 'true' }} - runs-on: windows-latest + runs-on: windows-2022 steps: - name: Clone diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 64fff175e227b..7c95a61fc1b47 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -235,7 +235,7 @@ jobs: name: llama-bin-ubuntu-vulkan-x64.zip windows-cpu: - runs-on: windows-latest + runs-on: windows-2025 strategy: matrix: @@ -271,7 +271,7 @@ jobs: env: CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }} run: | - call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" ${{ matrix.arch }} + call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" ${{ matrix.arch == 'x64' && 'x64' || 'amd64_arm64' }} cmake -S . -B build -G "Ninja Multi-Config" ^ -D CMAKE_TOOLCHAIN_FILE=cmake/${{ matrix.arch }}-windows-llvm.cmake ^ -DGGML_NATIVE=OFF ^ @@ -288,7 +288,7 @@ jobs: CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }} run: | Copy-Item $env:CURL_PATH\bin\libcurl-${{ matrix.arch }}.dll .\build\bin\Release\ - Copy-Item "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Redist\MSVC\14.42.34433\debug_nonredist\${{ matrix.arch }}\Microsoft.VC143.OpenMP.LLVM\libomp140.${{ matrix.arch == 'x64' && 'x86_64' || 'aarch64' }}.dll" .\build\bin\Release\ + Copy-Item "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Redist\MSVC\14.44.35112\debug_nonredist\${{ matrix.arch }}\Microsoft.VC143.OpenMP.LLVM\libomp140.${{ matrix.arch == 'x64' && 'x86_64' || 'aarch64' }}.dll" .\build\bin\Release\ 7z a llama-bin-win-cpu-${{ matrix.arch }}.zip .\build\bin\Release\* - name: Upload artifacts @@ -298,7 +298,7 @@ jobs: name: llama-bin-win-cpu-${{ matrix.arch }}.zip windows: - runs-on: windows-latest + runs-on: windows-2025 env: OPENBLAS_VERSION: 0.3.23 @@ -448,7 +448,7 @@ jobs: name: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip windows-sycl: - runs-on: windows-latest + runs-on: windows-2022 defaults: run: @@ -520,7 +520,7 @@ jobs: name: llama-bin-win-sycl-x64.zip windows-hip: - runs-on: windows-latest + runs-on: windows-2022 strategy: matrix: From 567ae3e02fe3a60e4bc08acfe843edeef68b2311 Mon Sep 17 00:00:00 2001 From: Xinpeng Dou <15529241576@163.com> Date: Sat, 28 Jun 2025 17:35:41 +0800 Subject: [PATCH 164/192] fix async_mode bug (#14432) --- ggml/src/ggml-cann/common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/ggml-cann/common.h b/ggml/src/ggml-cann/common.h index ba2cef0c25fb2..8dfe3b061c13c 100755 --- a/ggml/src/ggml-cann/common.h +++ b/ggml/src/ggml-cann/common.h @@ -359,7 +359,7 @@ struct ggml_backend_cann_context { ggml_cann_set_device(device); description = aclrtGetSocName(); - bool async_mode = parse_bool(get_env("GGML_CANN_ASYNC_MODE").value_or("")); + async_mode = parse_bool(get_env("GGML_CANN_ASYNC_MODE").value_or("")); GGML_LOG_INFO("%s: device %d async operator submission is %s\n", __func__, device, async_mode ? "ON" : "OFF"); } From 4aae9bc548248cc6b9ed4ea8b412654020236924 Mon Sep 17 00:00:00 2001 From: Weizhao Ouyang Date: Sat, 28 Jun 2025 22:08:21 +0800 Subject: [PATCH 165/192] model : add support for ERNIE 4.5 0.3B model (#14408) Add Day-0 support for Baidu ERNIE 4.5 0.3B model. 
Signed-off-by: Weizhao Ouyang --- convert_hf_to_gguf.py | 46 ++++++++++ gguf-py/gguf/constants.py | 16 ++++ src/llama-arch.cpp | 18 ++++ src/llama-arch.h | 1 + src/llama-model.cpp | 178 ++++++++++++++++++++++++++++++++++++++ src/llama-model.h | 1 + 6 files changed, 260 insertions(+) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index aed595e259ed5..c2c55166e7641 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -2743,6 +2743,52 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter yield from super().modify_tensors(data_torch, name, bid) +@ModelBase.register("Ernie4_5_ForCausalLM") +class Ernie4_5Model(TextModel): + model_arch = gguf.MODEL_ARCH.ERNIE4_5 + + def set_vocab(self): + self._set_vocab_sentencepiece() + + def set_gguf_parameters(self): + super().set_gguf_parameters() + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + num_heads = self.hparams["num_attention_heads"] + num_kv_heads = self.hparams["num_key_value_heads"] + head_dim = self.hparams["head_dim"] + + if "ernie." in name: + name = name.replace("ernie.", "model.") + # split the qkv weights + # qkv_proj shape: [(num_heads + 2 * num_kv_heads) * head_dim, hidden_size] + if "qkv_proj" in name: + name_q = name.replace("qkv_proj.weight", "q_proj.weight") + name_k = name.replace("qkv_proj.weight", "k_proj.weight") + name_v = name.replace("qkv_proj.weight", "v_proj.weight") + total_q_dim = num_heads * head_dim + total_k_dim = num_kv_heads * head_dim + total_v_dim = num_kv_heads * head_dim + q_proj_weight, k_proj_weight, v_proj_weight = data_torch.split([total_q_dim, total_k_dim, total_v_dim], dim=0) + return [ + (self.map_tensor_name(name_q), q_proj_weight), + (self.map_tensor_name(name_k), k_proj_weight), + (self.map_tensor_name(name_v), v_proj_weight) + ] + # split the up_gate_proj into gate and up + # up_gate_proj shape: [2 * intermediate_size, hidden_size] + if "up_gate_proj" in name: + name_up = name.replace("up_gate_proj.weight", "up_proj.weight") + name_gate = name.replace("up_gate_proj.weight", "gate_proj.weight") + dim_half = data_torch.shape[0] // 2 + gate_proj_weight, up_proj_weight = data_torch.split(dim_half, dim=0) + return [ + (self.map_tensor_name(name_gate), gate_proj_weight), + (self.map_tensor_name(name_up), up_proj_weight) + ] + return [(self.map_tensor_name(name), data_torch)] + + @ModelBase.register( "Qwen2VLModel", "Qwen2VLForConditionalGeneration", diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index fb75143b0b545..b5ba933cb0c61 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -354,6 +354,7 @@ class MODEL_ARCH(IntEnum): BAILINGMOE = auto() DOTS1 = auto() ARCEE = auto() + ERNIE4_5 = auto() class VISION_PROJECTOR_TYPE(IntEnum): @@ -654,6 +655,7 @@ class MODEL_TENSOR(IntEnum): MODEL_ARCH.BAILINGMOE: "bailingmoe", MODEL_ARCH.DOTS1: "dots1", MODEL_ARCH.ARCEE: "arcee", + MODEL_ARCH.ERNIE4_5: "ernie4_5", } VISION_PROJECTOR_TYPE_NAMES: dict[VISION_PROJECTOR_TYPE, str] = { @@ -2177,6 +2179,20 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_DOWN, MODEL_TENSOR.FFN_UP, ], + MODEL_ARCH.ERNIE4_5: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], # TODO } diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 
435e3b9ba3db8..aa21108a4bd79 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -76,6 +76,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_BAILINGMOE, "bailingmoe" }, { LLM_ARCH_DOTS1, "dots1" }, { LLM_ARCH_ARCEE, "arcee" }, + { LLM_ARCH_ERNIE4_5, "ernie4_5" }, { LLM_ARCH_UNKNOWN, "(unknown)" }, }; @@ -1658,6 +1659,23 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" }, } }, + { + LLM_ARCH_ERNIE4_5, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_UNKNOWN, { diff --git a/src/llama-arch.h b/src/llama-arch.h index 9181ad053f6b3..0771ec3ebadcd 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -80,6 +80,7 @@ enum llm_arch { LLM_ARCH_BAILINGMOE, LLM_ARCH_DOTS1, LLM_ARCH_ARCEE, + LLM_ARCH_ERNIE4_5, LLM_ARCH_UNKNOWN, }; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index fc39195ed5177..b15bf73c2a29a 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -47,6 +47,7 @@ const char * llm_type_name(llm_type type) { case LLM_TYPE_475M: return "475M"; case LLM_TYPE_770M: return "770M"; case LLM_TYPE_780M: return "780M"; + case LLM_TYPE_0_3B: return "0.3B"; case LLM_TYPE_0_5B: return "0.5B"; case LLM_TYPE_0_6B: return "0.6B"; case LLM_TYPE_1B: return "1B"; @@ -1504,6 +1505,14 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; + case LLM_ARCH_ERNIE4_5: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 18: type = LLM_TYPE_0_3B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; default: throw std::runtime_error("unsupported model architecture"); } @@ -4344,6 +4353,40 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? 
TENSOR_DUPLICATED : 0)); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; + case LLM_ARCH_ERNIE4_5: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); + + // optional bias tensors + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); } @@ -14125,6 +14168,136 @@ struct llm_build_dots1 : public llm_graph_context { } }; +struct llm_build_ernie4_5 : public llm_graph_context { + llm_build_ernie4_5(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + { + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + } + + // self-attention + { + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, 
n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, gf, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + struct llm_build_arcee : public llm_graph_context { llm_build_arcee(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; @@ -14635,6 +14808,10 @@ llm_graph_result_ptr llama_model::build_graph( { llm = std::make_unique(*this, params, gf); } break; + case LLM_ARCH_ERNIE4_5: + { + llm = std::make_unique(*this, params, gf); + } break; default: GGML_ABORT("fatal error"); } @@ -14786,6 +14963,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_BAILINGMOE: case LLM_ARCH_NEO_BERT: case LLM_ARCH_ARCEE: + case LLM_ARCH_ERNIE4_5: return LLAMA_ROPE_TYPE_NORM; // the pairs of head values are offset by n_rot/2 diff --git a/src/llama-model.h b/src/llama-model.h index 40063b790d434..a958c5997a11b 100644 --- a/src/llama-model.h +++ b/src/llama-model.h @@ -39,6 +39,7 @@ enum llm_type { LLM_TYPE_475M, LLM_TYPE_770M, LLM_TYPE_780M, + LLM_TYPE_0_3B, LLM_TYPE_0_5B, LLM_TYPE_0_6B, LLM_TYPE_1B, From 197286aab8d9f5b59c5dd93ebcc1e284cc7eb9a3 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sat, 28 Jun 2025 10:17:09 -0500 Subject: [PATCH 166/192] vulkan: lock accesses of pinned_memory vector (#14333) --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 99be5e45b2af7..e42f115d04034 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -305,7 +305,7 @@ static vk_device_architecture get_device_architecture(const vk::PhysicalDevice& } struct vk_device_struct { - std::mutex mutex; + 
std::recursive_mutex mutex; vk::PhysicalDevice physical_device; vk::PhysicalDeviceProperties properties; @@ -1197,7 +1197,7 @@ static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipelin } { - std::lock_guard guard(device->mutex); + std::lock_guard guard(device->mutex); device->pipelines.insert({ pipeline->name, pipeline }); } @@ -1411,7 +1411,7 @@ static uint32_t ggml_vk_find_queue_family_index(std::vector guard(device->mutex); + std::lock_guard guard(device->mutex); q.queue_family_index = queue_family_index; q.transfer_only = transfer_only; @@ -4124,6 +4124,7 @@ static void * ggml_vk_host_malloc(vk_device& device, size_t size) { return nullptr; } + std::lock_guard guard(device->mutex); device->pinned_memory.push_back(std::make_tuple(buf->ptr, size, buf)); return buf->ptr; @@ -4134,6 +4135,8 @@ static void ggml_vk_host_free(vk_device& device, void* ptr) { return; } VK_LOG_MEMORY("ggml_vk_host_free(" << ptr << ")"); + std::lock_guard guard(device->mutex); + vk_buffer buf; size_t index; for (size_t i = 0; i < device->pinned_memory.size(); i++) { @@ -4156,6 +4159,7 @@ static void ggml_vk_host_free(vk_device& device, void* ptr) { } static void ggml_vk_host_get(vk_device& device, const void * ptr, vk_buffer& buf, size_t& buf_offset) { + std::lock_guard guard(device->mutex); buf = nullptr; buf_offset = 0; for (size_t i = 0; i < device->pinned_memory.size(); i++) { @@ -4457,7 +4461,7 @@ static void ggml_vk_buffer_write_2d(vk_buffer& dst, size_t offset, const void * memcpy((uint8_t *)dst->ptr + offset + i * width, (const uint8_t *) src + i * spitch, width); } } else { - std::lock_guard guard(dst->device->mutex); + std::lock_guard guard(dst->device->mutex); vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue.cmd_pool); ggml_vk_ctx_begin(dst->device, subctx); @@ -4548,7 +4552,7 @@ static void ggml_vk_buffer_read(vk_buffer& src, size_t offset, void * dst, size_ memcpy(dst, (uint8_t *) src->ptr + offset, size); } else { - std::lock_guard guard(src->device->mutex); + std::lock_guard guard(src->device->mutex); vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue.cmd_pool); ggml_vk_ctx_begin(src->device, subctx); @@ -4578,7 +4582,7 @@ static void ggml_vk_buffer_copy_async(vk_context& ctx, vk_buffer& dst, size_t ds static void ggml_vk_buffer_copy(vk_buffer& dst, size_t dst_offset, vk_buffer& src, size_t src_offset, size_t size) { if (src->device == dst->device) { - std::lock_guard guard(src->device->mutex); + std::lock_guard guard(src->device->mutex); VK_LOG_DEBUG("ggml_vk_buffer_copy(SINGLE_DEVICE, " << size << ")"); // Copy within the device vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue.cmd_pool); @@ -4613,7 +4617,7 @@ static void ggml_vk_buffer_memset_async(vk_context& ctx, vk_buffer& dst, size_t static void ggml_vk_buffer_memset(vk_buffer& dst, size_t offset, uint32_t c, size_t size) { VK_LOG_DEBUG("ggml_vk_buffer_memset(" << offset << ", " << c << ", " << size << ")"); - std::lock_guard guard(dst->device->mutex); + std::lock_guard guard(dst->device->mutex); vk_context subctx = ggml_vk_create_temporary_context(dst->device->transfer_queue.cmd_pool); ggml_vk_ctx_begin(dst->device, subctx); subctx->s->buffer.fillBuffer(dst->buffer, offset, size, c); From 20dc2240c10363c59f2304b4b713ce14965d37c7 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sat, 28 Jun 2025 10:36:40 -0500 Subject: [PATCH 167/192] vulkan: handle noncontig in the final case of ggml_vk_get_cpy_pipeline (#14378) --- 
ggml/src/ggml-vulkan/ggml-vulkan.cpp | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index e42f115d04034..996ccbf66b016 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -4844,9 +4844,17 @@ static vk_pipeline ggml_vk_get_cpy_pipeline(ggml_backend_vk_context * ctx, const // type size must be exactly 2 or 4. GGML_ASSERT(ggml_is_quantized(to) || ggml_type_size(src->type) == 2 || ggml_type_size(src->type) == 4); if ((ggml_type_size(src->type) % 4) == 0) { - return ctx->device->pipeline_contig_cpy_f32_f32; + if (contig) { + return ctx->device->pipeline_contig_cpy_f32_f32; + } else { + return ctx->device->pipeline_cpy_f32_f32; + } } else { - return ctx->device->pipeline_contig_cpy_f16_f16; + if (contig) { + return ctx->device->pipeline_contig_cpy_f16_f16; + } else { + return ctx->device->pipeline_cpy_f16_f16; + } } } @@ -4907,7 +4915,7 @@ static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& sub std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3]; std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3]; std::cerr << "), " << (dryrun ? "dryrun" : "") << ")"); - GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT + GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16); // NOLINT GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT const uint64_t ne00 = src0->ne[0]; @@ -5135,7 +5143,7 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3]; std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3]; std::cerr << "), " << (dryrun ? 
"dryrun" : "") << "),)"); - GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT + GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16); // NOLINT GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT const uint64_t ne00 = src0->ne[0]; @@ -5736,7 +5744,7 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_conte std::cerr << "), (" << ids << ", name=" << ids->name << ", type=" << ids->type << ", ne0=" << ids->ne[0] << ", ne1=" << ids->ne[1] << ", ne2=" << ids->ne[2] << ", ne3=" << ids->ne[3] << ", nb0=" << ids->nb[0] << ", nb1=" << ids->nb[1] << ", nb2=" << ids->nb[2] << ", nb3=" << ids->nb[3]; std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3]; std::cerr << "), " << (dryrun ? "dryrun" : "") << ")"); - GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); // NOLINT + GGML_ASSERT(ggml_vk_dim01_contiguous(src0) || src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16); // NOLINT GGML_ASSERT(ggml_vk_dim01_contiguous(src1) || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16); // NOLINT GGML_ASSERT(ids->type == GGML_TYPE_I32); From 65785577556c0a9489f6904a1841457b646e58e1 Mon Sep 17 00:00:00 2001 From: Aman Gupta Date: Sun, 29 Jun 2025 01:30:53 +0800 Subject: [PATCH 168/192] CUDA: add bf16 and f32 support to cublas_mul_mat_batched (#14361) * CUDA: add bf16 and f32 support to cublas_mul_mat_batched * Review: add type traits and make function more generic * Review: make check more explicit, add back comments, and fix formatting * Review: fix formatting, remove useless type conversion, fix naming for bools --- ggml/src/ggml-cuda/convert.cu | 22 ++++ ggml/src/ggml-cuda/convert.cuh | 5 + ggml/src/ggml-cuda/ggml-cuda.cu | 207 ++++++++++++++++++++------------ tests/test-backend-ops.cpp | 6 +- 4 files changed, 162 insertions(+), 78 deletions(-) diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu index c6dec4276b36d..eeaa14bf57950 100644 --- a/ggml/src/ggml-cuda/convert.cu +++ b/ggml/src/ggml-cuda/convert.cu @@ -728,3 +728,25 @@ to_fp16_nc_cuda_t ggml_get_to_fp16_nc_cuda(ggml_type type) { return nullptr; } } + +to_bf16_nc_cuda_t ggml_get_to_bf16_nc_cuda(ggml_type type) { + switch (type) { + case GGML_TYPE_F32: + return convert_unary_cuda; + case GGML_TYPE_F16: + return convert_unary_cuda; + default: + return nullptr; + } +} + +to_fp32_nc_cuda_t ggml_get_to_fp32_nc_cuda(ggml_type type) { + switch (type) { + case GGML_TYPE_F16: + return convert_unary_cuda; + case GGML_TYPE_BF16: + return convert_unary_cuda; + default: + return nullptr; + } +} diff --git a/ggml/src/ggml-cuda/convert.cuh b/ggml/src/ggml-cuda/convert.cuh index b65b98e08e7e2..f04214be175ba 100644 --- a/ggml/src/ggml-cuda/convert.cuh +++ b/ggml/src/ggml-cuda/convert.cuh @@ -22,5 +22,10 @@ using to_t_nc_cuda_t = void (*)(const void * x, T * y, int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne03, int64_t s01, int64_t s02, int64_t s03, cudaStream_t stream); +typedef to_t_nc_cuda_t to_fp32_nc_cuda_t; typedef to_t_nc_cuda_t to_fp16_nc_cuda_t; +typedef 
to_t_nc_cuda_t to_bf16_nc_cuda_t; + +to_fp32_nc_cuda_t ggml_get_to_fp32_nc_cuda(ggml_type type); to_fp16_nc_cuda_t ggml_get_to_fp16_nc_cuda(ggml_type type); +to_bf16_nc_cuda_t ggml_get_to_bf16_nc_cuda(ggml_type type); diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index b30c13c62f25c..811422f385073 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -1749,7 +1749,7 @@ static void ggml_cuda_op_mul_mat( } static __global__ void k_compute_batched_ptrs( - const half * src0_as_f16, const half * src1_as_f16, char * dst, + const void * src0_as_f16, const void * src1_as_f16, char * dst, const void ** ptrs_src, void ** ptrs_dst, int64_t ne12, int64_t ne13, int64_t ne23, @@ -1772,83 +1772,131 @@ static __global__ void k_compute_batched_ptrs( ptrs_dst[0*ne23 + i12 + i13*ne12] = ( char *) dst + i12*nbd2 + i13*nbd3; } -static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +// Type traits for mapping ggml types to CUDA/cuBLAS types +template +struct batched_mul_mat_traits; + +template<> +struct batched_mul_mat_traits { + using cuda_type = float; + static inline const cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F; + static inline const cudaDataType_t data_type = CUDA_R_32F; + static inline const ggml_type ggml_type_val = GGML_TYPE_F32; + static inline const float alpha = 1.0f; + static inline const float beta = 0.0f; + static inline const void* get_alpha() { static const float val = alpha; return &val; } + static inline const void* get_beta() { static const float val = beta; return &val; } + static inline auto get_nc_converter(ggml_type src_type) { return ggml_get_to_fp32_nc_cuda(src_type); } +}; + +template<> +struct batched_mul_mat_traits { + using cuda_type = nv_bfloat16; + static inline const cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F; + static inline const cudaDataType_t data_type = CUDA_R_16BF; + static inline const ggml_type ggml_type_val = GGML_TYPE_BF16; + static inline const float alpha = 1.0f; + static inline const float beta = 0.0f; + static inline const void* get_alpha() { static const float val = alpha; return &val; } + static inline const void* get_beta() { static const float val = beta; return &val; } + static inline auto get_nc_converter(ggml_type src_type) { return ggml_get_to_bf16_nc_cuda(src_type); } +}; + +template<> +struct batched_mul_mat_traits { + using cuda_type = half; + static inline const cublasComputeType_t compute_type = CUBLAS_COMPUTE_16F; + static inline const cudaDataType_t data_type = CUDA_R_16F; + static inline const ggml_type ggml_type_val = GGML_TYPE_F16; + static inline const half alpha = 1.0; + static inline const half beta = 0.0; + static inline const void* get_alpha() { static const half val = alpha; return &val; } + static inline const void* get_beta() { static const half val = beta; return &val; } + static inline auto get_nc_converter(ggml_type src_type) { return ggml_get_to_fp16_nc_cuda(src_type); } +}; + +template +static void ggml_cuda_mul_mat_batched_cublas_impl(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + using traits = batched_mul_mat_traits; + using cuda_t = typename traits::cuda_type; + GGML_ASSERT(!ggml_is_transposed(src0)); GGML_ASSERT(!ggml_is_transposed(src1)); - GGML_ASSERT(!ggml_backend_buft_is_cuda_split(src0->buffer->buft)); - GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src0->type == 
src0_type); + GGML_ASSERT(ggml_is_contiguous(dst)); // Byte offsets and tensor dimensions are currently used in an inconsistent way for dst. // As long as dst is contiguous this does not matter though. - GGML_ASSERT(ggml_is_contiguous(dst)); GGML_TENSOR_BINARY_OP_LOCALS const int64_t ne_dst = ggml_nelements(dst); - cudaStream_t main_stream = ctx.stream(); - CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(), main_stream)); - const half * src0_f16 = (const half *) src0->data; float * dst_ddf = (float *) dst->data; - - const half * src1_f16 = (const half *) src1->data; const size_t ts_src1 = ggml_type_size(src1->type); GGML_ASSERT(nb10 == ts_src1); int64_t s11 = nb11 / ts_src1; int64_t s12 = nb12 / ts_src1; int64_t s13 = nb13 / ts_src1; - ggml_cuda_pool_alloc src1_f16_alloc(ctx.pool()); - // convert src1 to fp16 - if (src1->type != GGML_TYPE_F16) { - const to_fp16_nc_cuda_t to_fp16_cuda = ggml_get_to_fp16_nc_cuda(src1->type); - const int64_t ne_src1 = ggml_nelements(src1); - src1_f16_alloc.alloc(ne_src1); - GGML_ASSERT(to_fp16_cuda != nullptr); + const cuda_t * src0_ptr = nullptr; + const cuda_t * src1_ptr = nullptr; - to_fp16_cuda(src1_f16, src1_f16_alloc.get(), ne10, ne11, ne12, ne13, s11, s12, s13, main_stream); + ggml_cuda_pool_alloc src0_alloc(ctx.pool()); + ggml_cuda_pool_alloc src1_alloc(ctx.pool()); + + // Handle src0 + src0_ptr = (const cuda_t *) src0->data; + + // Handle src1 - convert if necessary + if (src1->type == src0_type) { + src1_ptr = (const cuda_t *) src1->data; + } else { + // Convert src1 to target type using traits conversion functions + const int64_t ne_src1 = ggml_nelements(src1); + src1_alloc.alloc(ne_src1); - src1_f16 = src1_f16_alloc.get(); + const auto convert_func = traits::get_nc_converter(src1->type); + GGML_ASSERT(convert_func != nullptr); + convert_func(src1->data, src1_alloc.get(), ne10, ne11, ne12, ne13, s11, s12, s13, main_stream); + src1_ptr = src1_alloc.get(); s11 = ne10; s12 = ne11*s11; s13 = ne12*s12; } - ggml_cuda_pool_alloc dst_f16(ctx.pool()); + // Setup destination buffer + ggml_cuda_pool_alloc dst_temp(ctx.pool()); char * dst_t; - - cublasComputeType_t cu_compute_type = CUBLAS_COMPUTE_16F; - cudaDataType_t cu_data_type = CUDA_R_16F; - - // dst strides size_t nbd2 = dst->nb[2]; size_t nbd3 = dst->nb[3]; - const half alpha_f16 = 1.0f; - const half beta_f16 = 0.0f; - + cublasComputeType_t cu_compute_type = traits::compute_type; + cudaDataType_t cu_data_type = traits::data_type; + cudaDataType_t cu_data_type_a = traits::data_type; + cudaDataType_t cu_data_type_b = traits::data_type; + const void * alpha = traits::get_alpha(); + const void * beta = traits::get_beta(); const float alpha_f32 = 1.0f; - const float beta_f32 = 0.0f; - - const void * alpha = &alpha_f16; - const void * beta = &beta_f16; + const float beta_f32 = 0.0f; if (dst->op_params[0] == GGML_PREC_DEFAULT) { - dst_t = (char *) dst_f16.alloc(ne_dst); - - nbd2 /= sizeof(float) / sizeof(half); - nbd3 /= sizeof(float) / sizeof(half); + if constexpr (src0_type == GGML_TYPE_F32) { + dst_t = (char *) dst_ddf; // Direct F32 output + } else { + dst_t = (char *) dst_temp.alloc(ne_dst); + nbd2 /= sizeof(float) / sizeof(cuda_t); + nbd3 /= sizeof(float) / sizeof(cuda_t); + } } else { dst_t = (char *) dst_ddf; - cu_compute_type = CUBLAS_COMPUTE_32F; - cu_data_type = CUDA_R_32F; - + cu_data_type = CUDA_R_32F; alpha = &alpha_f32; - beta = &beta_f32; + beta = &beta_f32; } int id = ggml_cuda_get_device(); @@ -1856,7 +1904,7 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co 
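// note: on CDNA and RDNA4 devices the block below forces CUBLAS_COMPUTE_32F accumulation,
// overriding the FP16 compute type selected by the F16 traits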
if (GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) { cu_compute_type = CUBLAS_COMPUTE_32F; alpha = &alpha_f32; - beta = &beta_f32; + beta = &beta_f32; } GGML_ASSERT(ne12 % ne02 == 0); @@ -1866,35 +1914,15 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co const int64_t r2 = ne12/ne02; const int64_t r3 = ne13/ne03; -#if 0 - // use cublasGemmEx - { - for (int i13 = 0; i13 < ne13; ++i13) { - for (int i12 = 0; i12 < ne12; ++i12) { - int i03 = i13 / r3; - int i02 = i12 / r2; - - CUBLAS_CHECK( - cublasGemmEx(ctx.cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, - ne01, ne11, ne10, - alpha, (const char *) src0_f16 + i03*nb03 + i02*nb02, CUDA_R_16F, nb01/sizeof(half), - src1_f16 + i13*s13 + i12*s12, CUDA_R_16F, s11, - beta, ( char *) dst_t + i13*nbd3 + i12*nbd2, cu_data_type, ne0, - cu_compute_type, - CUBLAS_GEMM_DEFAULT_TENSOR_OP)); - } - } - } -#else if (r2 == 1 && r3 == 1 && ggml_is_contiguous_2(src0) && ggml_is_contiguous_2(src1)) { // there is no broadcast and src0, src1 are contiguous across dims 2, 3 // use cublasGemmStridedBatchedEx CUBLAS_CHECK( cublasGemmStridedBatchedEx(ctx.cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - alpha, src0_f16, CUDA_R_16F, nb01/nb00, nb02/nb00, // strideA - src1_f16, CUDA_R_16F, s11, s12, // strideB - beta, dst_t, cu_data_type, ne0, ne1*ne0, // strideC + alpha, src0_ptr, cu_data_type_a, nb01/nb00, nb02/nb00, // strideA + src1_ptr, cu_data_type_b, s11, s12, // strideB + beta, dst_t, cu_data_type, ne0, ne1*ne0, // strideC ne12*ne13, cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); @@ -1905,34 +1933,55 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co ggml_cuda_pool_alloc ptrs_src(ctx.pool(), 2*ne23); ggml_cuda_pool_alloc< void *> ptrs_dst(ctx.pool(), 1*ne23); + size_t src1_stride_size = sizeof(cuda_t); + dim3 block_dims(ne13, ne12); k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>( - src0_f16, src1_f16, dst_t, + src0_ptr, src1_ptr, dst_t, ptrs_src.get(), ptrs_dst.get(), ne12, ne13, ne23, nb02, nb03, - src1->type == GGML_TYPE_F16 ? nb12 : s12*sizeof(half), - src1->type == GGML_TYPE_F16 ? nb13 : s13*sizeof(half), + (src1->type == src0_type) ? nb12 : s12*src1_stride_size, + (src1->type == src0_type) ? 
nb13 : s13*src1_stride_size, nbd2, nbd3, r2, r3); + CUDA_CHECK(cudaGetLastError()); CUBLAS_CHECK( cublasGemmBatchedEx(ctx.cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - alpha, (const void **) (ptrs_src.get() + 0*ne23), CUDA_R_16F, nb01/nb00, - (const void **) (ptrs_src.get() + 1*ne23), CUDA_R_16F, s11, - beta, ( void **) (ptrs_dst.get() + 0*ne23), cu_data_type, ne0, + alpha, (const void **) (ptrs_src.get() + 0*ne23), cu_data_type_a, nb01/nb00, + (const void **) (ptrs_src.get() + 1*ne23), cu_data_type_b, s11, + beta, ( void **) (ptrs_dst.get() + 0*ne23), cu_data_type, ne0, ne23, cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } -#endif - if (dst->op_params[0] == GGML_PREC_DEFAULT && cu_data_type == CUDA_R_16F) { - const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); - to_fp32_cuda(dst_f16.get(), dst_ddf, ne_dst, main_stream); + // Convert output back to F32 if needed + if (dst->op_params[0] == GGML_PREC_DEFAULT && cu_data_type != CUDA_R_32F) { + const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(traits::ggml_type_val); + to_fp32_cuda(dst_temp.get(), dst_ddf, ne_dst, main_stream); + } +} + +static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16 || src0->type == GGML_TYPE_F32); + + switch (src0->type) { + case GGML_TYPE_F32: + ggml_cuda_mul_mat_batched_cublas_impl(ctx, src0, src1, dst); + break; + case GGML_TYPE_BF16: + ggml_cuda_mul_mat_batched_cublas_impl(ctx, src0, src1, dst); + break; + case GGML_TYPE_F16: + ggml_cuda_mul_mat_batched_cublas_impl(ctx, src0, src1, dst); + break; + default: + GGML_ABORT("Unsupported type"); } } @@ -1984,6 +2033,12 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name); //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name); + //TODO update for generic tensor parallelism + const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; + bool use_batched_cublas_f16 = src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || !any_gpus_with_slow_fp16); + bool use_batched_cublas_bf16 = src0->type == GGML_TYPE_BF16 && bf16_mma_hardware_available(cc); + bool use_batched_cublas_f32 = src0->type == GGML_TYPE_F32; + if (!split && use_mul_mat_vec) { // the custom F16 vector kernel can be used over batched cuBLAS GEMM // but this is only faster for GPUs without tensor cores or with a thin src0 matrix (particularly KQV in attention) @@ -1992,8 +2047,8 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor ggml_cuda_mul_mat_vec_q(ctx, src0, src1, nullptr, dst); } else if (!split && use_mul_mat_q) { ggml_cuda_mul_mat_q(ctx, src0, src1, nullptr, dst); - } else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || !any_gpus_with_slow_fp16) && - !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) { + } else if (!split && (use_batched_cublas_f16 || use_batched_cublas_bf16 || use_batched_cublas_f32) + && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) { // general KQ + KQV multi-batch without FlashAttention 
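// the call below dispatches on src0->type (GGML_TYPE_F32, GGML_TYPE_BF16 or GGML_TYPE_F16)
// to the templated ggml_cuda_mul_mat_batched_cublas_impl<> introduced earlier in this patch,
// so each use_batched_cublas_* flag checked above corresponds to one trait specialization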
ggml_cuda_mul_mat_batched_cublas(ctx, src0, src1, dst); } else if (use_mul_mat_vec) { diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index a233f1f2fd97a..128d63988f4e6 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -4425,8 +4425,10 @@ static std::vector> make_test_cases_eval() { for (auto nr : {1,4}) { for (uint32_t m = 0; m < 2; ++m) { for (uint32_t k = 0; k < 2; ++k) { - test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 1056 + m, 1, 128 + k, {bs, 1}, {nr, 1}, {0, 2, 1, 3})); - test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128 + m, 1, 1056 + k, {bs, 1}, {nr, 1}, {0, 1, 2, 3}, true)); + for (ggml_type type: {GGML_TYPE_F16, GGML_TYPE_BF16, GGML_TYPE_F32}) { + test_cases.emplace_back(new test_mul_mat(type, GGML_TYPE_F32, 1056 + m, 1, 128 + k, {bs, 1}, {nr, 1}, {0, 2, 1, 3})); + test_cases.emplace_back(new test_mul_mat(type, GGML_TYPE_F32, 128 + m, 1, 1056 + k, {bs, 1}, {nr, 1}, {0, 1, 2, 3}, true)); + } } } } From e5fa50b1c9506907133e1fc2885e95fd49ebb252 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sun, 29 Jun 2025 02:43:36 -0500 Subject: [PATCH 169/192] vulkan: Add fusion support for RMS_NORM+MUL (#14366) * vulkan: Add fusion support for RMS_NORM+MUL - Add a use_count to ggml_tensor, so we can detect if an output is used more than once. - Change the ggml-vulkan rms_norm shader to optionally multiply by another tensor. - Add detection logic and basic fusion logic in ggml-vulkan. - Add some testing support for fusion. Rather than computing one node at a time, allow for computing the whole graph and just testing one node's results. Add rms_norm_mul tests and enable a llama test. * extract some common fusion logic * fix -Winconsistent-missing-override * move ggml_can_fuse to a common function * build fix * C and C++ versions of can_fuse * move use count to the graph to avoid data races and double increments when used in multiple threads * use hash table lookup to find node index * change use_counts to be indexed by hash table slot * minimize hash lookups style fixes * last node doesn't need single use. fix type. handle mul operands being swapped. 
* remove redundant parameter --------- Co-authored-by: slaren --- ggml/include/ggml-backend.h | 2 +- ggml/src/ggml-backend.cpp | 54 +++++++++----- ggml/src/ggml-impl.h | 64 ++++++++++++++++ ggml/src/ggml-vulkan/ggml-vulkan.cpp | 58 +++++++++++---- .../ggml-vulkan/vulkan-shaders/rms_norm.comp | 15 +++- .../vulkan-shaders/vulkan-shaders-gen.cpp | 2 +- ggml/src/ggml.c | 46 ++++++++---- tests/test-backend-ops.cpp | 74 ++++++++++++++++++- 8 files changed, 261 insertions(+), 54 deletions(-) diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h index 778927f68217a..a2977ea2e56d9 100644 --- a/ggml/include/ggml-backend.h +++ b/ggml/include/ggml-backend.h @@ -339,7 +339,7 @@ extern "C" { typedef bool (*ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data); // Compare the output of two backends - GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data); + GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data, struct ggml_tensor * test_node); // Tensor initialization GGML_API enum ggml_status ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr); diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp index b1050ad59c26a..788861a365fab 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp @@ -817,8 +817,9 @@ static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, str } if (sched->debug > 1) { ggml_backend_t tensor_backend = ggml_backend_sched_get_tensor_backend(sched, node); - GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]:", i, ggml_op_name(node->op), node->name, - fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node)); + GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s] use=%d:", i, ggml_op_name(node->op), node->name, + fmt_size(ggml_nbytes(node)), tensor_backend ? 
ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node), + graph->use_counts[ggml_hash_find(&graph->visited_hash_set, node)]); for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { @@ -1826,7 +1827,7 @@ void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) { ggml_free(copy.ctx_unallocated); } -bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) { +bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data, struct ggml_tensor * test_node) { struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph); if (copy.buffer == NULL) { return false; @@ -1837,28 +1838,45 @@ bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t assert(g1->n_nodes == g2->n_nodes); - for (int i = 0; i < g1->n_nodes; i++) { - struct ggml_tensor * t1 = g1->nodes[i]; - struct ggml_tensor * t2 = g2->nodes[i]; + if (test_node != nullptr) { + // Compute the whole graph and only test the output for a specific tensor + ggml_backend_graph_compute(backend1, g1); + ggml_backend_graph_compute(backend2, g2); - assert(t1->op == t2->op && ggml_are_same_layout(t1, t2)); + int test_node_idx = -1; + for (int i = 0; i < g1->n_nodes; i++) { + struct ggml_tensor * t1 = g1->nodes[i]; + if (t1 == test_node) { + test_node_idx = i; + break; + } + } + GGML_ASSERT(test_node_idx != -1); - struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1); - struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1); + callback(test_node_idx, g1->nodes[test_node_idx], g2->nodes[test_node_idx], user_data); + } else { + for (int i = 0; i < g1->n_nodes; i++) { + struct ggml_tensor * t1 = g1->nodes[i]; + struct ggml_tensor * t2 = g2->nodes[i]; - ggml_backend_graph_compute(backend1, &g1v); - ggml_backend_graph_compute(backend2, &g2v); + assert(t1->op == t2->op && ggml_are_same_layout(t1, t2)); - if (ggml_is_view_op(t1->op)) { - continue; - } + struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1); + struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1); - // compare results, calculate rms etc - if (!callback(i, t1, t2, user_data)) { - break; + ggml_backend_graph_compute(backend1, &g1v); + ggml_backend_graph_compute(backend2, &g2v); + + if (ggml_is_view_op(t1->op)) { + continue; + } + + // compare results, calculate rms etc + if (!callback(i, t1, t2, user_data)) { + break; + } } } - ggml_backend_graph_copy_free(copy); return true; diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h index 57761644f431a..4972558c98b81 100644 --- a/ggml/src/ggml-impl.h +++ b/ggml/src/ggml-impl.h @@ -301,6 +301,7 @@ struct ggml_cgraph { struct ggml_tensor ** grads; // the outputs of these tensors are the gradients of the nodes struct ggml_tensor ** grad_accs; // accumulators for node gradients struct ggml_tensor ** leafs; // tensors with constant data + int32_t * use_counts;// number of uses of each tensor, indexed by hash table slot struct ggml_hash_set visited_hash_set; @@ -467,13 +468,76 @@ static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) { #define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x) #define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x) +// return true if the node's results are only used by N other nodes +// and can be fused into their calculations. 
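+// A node has exactly n_uses uses when its slot in the graph's use_counts table
+// (indexed by the visited_hash_set position) equals n_uses, it is not a view
+// (the intermediate result could still be reached through the view source), and
+// it has not been flagged as a user-requested graph output.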
+static inline bool ggml_node_has_n_uses(const struct ggml_cgraph * cgraph, int node_idx, int32_t n_uses) { + const struct ggml_tensor * node = cgraph->nodes[node_idx]; + + // check the use count against how many we're replacing + size_t hash_pos = ggml_hash_find(&cgraph->visited_hash_set, node); + if (!ggml_bitset_get(cgraph->visited_hash_set.used, hash_pos) || cgraph->use_counts[hash_pos] != n_uses) { + return false; + } + + // if node is a view, some other node might be using the intermediate result + // via the view source. + if (node->view_src) { + return false; + } + + // If the user requested output for the node, can't fuse + if (node->flags & GGML_TENSOR_FLAG_OUTPUT) { + return false; + } + + return true; +} + +// Returns true if nodes [i, i+ops.size()) are the sequence of ggml_ops in ops[] +// and are fusable. Nodes are considered fusable according to this function if: +// - all nodes except the last have only one use and are not views/outputs (see ggml_node_has_N_uses). +// - all nodes except the last are a src of the following node. +// - all nodes are the same shape. +// TODO: Consider allowing GGML_OP_NONE nodes in between +static inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, const enum ggml_op * ops, int num_ops) { + if (node_idx + num_ops > cgraph->n_nodes) { + return false; + } + + for (int i = 0; i < num_ops; ++i) { + struct ggml_tensor * node = cgraph->nodes[node_idx + i]; + if (node->op != ops[i]) { + return false; + } + if (i < num_ops - 1 && !ggml_node_has_n_uses(cgraph, node_idx + i, 1)) { + return false; + } + if (i > 0) { + struct ggml_tensor * prev = cgraph->nodes[node_idx + i - 1]; + if (node->src[0] != prev && node->src[1] != prev) { + return false; + } + if (!ggml_are_same_shape(node, prev)) { + return false; + } + } + } + return true; +} + #ifdef __cplusplus } #endif #ifdef __cplusplus +#include #include +// nicer C++ syntax for ggml_can_fuse +inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, std::initializer_list ops) { + return ggml_can_fuse(cgraph, node_idx, ops.begin(), (int)ops.size()); +} + // expose GGUF internals for test code GGML_API size_t gguf_type_size(enum gguf_type type); GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params); diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 996ccbf66b016..aebcc03915f5f 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -425,6 +425,7 @@ struct vk_device_struct { vk_pipeline pipeline_norm_f32; vk_pipeline pipeline_group_norm_f32; vk_pipeline pipeline_rms_norm_f32; + vk_pipeline pipeline_rms_norm_mul_f32; vk_pipeline pipeline_rms_norm_back_f32; vk_pipeline pipeline_l2_norm_f32; @@ -978,6 +979,10 @@ struct ggml_backend_vk_context { vk_command_pool compute_cmd_pool; vk_command_pool transfer_cmd_pool; + + // number of additional consecutive nodes that are being fused with the + // node currently being processed + uint32_t num_additional_fused_ops {}; }; static void * const vk_ptr_base = (void *)(uintptr_t) 0x1000; // NOLINT @@ -2655,7 +2660,8 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_norm_f32, "norm_f32", norm_f32_len, norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_group_norm_f32, "group_norm_f32", group_norm_f32_len, group_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, 
{}, 1); - ggml_vk_create_pipeline(device, device->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {1, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {0, 0}, 1); + ggml_vk_create_pipeline(device, device->pipeline_rms_norm_mul_f32, "rms_norm_mul_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1, 1, 1}, {0, 1}, 1); ggml_vk_create_pipeline(device, device->pipeline_rms_norm_back_f32, "rms_norm_back_f32", rms_norm_back_f32_len, rms_norm_back_f32_data, "main", 3, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_l2_norm_f32, "l2_norm_f32", l2_norm_f32_len, l2_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); @@ -6430,7 +6436,7 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const return nullptr; case GGML_OP_RMS_NORM: if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { - return ctx->device->pipeline_rms_norm_f32; + return ctx->num_additional_fused_ops > 0 ? ctx->device->pipeline_rms_norm_mul_f32 : ctx->device->pipeline_rms_norm_f32; } return nullptr; case GGML_OP_RMS_NORM_BACK: @@ -7530,18 +7536,19 @@ static void ggml_vk_group_norm(ggml_backend_vk_context * ctx, vk_context& subctx ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_GROUP_NORM, { group_size, 0, eps, 0.0f }, dryrun); } -static void ggml_vk_rms_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) { +static void ggml_vk_rms_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) { float * op_params = (float *)dst->op_params; const uint32_t src0_type_size = ggml_type_size(src0->type); + const uint32_t src1_type_size = ggml_type_size(src1->type); const uint32_t dst_type_size = ggml_type_size(dst->type); - ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_RMS_NORM, { + ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_RMS_NORM, { (uint32_t)ggml_nelements(src0), - (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size, - (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size, + (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2],(uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size, + (uint32_t)src1->ne[0], (uint32_t)src1->ne[1], (uint32_t)src1->ne[2],(uint32_t)src1->ne[3], (uint32_t)src1->nb[0] / src1_type_size, (uint32_t)src1->nb[1] / src1_type_size, (uint32_t)src1->nb[2] / src1_type_size, (uint32_t)src1->nb[3] / src1_type_size, + (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2],(uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) 
dst->nb[3] / dst_type_size, 0, - op_params[0], 0.0f, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + op_params[0], 0.0f, 0, }, dryrun); } @@ -8736,7 +8743,8 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context* ctx, ggml_tensor* t // Returns true if node has enqueued work into the queue, false otherwise // If submit is true the current all operations queued so far are being submitted to Vulkan to overlap cmdlist creation and GPU execution. -static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * node, int node_idx, ggml_tensor *node_begin, int node_idx_begin, bool dryrun, bool last_node, bool almost_ready, bool submit){ +static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, int node_idx, ggml_tensor *node_begin, int node_idx_begin, bool dryrun, bool last_node, bool almost_ready, bool submit){ + ggml_tensor * node = cgraph->nodes[node_idx]; if (ggml_is_empty(node) || !node->buffer) { return false; } @@ -8974,8 +8982,14 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod break; case GGML_OP_RMS_NORM: - ggml_vk_rms_norm(ctx, compute_ctx, src0, node, dryrun); - + if (ctx->num_additional_fused_ops > 0) { + // fused rms_norm + mul + ggml_tensor *mul = cgraph->nodes[node_idx + 1]; + ggml_tensor *other_src = mul->src[0] == node ? mul->src[1] : mul->src[0]; + ggml_vk_rms_norm(ctx, compute_ctx, src0, other_src, mul, dryrun); + } else { + ggml_vk_rms_norm(ctx, compute_ctx, src0, src0, node, dryrun); + } break; case GGML_OP_RMS_NORM_BACK: ggml_vk_rms_norm_back(ctx, compute_ctx, src0, src1, node, dryrun); @@ -9710,10 +9724,15 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg uint64_t total_mat_mul_bytes = 0; for (int i = 0; i < cgraph->n_nodes; i++) { - ggml_vk_build_graph(ctx, cgraph->nodes[i], i, nullptr, 0, true, false, false, false); + if (ggml_can_fuse(cgraph, i, { GGML_OP_RMS_NORM, GGML_OP_MUL })) { + ctx->num_additional_fused_ops = 1; + } + ggml_vk_build_graph(ctx, cgraph, i, nullptr, 0, true, false, false, false); if (cgraph->nodes[i]->op == GGML_OP_MUL_MAT || cgraph->nodes[i]->op == GGML_OP_MUL_MAT_ID) { total_mat_mul_bytes += ggml_nbytes(cgraph->nodes[i]->src[0]); } + i += ctx->num_additional_fused_ops; + ctx->num_additional_fused_ops = 0; } if (ctx->device->need_compiles) { ggml_vk_load_shaders(ctx->device); @@ -9775,14 +9794,18 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg mul_mat_bytes += ggml_nbytes(cgraph->nodes[i]->src[0]); } + if (ggml_can_fuse(cgraph, i, { GGML_OP_RMS_NORM, GGML_OP_MUL })) { + ctx->num_additional_fused_ops = 1; + } + // Signal the almost_ready fence when the graph is mostly complete (< 20% remaining) bool almost_ready = (cgraph->n_nodes - i) < cgraph->n_nodes / 5; bool submit = (submitted_nodes >= nodes_per_submit) || (mul_mat_bytes >= mul_mat_bytes_per_submit) || - (i == last_node) || + (i + ctx->num_additional_fused_ops == last_node) || (almost_ready && !ctx->almost_ready_fence_pending); - bool enqueued = ggml_vk_build_graph(ctx, cgraph->nodes[i], i, cgraph->nodes[submit_node_idx], submit_node_idx, false, i == last_node, almost_ready, submit); + bool enqueued = ggml_vk_build_graph(ctx, cgraph, i, cgraph->nodes[submit_node_idx], submit_node_idx, false, i + ctx->num_additional_fused_ops == last_node, almost_ready, submit); if (vk_perf_logger_enabled) { if (ctx->compute_ctx.expired()) { @@ -9792,7 +9815,10 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg } else { compute_ctx = 
ctx->compute_ctx.lock(); } - compute_ctx->s->buffer.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->device->query_pool, i+1); + // If there are fused ops, just write out timestamps for all nodes to keep the accounting simple + for (int j = 0; j < ctx->num_additional_fused_ops + 1; ++j) { + compute_ctx->s->buffer.writeTimestamp(vk::PipelineStageFlagBits::eAllCommands, ctx->device->query_pool, i+j+1); + } } if (enqueued) { @@ -9814,6 +9840,8 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg } submit_count++; } + i += ctx->num_additional_fused_ops; + ctx->num_additional_fused_ops = 0; } if (vk_perf_logger_enabled) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp index deb8ee9960f58..6428ca7ba3300 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp @@ -1,11 +1,13 @@ #version 450 -#include "generic_unary_head.comp" +#include "generic_binary_head.comp" #include "types.comp" #extension GL_EXT_control_flow_attributes : enable #define BLOCK_SIZE 512 +layout (constant_id = 1) const bool do_multiply = false; + layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in; shared FLOAT_TYPE sum[BLOCK_SIZE]; @@ -25,6 +27,7 @@ void main() { const uint stride_sample = p.nb03; uint32_t a_offset = samp*stride_sample + channel*stride_channel + row*stride_row + get_aoffset(); + uint32_t b_offset = src1_idx(0, row, channel, samp) + get_boffset(); uint32_t d_offset = ((samp*nchannels + channel)*nrows + row)*ncols + get_doffset(); sum[tid] = FLOAT_TYPE(0.0f); // partial sum for thread in warp @@ -46,7 +49,13 @@ void main() { const FLOAT_TYPE mean = sum[0] / FLOAT_TYPE(ncols); const FLOAT_TYPE scale = inversesqrt(mean + FLOAT_TYPE(p.param1)); - [[unroll]] for (uint col = tid; col < ncols; col += BLOCK_SIZE) { - data_d[d_offset + col] = D_TYPE(scale * FLOAT_TYPE(data_a[a_offset + col])); + if (do_multiply) { + [[unroll]] for (uint col = tid; col < ncols; col += BLOCK_SIZE) { + data_d[d_offset + col] = D_TYPE(scale * FLOAT_TYPE(data_a[a_offset + col]) * FLOAT_TYPE(data_b[b_offset + col])); + } + } else { + [[unroll]] for (uint col = tid; col < ncols; col += BLOCK_SIZE) { + data_d[d_offset + col] = D_TYPE(scale * FLOAT_TYPE(data_a[a_offset + col])); + } } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index c63345ec8b4b6..a207b98c60e51 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -497,7 +497,7 @@ void process_shaders() { // Norms string_to_spv("norm_f32", "norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}})); string_to_spv("group_norm_f32", "group_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}})); - string_to_spv("rms_norm_f32", "rms_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}})); + string_to_spv("rms_norm_f32", "rms_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}})); string_to_spv("rms_norm_back_f32", "rms_norm_back.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}})); string_to_spv("l2_norm_f32", "l2_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}})); diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 3d04f80ef4f90..1262236c0347f 100644 --- 
a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -5841,19 +5841,32 @@ static void ggml_compute_backward( GGML_ASSERT(!src2_needs_grads || ggml_are_same_shape(src2, cgraph->grads[isrc2])); } -static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) { +static size_t ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) { // check if already visited - if (ggml_hash_insert(&cgraph->visited_hash_set, node) == GGML_HASHSET_ALREADY_EXISTS) { - return; + size_t node_hash_pos = ggml_hash_find(&cgraph->visited_hash_set, node); + GGML_ASSERT(node_hash_pos != GGML_HASHSET_FULL); + if (!ggml_bitset_get(cgraph->visited_hash_set.used, node_hash_pos)) { + // This is the first time we see this node in the current graph. + cgraph->visited_hash_set.keys[node_hash_pos] = node; + ggml_bitset_set(cgraph->visited_hash_set.used, node_hash_pos); + cgraph->use_counts[node_hash_pos] = 0; + } else { + // already visited + return node_hash_pos; } for (int i = 0; i < GGML_MAX_SRC; ++i) { const int k = (cgraph->order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? i : (cgraph->order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? (GGML_MAX_SRC-1-i) : - /* unknown order, just fall back to using i*/ i; - if (node->src[k]) { - ggml_visit_parents(cgraph, node->src[k]); + /* unknown order, just fall back to using i */ i; + + struct ggml_tensor * src = node->src[k]; + if (src) { + size_t src_hash_pos = ggml_visit_parents(cgraph, src); + + // Update the use count for this operand. + cgraph->use_counts[src_hash_pos]++; } } @@ -5877,6 +5890,8 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * cgraph->nodes[cgraph->n_nodes] = node; cgraph->n_nodes++; } + + return node_hash_pos; } static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) { @@ -6014,6 +6029,7 @@ static size_t ggml_graph_nbytes(size_t size, bool grads) { incr_ptr_aligned(&p, sizeof(struct ggml_cgraph), 1); incr_ptr_aligned(&p, size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); // nodes incr_ptr_aligned(&p, size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); // leafs + incr_ptr_aligned(&p, hash_size * sizeof(int32_t), sizeof(int32_t)); // use_counts incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); // hash keys if (grads) { incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); // grads @@ -6043,11 +6059,12 @@ struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t siz void * p = cgraph + 1; - struct ggml_tensor ** nodes_ptr = incr_ptr_aligned(&p, size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); - struct ggml_tensor ** leafs_ptr = incr_ptr_aligned(&p, size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); - struct ggml_tensor ** hash_keys_ptr = incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); - struct ggml_tensor ** grads_ptr = grads ? incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)) : NULL; - struct ggml_tensor ** grad_accs_ptr = grads ? 
incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)) : NULL; + struct ggml_tensor ** nodes_ptr = incr_ptr_aligned(&p, size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); + struct ggml_tensor ** leafs_ptr = incr_ptr_aligned(&p, size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); + int32_t * use_counts_ptr = incr_ptr_aligned(&p, hash_size * sizeof(int32_t), sizeof(int32_t)); + struct ggml_tensor ** hash_keys_ptr = incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); + struct ggml_tensor ** grads_ptr = grads ? incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)) : NULL; + struct ggml_tensor ** grad_accs_ptr = grads ? incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)) : NULL; ggml_bitset_t * hash_used = incr_ptr_aligned(&p, ggml_bitset_size(hash_size) * sizeof(ggml_bitset_t), sizeof(ggml_bitset_t)); @@ -6062,6 +6079,7 @@ struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t siz /*.grads =*/ grads_ptr, /*.grad_accs =*/ grad_accs_ptr, /*.leafs =*/ leafs_ptr, + /*.use_counts =*/ use_counts_ptr, /*.hash_table =*/ { hash_size, hash_used, hash_keys_ptr }, /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT, }; @@ -6088,7 +6106,8 @@ struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph0, int i0, int i1) /*.grads =*/ NULL, // gradients would need visited_hash_set /*.grad_accs =*/ NULL, /*.leafs =*/ NULL, - /*.visited_hash_set =*/ { 0, NULL, NULL }, + /*.use_counts =*/ cgraph0->use_counts, + /*.visited_hash_set =*/ cgraph0->visited_hash_set, /*.order =*/ cgraph0->order, }; @@ -6115,7 +6134,8 @@ void ggml_graph_cpy(struct ggml_cgraph * src, struct ggml_cgraph * dst) { for (size_t i = 0; i < src->visited_hash_set.size; ++i) { // copy all hashset keys (tensors) that are in use if (ggml_bitset_get(src->visited_hash_set.used, i)) { - ggml_hash_insert(&dst->visited_hash_set, src->visited_hash_set.keys[i]); + size_t new_hash_pos = ggml_hash_insert(&dst->visited_hash_set, src->visited_hash_set.keys[i]); + dst->use_counts[new_hash_pos] = src->use_counts[i]; } } diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 128d63988f4e6..ec088bae2a65f 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -382,6 +382,8 @@ struct test_case { return 0; } + virtual bool run_whole_graph() { return false; } + ggml_cgraph * gf = nullptr; ggml_cgraph * gb = nullptr; @@ -574,7 +576,7 @@ struct test_case { GGML_UNUSED(index); }; - const bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud); + const bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud, run_whole_graph() ? 
out : nullptr); if (!cmp_ok) { printf("compare failed "); @@ -1896,6 +1898,63 @@ struct test_rms_norm_back : public test_case { } }; +// GGML_OP_RMS_NORM + GGML_OP_MUL +struct test_rms_norm_mul : public test_case { + const ggml_type type; + const std::array ne; + const float eps; + + std::string op_desc(ggml_tensor * t) override { + GGML_UNUSED(t); + return "RMS_NORM_MUL"; + } + + bool run_whole_graph() override { return true; } + + std::string vars() override { + return VARS_TO_STR3(type, ne, eps); + } + + test_rms_norm_mul(ggml_type type = GGML_TYPE_F32, + std::array ne = {64, 5, 4, 3}, + float eps = 1e-6f) + : type(type), ne(ne), eps(eps) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); + ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data()); + ggml_set_param(a); + ggml_set_name(a, "a"); + ggml_set_param(b); + ggml_set_name(b, "b"); + + // Use a and b early, so we don't end up with an OP_NONE between rms_norm and mul + a = ggml_add(ctx, a, b); + ggml_tensor * out = ggml_mul(ctx, ggml_rms_norm(ctx, a, eps), b); + ggml_set_name(out, "out"); + + return out; + } + + void initialize_tensors(ggml_context * ctx) override { + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + init_tensor_uniform(t, -10.f, 10.f); + } + } + + double max_nmse_err() override { + return 1e-6; + } + + float grad_eps() override { + return 1.0f; + } + + bool grad_precise() override { + return true; + } +}; + // GGML_OP_SSM_CONV struct test_ssm_conv : public test_case { const ggml_type type; @@ -3736,6 +3795,7 @@ struct test_llama : public test_llm { static constexpr float attn_factor = 1.0f; static constexpr float beta_fast = 32.0f; static constexpr float beta_slow = 1.0f; + bool fused; std::string op_desc(ggml_tensor * t) override { GGML_UNUSED(t); @@ -3751,7 +3811,9 @@ struct test_llama : public test_llm { return 2e-3; } - test_llama(int n_tokens = 1) + bool run_whole_graph() override { return fused; } + + test_llama(int n_tokens = 1, bool fused = false) : test_llm({ /*n_vocab =*/ 32000, /*n_embd =*/ 3200, @@ -3763,7 +3825,9 @@ struct test_llama : public test_llm { /*f_norm_eps =*/ 0.f, /*f_norm_rms_eps =*/ 1e-5f, /*n_tokens =*/ n_tokens, - }) { + }) + , fused(fused) + { } ggml_tensor * build_graph(ggml_context * ctx) override { @@ -4306,6 +4370,9 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_rms_norm_back(GGML_TYPE_F32, {64, 5, 4, 3}, eps)); test_cases.emplace_back(new test_l2_norm (GGML_TYPE_F32, {64, 5, 4, 3}, eps)); } + for (float eps : {0.0f, 1e-6f, 1e-4f, 1e-1f}) { + test_cases.emplace_back(new test_rms_norm_mul(GGML_TYPE_F32, {64, 5, 4, 3}, eps)); + } test_cases.emplace_back(new test_l2_norm(GGML_TYPE_F32, {64, 5, 4, 3}, 1e-12f)); @@ -4677,6 +4744,7 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_opt_step_adamw(GGML_TYPE_F32, {10, 5, 4, 3})); + test_cases.emplace_back(new test_llama(2, true)); // these tests are disabled to save execution time, but they can be handy for debugging #if 0 test_cases.emplace_back(new test_llama(1)); From 16adbe1931205c7c8001c7180c94a703755ae665 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Sun, 29 Jun 2025 11:04:10 +0200 Subject: [PATCH 170/192] ggml : implement REGLU/GEGLU/SWIGLU ops (#14158) * implement unary REGLU/GEGLU/SWIGLU cpu ops * relax constraints * duplicate shape of source * fix ggml_vec_geglu_f16 * special case gated ops * implement unary 
REGLU/GEGLU/SWIGLU cuda ops * tighten constraints again * refactor into GGML_GLU_OP * metal : add glu kernels ggml-ci * add CUDA_GLU_BLOCK_SIZE [no ci] * more constraints and use 64bit ints ggml-ci * 64bit multiplication [no ci] * implement swapped variants (cpu/cuda) * update comment [no ci] ggml-ci * Vulkan: Add GLU ops and shaders * SYCL: Implement fused kernel GEGLU, SWIGLU and REGLU for single up+gate * ggml : implement GLU for split up/gate (#14181) * implement GLU for split up/gate * add tests for ggml_glu_split * Vulkan: Implement glu_split logic and shader support * add split to logging [no ci] * SYCL: refactor element_size ops and add split up and gate support to gated kernels * SYCL: switch GEGLU to use tanh approximation --------- Co-authored-by: 0cc4m Co-authored-by: Akarshan * GGML: increase OP count in assertion * Refactor: Optimize SYCL element-wise operations with unary function inlining This commit refactors the SYCL element-wise operations to improve performance by: - Inlining unary operations (sgn, abs, elu, gelu, silu, etc.) to reduce kernel launch overhead. - Introducing helper functions `op_xxx` for each unary operation to encapsulate the logic. - Replacing direct kernel calls with calls to these inlined functions. - Using `__dpct_inline__` to encourage compiler inlining. - Minor code cleanup and consistency improvements. The changes aim to reduce kernel launch overhead and improve the overall efficiency of element-wise operations on SYCL devices. * vulkan: Increase workgroup size for GLU, for performance (#14345) * vulkan: Increase workgroup size for GLU, for performance * vulkan: change GLU shaders to do one element per invocation rather than one row per workgroup * merge fix * metal : add support for split and swap ggml-ci --------- Co-authored-by: Georgi Gerganov Co-authored-by: 0cc4m Co-authored-by: Akarshan Co-authored-by: Jeff Bolz --- ggml/include/ggml.h | 69 + ggml/src/ggml-cpu/ggml-cpu.c | 16 + ggml/src/ggml-cpu/ops.cpp | 457 +++++ ggml/src/ggml-cpu/ops.h | 1 + ggml/src/ggml-cpu/vec.cpp | 24 + ggml/src/ggml-cpu/vec.h | 54 + ggml/src/ggml-cuda/ggml-cuda.cu | 25 + ggml/src/ggml-cuda/unary.cu | 89 + ggml/src/ggml-cuda/unary.cuh | 7 + ggml/src/ggml-metal/ggml-metal-impl.h | 11 + ggml/src/ggml-metal/ggml-metal.m | 71 + ggml/src/ggml-metal/ggml-metal.metal | 64 + ggml/src/ggml-sycl/element_wise.cpp | 1661 +++++++---------- ggml/src/ggml-sycl/element_wise.hpp | 25 +- ggml/src/ggml-sycl/ggml-sycl.cpp | 25 + ggml/src/ggml-vulkan/ggml-vulkan.cpp | 117 +- .../src/ggml-vulkan/vulkan-shaders/geglu.comp | 13 + .../ggml-vulkan/vulkan-shaders/glu_head.comp | 15 + .../ggml-vulkan/vulkan-shaders/glu_main.comp | 29 + .../src/ggml-vulkan/vulkan-shaders/reglu.comp | 9 + .../ggml-vulkan/vulkan-shaders/swiglu.comp | 9 + .../vulkan-shaders/vulkan-shaders-gen.cpp | 7 + ggml/src/ggml.c | 138 +- src/llama-graph.cpp | 62 +- src/llama-graph.h | 1 + tests/test-backend-ops.cpp | 116 ++ 26 files changed, 2044 insertions(+), 1071 deletions(-) create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/geglu.comp create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/glu_head.comp create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/glu_main.comp create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/reglu.comp create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/swiglu.comp diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 2b1bd6e0f48b9..e5dda969a38fe 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -520,6 +520,8 @@ extern "C" { GGML_OP_CROSS_ENTROPY_LOSS_BACK, 
GGML_OP_OPT_STEP_ADAMW, + GGML_OP_GLU, + GGML_OP_COUNT, }; @@ -543,6 +545,14 @@ extern "C" { GGML_UNARY_OP_COUNT, }; + enum ggml_glu_op { + GGML_GLU_OP_REGLU, + GGML_GLU_OP_GEGLU, + GGML_GLU_OP_SWIGLU, + + GGML_GLU_OP_COUNT, + }; + enum ggml_object_type { GGML_OBJECT_TYPE_TENSOR, GGML_OBJECT_TYPE_GRAPH, @@ -658,6 +668,7 @@ extern "C" { GGML_API const char * ggml_op_symbol(enum ggml_op op); GGML_API const char * ggml_unary_op_name(enum ggml_unary_op op); + GGML_API const char * ggml_glu_op_name(enum ggml_glu_op op); GGML_API const char * ggml_op_desc(const struct ggml_tensor * t); // unary or op name GGML_API size_t ggml_element_size(const struct ggml_tensor * tensor); @@ -762,6 +773,7 @@ extern "C" { GGML_API void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3); GGML_API enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor); + GGML_API enum ggml_glu_op ggml_get_glu_op(const struct ggml_tensor * tensor); GGML_API void * ggml_get_data (const struct ggml_tensor * tensor); GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor); @@ -1090,6 +1102,63 @@ extern "C" { struct ggml_context * ctx, struct ggml_tensor * a); + // gated linear unit ops + // A: n columns, r rows, + // result is n / 2 columns, r rows, + // expects gate in second half of row, unless swapped is true + GGML_API struct ggml_tensor * ggml_glu( + struct ggml_context * ctx, + struct ggml_tensor * a, + enum ggml_glu_op op, + bool swapped); + + GGML_API struct ggml_tensor * ggml_reglu( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_reglu_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_geglu( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_geglu_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_swiglu( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_swiglu_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a); + + // A: n columns, r rows, + // B: n columns, r rows, + GGML_API struct ggml_tensor * ggml_glu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + enum ggml_glu_op op); + + GGML_API struct ggml_tensor * ggml_reglu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + + GGML_API struct ggml_tensor * ggml_geglu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + + GGML_API struct ggml_tensor * ggml_swiglu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + // normalize along rows GGML_API struct ggml_tensor * ggml_norm( struct ggml_context * ctx, diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index 2042ee71f1f80..1d68cde71a65e 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -1949,6 +1949,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_unary(params, tensor); } break; + case GGML_OP_GLU: + { + ggml_compute_forward_glu(params, tensor); + } break; case GGML_OP_GET_REL_POS: { ggml_compute_forward_get_rel_pos(params, tensor); @@ -2159,6 +2163,18 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { GGML_ABORT("fatal error"); } break; + case GGML_OP_GLU: + switch 
(ggml_get_glu_op(node)) { + case GGML_GLU_OP_REGLU: + case GGML_GLU_OP_GEGLU: + case GGML_GLU_OP_SWIGLU: + { + n_tasks = n_threads; + } break; + default: + GGML_ABORT("fatal error"); + } + break; case GGML_OP_SILU_BACK: case GGML_OP_MUL: case GGML_OP_DIV: diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 9f17ea43c8553..27586ed1fdb2c 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -3184,6 +3184,435 @@ void ggml_compute_forward_silu_back( } } +// ggml_compute_forward_reglu + +static void ggml_compute_forward_reglu_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1*src0_o); + float * src1_p = (float *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_reglu_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_reglu_f16( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 
0 : nc; + } + + ggml_vec_reglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_reglu( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_reglu_f32(params, dst); + } break; + case GGML_TYPE_F16: + { + ggml_compute_forward_reglu_f16(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu + +static void ggml_compute_forward_geglu_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1*src0_o); + float * src1_p = (float *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_f16( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_f32(params, dst); + } break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_f16(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_swiglu + +static void ggml_compute_forward_swiglu_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1*src0_o); + float * src1_p = (float *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_swiglu_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu_f16( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_swiglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_swiglu_f32(params, dst); + } break; + case GGML_TYPE_F16: + { + ggml_compute_forward_swiglu_f16(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + // ggml_compute_forward_norm static void ggml_compute_forward_norm_f32( @@ -8052,6 +8481,34 @@ void ggml_compute_forward_unary( } } +//ggml_compute_forward_glu + +void ggml_compute_forward_glu( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_glu_op op = ggml_get_glu_op(dst); + + switch (op) { + case GGML_GLU_OP_REGLU: + { + ggml_compute_forward_reglu(params, dst); + } break; + case GGML_GLU_OP_GEGLU: + { + ggml_compute_forward_geglu(params, dst); + } break; + case GGML_GLU_OP_SWIGLU: + { + ggml_compute_forward_swiglu(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + // ggml_compute_forward_get_rel_pos static void ggml_compute_forward_get_rel_pos_f16( diff --git a/ggml/src/ggml-cpu/ops.h b/ggml/src/ggml-cpu/ops.h index 3a395fdcd9f04..5b384e4ba5fce 100644 --- a/ggml/src/ggml-cpu/ops.h +++ b/ggml/src/ggml-cpu/ops.h @@ -94,6 +94,7 @@ void ggml_compute_forward_ssm_scan(const struct ggml_compute_params * params, st void ggml_compute_forward_win_part(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_win_unpart(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_unary(const struct ggml_compute_params * params, struct ggml_tensor * dst); +void ggml_compute_forward_glu(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_get_rel_pos(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_add_rel_pos(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_rwkv_wkv6(const struct ggml_compute_params * params, struct ggml_tensor * dst); diff --git a/ggml/src/ggml-cpu/vec.cpp b/ggml/src/ggml-cpu/vec.cpp index 5e34d79a1695f..ed5d7aefc35b3 100644 --- a/ggml/src/ggml-cpu/vec.cpp +++ 
b/ggml/src/ggml-cpu/vec.cpp @@ -254,6 +254,30 @@ void ggml_vec_silu_f32(const int n, float * y, const float * x) { } } +void ggml_vec_swiglu_f32(const int n, float * y, const float * x, const float * g) { + int i = 0; +#if defined(__AVX512F__) && defined(__AVX512DQ__) + for (; i + 15 < n; i += 16) { + _mm512_storeu_ps(y + i, _mm512_mul_ps(ggml_v_silu(_mm512_loadu_ps(x + i)), _mm512_loadu_ps(g + i))); + } +#elif defined(__AVX2__) && defined(__FMA__) + for (; i + 7 < n; i += 8) { + _mm256_storeu_ps(y + i, _mm256_mul_ps(ggml_v_silu(_mm256_loadu_ps(x + i)), _mm256_loadu_ps(g + i))); + } +#elif defined(__SSE2__) + for (; i + 3 < n; i += 4) { + _mm_storeu_ps(y + i, _mm_mul_ps(ggml_v_silu(_mm_loadu_ps(x + i)), _mm_loadu_ps(g + i))); + } +#elif defined(__ARM_NEON) && defined(__aarch64__) + for (; i + 3 < n; i += 4) { + vst1q_f32(y + i, vmulq_f32(ggml_v_silu(vld1q_f32(x + i)), vld1q_f32(g + i))); + } +#endif + for (; i < n; ++i) { + y[i] = ggml_silu_f32(x[i]) * g[i]; + } +} + ggml_float ggml_vec_soft_max_f32(const int n, float * y, const float * x, float max) { int i = 0; ggml_float sum = 0; diff --git a/ggml/src/ggml-cpu/vec.h b/ggml/src/ggml-cpu/vec.h index 84f6c0e6d26c4..ebd4b75613451 100644 --- a/ggml/src/ggml-cpu/vec.h +++ b/ggml/src/ggml-cpu/vec.h @@ -905,6 +905,60 @@ inline static void ggml_vec_silu_backward_f16(const int n, ggml_fp16_t * dx, con } } +inline static void ggml_vec_reglu_f32 (const int n, float * y, const float * x, const float * g) { + for (int i = 0; i < n; ++i) { + y[i] = (x[i] > 0.f) ? x[i] * g[i] : 0.f; + } +} + +inline static void ggml_vec_reglu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) { + for (int i = 0; i < n; ++i) { + float v = GGML_FP16_TO_FP32(x[i]); + y[i] = GGML_FP32_TO_FP16((v > 0.f) ? 
v * GGML_FP16_TO_FP32(g[i]) : 0.f); + } +} + +#ifdef GGML_GELU_FP16 +inline static void ggml_vec_geglu_f32(const int n, float * y, const float * x, const float * g) { + uint16_t t; + for (int i = 0; i < n; ++i) { + if (x[i] <= -10.0f) { + y[i] = 0.0f; + } else if (x[i] >= 10.0f) { + y[i] = x[i] * g[i]; + } else { + ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); + memcpy(&t, &fp16, sizeof(uint16_t)); + y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]) * g[i]; + } + } +} +#else +inline static void ggml_vec_geglu_f32(const int n, float * y, const float * x, const float * g) { + for (int i = 0; i < n; ++i) { + y[i] = ggml_gelu_f32(x[i]) * g[i]; + } +} +#endif + +inline static void ggml_vec_geglu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) { + const uint16_t * i16 = (const uint16_t *) x; + for (int i = 0; i < n; ++i) { + float v = GGML_FP16_TO_FP32(g[i]); + y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(ggml_table_gelu_f16[i16[i]]) * v); + } +} + +void ggml_vec_swiglu_f32(const int n, float * y, const float * x, const float * g); + +inline static void ggml_vec_swiglu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) { + for (int i = 0; i < n; ++i) { + float v = GGML_FP16_TO_FP32(x[i]); + float w = GGML_FP16_TO_FP32(g[i]); + y[i] = GGML_FP32_TO_FP16((v/(1.0f + expf(-v))) * w); + } +} + inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) { #ifndef GGML_USE_ACCELERATE ggml_float sum = 0.0; diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 811422f385073..086f9a56c4aca 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2303,6 +2303,21 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg return false; } break; + case GGML_OP_GLU: + switch (ggml_get_glu_op(dst)) { + case GGML_GLU_OP_REGLU: + ggml_cuda_op_reglu(ctx, dst); + break; + case GGML_GLU_OP_GEGLU: + ggml_cuda_op_geglu(ctx, dst); + break; + case GGML_GLU_OP_SWIGLU: + ggml_cuda_op_swiglu(ctx, dst); + break; + default: + return false; + } + break; case GGML_OP_NORM: ggml_cuda_op_norm(ctx, dst); break; @@ -3096,6 +3111,16 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g return false; } break; + case GGML_OP_GLU: + switch (ggml_get_glu_op(op)) { + case GGML_GLU_OP_REGLU: + case GGML_GLU_OP_GEGLU: + case GGML_GLU_OP_SWIGLU: + return ggml_is_contiguous_1(op->src[0]); + default: + return false; + } + break; case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: { diff --git a/ggml/src/ggml-cuda/unary.cu b/ggml/src/ggml-cuda/unary.cu index 2c0375fbe3cf6..ba3c0f13762b0 100644 --- a/ggml/src/ggml-cuda/unary.cu +++ b/ggml/src/ggml-cuda/unary.cu @@ -196,6 +196,95 @@ void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } +/* gated ops */ + +template +static __global__ void unary_gated_op_kernel(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1) { + const int64_t i = int64_t(blockDim.x)*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + + // perform base op and multiply with gate (either offset in same tensor or a separate one) + const int64_t j0 = (i / n) * o0 + (i % n); + const int64_t j1 = o0 == o1 ? 
j0 : (i / n) * o1 + (i % n); + + dst[i] = (T)(op((float)x[j0]) * (float)g[j1]); +} + +template +static void unary_gated_cuda(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1, cudaStream_t stream) { + const int64_t num_blocks = (k + CUDA_GLU_BLOCK_SIZE - 1) / CUDA_GLU_BLOCK_SIZE; + unary_gated_op_kernel<<>>(x, g, dst, k, n, o0, o1); +} + +template +void ggml_cuda_op_unary_gated(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + void * src0_d = src0->data; + void * src1_d = src1 ? src1->data : src0->data; + const int64_t src0_o = src0->nb[1]; + const int64_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + void * dst_d = dst->data; + const int64_t nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(src0->nb[0] == ggml_element_size(src0)); + GGML_ASSERT(ggml_is_contiguous(dst)); + + GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); + GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); + GGML_ASSERT(src0->type == dst->type); + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == ggml_nrows(src0)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src1->nb[0] == ggml_element_size(src1)); + GGML_ASSERT(src1->ne[0] == nc); + GGML_ASSERT(src0->type == src1->type); + } + + const int32_t swapped = ((const int32_t *) dst->op_params)[1]; + + if (src0->type == GGML_TYPE_F16) { + half * src0_p = (half *) src0_d; + half * src1_p = (half *) src1_d; + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + unary_gated_cuda(src0_p, src1_p, (half *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(half), src1_o / sizeof(half), stream); + } else { + float * src0_p = (float *) src0_d; + float * src1_p = (float *) src1_d; + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 
0 : nc; + } + + unary_gated_cuda(src0_p, src1_p, (float *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(float), src1_o / sizeof(float), stream); + } +} + +void ggml_cuda_op_reglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_unary_gated(ctx, dst); +} + +void ggml_cuda_op_geglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_unary_gated(ctx, dst); +} + +void ggml_cuda_op_swiglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_unary_gated(ctx, dst); +} + /* silu_back */ static __device__ __forceinline__ float op_silu_back(float grad, float x) { diff --git a/ggml/src/ggml-cuda/unary.cuh b/ggml/src/ggml-cuda/unary.cuh index 6686fc17e9193..9094f1d0bad37 100644 --- a/ggml/src/ggml-cuda/unary.cuh +++ b/ggml/src/ggml-cuda/unary.cuh @@ -15,6 +15,7 @@ #define CUDA_SQRT_BLOCK_SIZE 256 #define CUDA_SIN_BLOCK_SIZE 256 #define CUDA_COS_BLOCK_SIZE 256 +#define CUDA_GLU_BLOCK_SIZE 256 void ggml_cuda_op_abs(ggml_backend_cuda_context & ctx, ggml_tensor * dst); @@ -57,3 +58,9 @@ void ggml_cuda_op_sin(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_cos(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_reglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_geglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_swiglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-metal/ggml-metal-impl.h b/ggml/src/ggml-metal/ggml-metal-impl.h index 260440aedde00..7a9aab31684e1 100644 --- a/ggml/src/ggml-metal/ggml-metal-impl.h +++ b/ggml/src/ggml-metal/ggml-metal-impl.h @@ -422,6 +422,17 @@ typedef struct { int32_t KHW; // KH * KW, pre-computed on CPU to save GPU resources } ggml_metal_kargs_im2col; +typedef struct{ + int32_t ne00; + uint64_t nb01; + int32_t ne10; + uint64_t nb11; + int32_t ne0; + uint64_t nb1; + int32_t i00; + int32_t i10; +} ggml_metal_kargs_glu; + typedef struct { int64_t ne00; int64_t ne01; diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 349f0ff998ed1..12a366957891c 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -526,6 +526,9 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_SIN, GGML_METAL_KERNEL_TYPE_COS, GGML_METAL_KERNEL_TYPE_NEG, + GGML_METAL_KERNEL_TYPE_REGLU, + GGML_METAL_KERNEL_TYPE_GEGLU, + GGML_METAL_KERNEL_TYPE_SWIGLU, GGML_METAL_KERNEL_TYPE_SUM_ROWS, GGML_METAL_KERNEL_TYPE_MEAN, GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, @@ -1502,6 +1505,9 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SIN, sin, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_COS, cos, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NEG, neg, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_REGLU, reglu, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GEGLU, geglu, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SWIGLU, swiglu, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MEAN, mean, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGMAX, argmax, true); @@ -1680,6 +1686,15 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex default: return false; } + case GGML_OP_GLU: + switch (ggml_get_glu_op(op)) { + case GGML_GLU_OP_REGLU: + case GGML_GLU_OP_GEGLU: + case 
GGML_GLU_OP_SWIGLU: + return ggml_is_contiguous_1(op->src[0]) && op->src[0]->type == GGML_TYPE_F32; + default: + return false; + } case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_VIEW: @@ -2419,6 +2434,62 @@ static bool ggml_metal_encode_node( GGML_ABORT("fatal error"); } } break; + case GGML_OP_GLU: + { + GGML_ASSERT(ggml_is_contiguous_1(src0)); + + if (src1) { + GGML_ASSERT(ggml_are_same_shape(src0, src1)); + } + + id pipeline = nil; + + switch (ggml_get_glu_op(node)) { + case GGML_GLU_OP_REGLU: + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_REGLU].pipeline; + break; + case GGML_GLU_OP_GEGLU: + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GEGLU].pipeline; + break; + case GGML_GLU_OP_SWIGLU: + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SWIGLU].pipeline; + break; + default: + GGML_ABORT("fatal error"); + } + + const int32_t swp = ((const int32_t *) dst->op_params)[1]; + + const int32_t i00 = swp ? ne0 : 0; + const int32_t i10 = swp ? 0 : ne0; + + ggml_metal_kargs_glu args = { + /*.ne00 =*/ ne00, + /*.nb01 =*/ nb01, + /*.ne10 =*/ src1 ? ne10 : ne00, + /*.nb11 =*/ src1 ? nb11 : nb01, + /*.ne0 =*/ ne0, + /*.nb1 =*/ nb1, + /*.i00 =*/ src1 ? 0 : i00, + /*.i10 =*/ src1 ? 0 : i10, + }; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + if (src1) { + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; + } else { + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + } + [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; + [encoder setBytes:&args length:sizeof(args) atIndex:3]; + + const int64_t nrows = ggml_nrows(src0); + + const int32_t nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00/2); + + [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; + } break; case GGML_OP_SQR: { GGML_ASSERT(ggml_is_contiguous(src0)); diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index 984a0ab503e7d..fc3cfe35a34bf 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -1191,6 +1191,70 @@ kernel void kernel_neg( dst[tpig] = -src0[tpig]; } +kernel void kernel_reglu( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + dst_row[i0] = x0*x1*(x0 > 0.0f); + } +} + +kernel void kernel_geglu( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 
< args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + const float gelu = 0.5f*x0*(1.0f + precise::tanh(SQRT_2_OVER_PI*x0*(1.0f + GELU_COEF_A*x0*x0))); + + dst_row[i0] = gelu*x1; + } +} + +kernel void kernel_swiglu( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + const float silu = x0 / (1.0f + exp(-x0)); + + dst_row[i0] = silu*x1; + } +} + template kernel void kernel_sum_rows( constant ggml_metal_kargs_sum_rows & args, diff --git a/ggml/src/ggml-sycl/element_wise.cpp b/ggml/src/ggml-sycl/element_wise.cpp index c56924ce8322f..c7788bdb6bf8c 100644 --- a/ggml/src/ggml-sycl/element_wise.cpp +++ b/ggml/src/ggml-sycl/element_wise.cpp @@ -1,12 +1,19 @@ #include "common.hpp" +#include "ggml-sycl/presets.hpp" #include "ggml.h" #include "element_wise.hpp" +#define SYCL_GLOBAL_ID_LOOP(K, ITEM) \ + for (auto i = ITEM.get_global_id(0); i < (size_t)K; i += ITEM.get_global_range(0)) + +#define SYCL_LOCAL_ID_CALC(ITEM, IDX) \ + (ITEM.get_local_range(IDX) * ITEM.get_group(IDX) + ITEM.get_local_id(IDX)) + + static void acc_f32(const float * x, const float * y, float * dst, const int ne, const int ne10, const int ne11, const int ne12, - const int nb1, const int nb2, int offset, const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); + const int nb1, const int nb2, int offset, const sycl::nd_item<1> &item_ct1) { + const int i = SYCL_LOCAL_ID_CALC(item_ct1, 0); if (i >= ne) { return; } @@ -21,535 +28,375 @@ static void acc_f32(const float * x, const float * y, float * dst, const int ne, } } +/* Unary OP funcs */ template -static void sgn(const T * x, T * dst, const int k, const sycl::nd_item<3> &item_ct1) { - for(auto i = item_ct1.get_global_id(2); i < (const size_t)k; i += item_ct1.get_global_range(2)) { - dst[i] = x[i] > static_cast(0.f) ? static_cast(1.f) : ((x[i] < static_cast(0.f) ? static_cast(-1.f) : static_cast(0.f))); - } +static __dpct_inline__ T op_sgn(T x) { + return x > static_cast(0.f) ? static_cast(1.f) : ((x < static_cast(0.f) ? static_cast(-1.f) : static_cast(0.f))); } template -static void abs_op(const T * x, T * dst, const int k, const sycl::nd_item<3> &item_ct1) { - for(auto i = item_ct1.get_global_id(2); i < (const size_t)k; i += item_ct1.get_global_range(2)) { - dst[i] = sycl::fabs(x[i]); - } +static __dpct_inline__ T op_abs(T x) { + return sycl::fabs(x); } template -static void elu_op(const T * x, T * dst, const int k, const sycl::nd_item<3> &item_ct1) { - for(auto i = item_ct1.get_global_id(2); i < (const size_t)k; i += item_ct1.get_global_range(2)) { - dst[i] = (x[i] > static_cast(0.f)) ? x[i] : sycl::expm1(x[i]); - } +static __dpct_inline__ T op_elu(T x) { + return (x > static_cast(0.f)) ? 
x : sycl::expm1(x); } template -static void gelu(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { +static __dpct_inline__ T op_gelu(T x) { const T GELU_COEF_A = static_cast(0.044715f); const T SQRT_2_OVER_PI = static_cast(0.79788456080286535587989211986876f); - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - - if (i >= k) { - return; - } - - float xi = x[i]; - dst[i] = static_cast(0.5f) * xi * - (static_cast(1.0f) + - sycl::tanh(SQRT_2_OVER_PI * xi * (static_cast(1.0f) + GELU_COEF_A * xi * xi))); + return static_cast(0.5f) * x * + (static_cast(1.0f) + + sycl::tanh(SQRT_2_OVER_PI * x * (static_cast(1.0f) + GELU_COEF_A * x * x))); } template -static void silu(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - - if (i >= k) { - return; - } - dst[i] = x[i] / (static_cast(1.0f) + sycl::native::exp(-x[i])); +static __dpct_inline__ T op_silu(T x) { + return x / (static_cast(1.0f) + sycl::native::exp(-x)); } template -static void gelu_quick(const T *x, T *dst, int k, - const sycl::nd_item<3> &item_ct1) { - const float GELU_QUICK_COEF = -1.702f; - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - if (i >= k) { - return; - } - dst[i] = x[i] * (static_cast(1.0f) / (static_cast(1.0f) + sycl::native::exp(GELU_QUICK_COEF * x[i]))); +static __dpct_inline__ T op_gelu_quick(T x) { + const T GELU_QUICK_COEF_LOCAL = static_cast(-1.702f); + return x * (static_cast(1.0f) / (static_cast(1.0f) + sycl::native::exp(GELU_QUICK_COEF_LOCAL * x))); } template -static void gelu_erf(const T * x, T * dst, const int k, const sycl::nd_item<3> &item_ct1) { +static __dpct_inline__ T op_gelu_erf(T x) { const T SQRT_2_INV = static_cast(0.70710678118654752440084436210484f); - for(auto i = item_ct1.get_global_id(2); i < (const size_t)k; i += item_ct1.get_global_range(2)) { - auto x_i = x[i]; - dst[i] = static_cast(0.5f) * x_i * (static_cast(1.0f) + sycl::erf(x_i * SQRT_2_INV)); - } + return static_cast(0.5f) * x * (static_cast(1.0f) + sycl::erf(x * SQRT_2_INV)); } template -static void tanh(const T *x, T *dst, int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - if (i >= k) { - return; - } - dst[i] = sycl::tanh((x[i])); +static __dpct_inline__ T op_tanh(T x) { + return sycl::tanh(x); } template -static void relu(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - - if (i >= k) { - return; - } - dst[i] = sycl::fmax((x[i]), static_cast(0)); +static __dpct_inline__ T op_relu(T x) { + return sycl::fmax(x, static_cast(0)); } template -static void sigmoid(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - - if (i >= k) { - return; - } - dst[i] = 1.0f / (static_cast(1.0f) + sycl::native::exp(-x[i])); +static __dpct_inline__ T op_sigmoid(T x) { + return static_cast(1.0f) / (static_cast(1.0f) + sycl::native::exp(-x)); } template -static void sqrt(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - - if (i >= k) { - return; - } - dst[i] = 
sycl::sqrt(x[i]); +static __dpct_inline__ T op_sqrt(T x) { + return sycl::sqrt(x); } template -static void sin(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - - if (i >= k) { - return; - } - dst[i] = sycl::sin(x[i]); +static __dpct_inline__ T op_sin(T x) { + return sycl::sin(x); } template -static void cos(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - - if (i >= k) { - return; - } - dst[i] = sycl::cos(x[i]); +static __dpct_inline__ T op_cos(T x) { + return sycl::cos(x); } template -static void hardsigmoid(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - - if (i >= k) { - return; - } - dst[i] = sycl::fmin(static_cast(1.0f), sycl::fmax(static_cast(0.0f), (x[i] + static_cast(3.0f)) / static_cast(6.0f))); +static __dpct_inline__ T op_hardsigmoid(T x) { + return sycl::fmin(static_cast(1.0f), sycl::fmax(static_cast(0.0f), (x + static_cast(3.0f)) / static_cast(6.0f))); } template -static void hardswish(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - - if (i >= k) { - return; - } - dst[i] = x[i] * sycl::fmin(static_cast(1.0f), sycl::fmax(static_cast(0.0f), (x[i] + static_cast(3.0f)) / static_cast(6.0f))); +static __dpct_inline__ T op_hardswish(T x) { + return x * sycl::fmin(static_cast(1.0f), sycl::fmax(static_cast(0.0f), (x + static_cast(3.0f)) / static_cast(6.0f))); } template -static void exp(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); +static __dpct_inline__ T op_exp(T x) { + return sycl::exp(x); +} - if (i >= k) { - return; +template +static __dpct_inline__ T op_log(T x) { + if (x <= static_cast(0)) { + return neg_infinity(); } - dst[i] = sycl::exp(x[i]); + return sycl::log(x); } template -static void log(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); +static __dpct_inline__ T op_neg(T x) { + return -x; +} - if (i >= k) { - return; - } - T xi = x[i]; - if (xi <= 0) { - dst[i] = neg_infinity(); - } else { - dst[i] = sycl::log(xi); - } +template +static __dpct_inline__ T op_step(T x) { + return (x > static_cast(0.0f)) ? 
static_cast(1.0f) : static_cast(0.0f); } template -static void neg(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); +static __dpct_inline__ T op_leaky_relu(T x, float negative_slope) { + T neg_slope_T = static_cast(negative_slope); + return sycl::fmax(x, static_cast(0)) + + sycl::fmin(x, static_cast(0.0f)) * neg_slope_T; +} - if (i >= k) { - return; - } - dst[i] = -x[i]; +template +static __dpct_inline__ T op_sqr(T x) { + return x * x; } template -static void step(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); +static __dpct_inline__ T op_clamp(T x, float min_val, float max_val) { + return x < static_cast(min_val) ? static_cast(min_val) : (x > static_cast(max_val) ? static_cast(max_val) : x); +} - if (i >= k) { - return; +template +static void unary_op_sgn_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_sgn(x[i]); } - dst[i] = x[i] > static_cast(0.0f); } template -static void leaky_relu(const T *x, T *dst, const int k, const float negative_slope, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - if (i >= k) { - return; +static void unary_op_abs_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_abs(x[i]); } - dst[i] = sycl::fmax((x[i]), static_cast(0)) + - sycl::fmin((x[i]), static_cast(0.0f)) * negative_slope; } template -static void sqr(const T * x, T * dst, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - - if (i >= k) { - return; +static void unary_op_elu_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_elu(x[i]); } - dst[i] = x[i] * x[i]; } -template -static void upscale(const T *x, T *dst, const int nb00, const int nb01, - const int nb02, const int nb03, const int ne10, const int ne11, - const int ne12, const int ne13, const float sf0, const float sf1, - const float sf2, const float sf3, const sycl::nd_item<1> &item_ct1) { - int index = item_ct1.get_local_id(0) + - item_ct1.get_group(0) * item_ct1.get_local_range(0); - if (index >= ne10 * ne11 * ne12 * ne13) { - return; +template +static void unary_op_gelu_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_gelu(x[i]); } - // operation - int i10 = index % ne10; - int i11 = (index / ne10) % ne11; - int i12 = (index / (ne10 * ne11)) % ne12; - int i13 = (index / (ne10 * ne11 * ne12)) % ne13; - - int i00 = i10 / sf0; - int i01 = i11 / sf1; - int i02 = i12 / sf2; - int i03 = i13 / sf3; - - dst[index] = *(const T *)((const char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00); } -template -static void pad(const T *x, T *dst, const int ne0, const int ne00, const int ne01, const int ne02, - const sycl::nd_item<3> &item_ct1) { - int nidx = item_ct1.get_local_id(2) + - item_ct1.get_group(2) * item_ct1.get_local_range(2); - if (nidx >= ne0) { - return; +template +static void unary_op_silu_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) 
{ + dst[i] = op_silu(x[i]); } +} - // operation - int offset_dst = nidx + item_ct1.get_group(1) * ne0 + - item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1); - if (nidx < ne00 && item_ct1.get_group(1) < (size_t) ne01 && item_ct1.get_group(0) < (size_t) ne02) { - int offset_src = nidx + item_ct1.get_group(1) * ne00 + - item_ct1.get_group(0) * ne00 * ne01; - dst[offset_dst] = x[offset_src]; - } else { - dst[offset_dst] = static_cast(0.0f); +template +static void unary_op_gelu_quick_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_gelu_quick(x[i]); } } - template -static void clamp(const T * x, T * dst, const float min, const float max, const int k, - const sycl::nd_item<3> &item_ct1) { - const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + - item_ct1.get_local_id(2); - - if (i >= k) { - return; +static void unary_op_gelu_erf_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_gelu_erf(x[i]); } - - dst[i] = x[i] < static_cast(min) ? static_cast(min) : (x[i] > static_cast(max) ? static_cast(max) : x[i]); } -static void acc_f32_sycl(const float *x, const float *y, float *dst, - const int n_elements, const int ne10, const int ne11, - const int ne12, const int nb1, const int nb2, - const int offset, queue_ptr stream) { - int num_blocks = (n_elements + SYCL_ACC_BLOCK_SIZE - 1) / SYCL_ACC_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_ACC_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_ACC_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { - acc_f32(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset, item_ct1); - }); +template +static void unary_op_tanh_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_tanh(x[i]); + } } template -static void gelu_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_GELU_BLOCK_SIZE - 1) / SYCL_GELU_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { gelu(x, dst, k, item_ct1); }); +static void unary_op_relu_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_relu(x[i]); + } } template -static void silu_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_SILU_BLOCK_SIZE - 1) / SYCL_SILU_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SILU_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_SILU_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { silu(x, dst, k, item_ct1); }); +static void unary_op_sigmoid_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_sigmoid(x[i]); + } } template -static void sgn_sycl(const T * x, T * dst, const int k, queue_ptr stream) { - // hard code for now - const int num_blocks = ceil_div(k, 256); - sycl_parallel_for( - stream, sycl::nd_range<3>((sycl::range<3>(1, 1, num_blocks) * sycl::range(1, 1, 256)), sycl::range(1, 1, 256)), - [=](sycl::nd_item<3> item_ct1) { sgn(x, dst, k, item_ct1); }); +static void unary_op_sqrt_kernel(const T * x, T * dst, const int 
k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_sqrt(x[i]); + } } template -static void abs_sycl(const T * x, T * dst, const int k, queue_ptr stream) { - // hard code for now - const int num_blocks = ceil_div(k, 256); - sycl_parallel_for( - stream, - sycl::nd_range<3>((sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, 256)), sycl::range<3>(1, 1, 256)), - [=](sycl::nd_item<3> item_ct1) { abs_op(x, dst, k, item_ct1); }); +static void unary_op_sin_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_sin(x[i]); + } } - template -static void elu_sycl(const T * x, T * dst, const int k, queue_ptr stream) { - // hard code for now - const int num_blocks = ceil_div(k, 256); - sycl_parallel_for( - stream, - sycl::nd_range<3>((sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, 256)), sycl::range<3>(1, 1, 256)), - [=](sycl::nd_item<3> item_ct1) { elu_op(x, dst, k, item_ct1); }); +static void unary_op_cos_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_cos(x[i]); + } } template -static void gelu_quick_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_GELU_BLOCK_SIZE - 1) / SYCL_GELU_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { gelu_quick(x, dst, k, item_ct1); }); +static void unary_op_hardsigmoid_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_hardsigmoid(x[i]); + } } - template -static void gelu_erf_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = ceil_div(k, SYCL_GELU_BLOCK_SIZE); - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_GELU_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { gelu_erf(x, dst, k, item_ct1); }); +static void unary_op_hardswish_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_hardswish(x[i]); + } } template -static void tanh_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_TANH_BLOCK_SIZE - 1) / SYCL_TANH_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_TANH_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_TANH_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { tanh(x, dst, k, item_ct1); }); +static void unary_op_exp_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_exp(x[i]); + } } template -static void relu_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_RELU_BLOCK_SIZE - 1) / SYCL_RELU_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { relu(x, dst, k, item_ct1); }); +static void unary_op_log_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_log(x[i]); + } } template -static void 
hardsigmoid_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_HARDSIGMOID_BLOCK_SIZE - 1) / SYCL_HARDSIGMOID_BLOCK_SIZE; - sycl_parallel_for( - stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_HARDSIGMOID_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_HARDSIGMOID_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { hardsigmoid(x, dst, k, item_ct1); }); +static void unary_op_neg_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_neg(x[i]); + } } template -static void hardswish_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_HARDSWISH_BLOCK_SIZE - 1) / SYCL_HARDSWISH_BLOCK_SIZE; - sycl_parallel_for( - stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_HARDSWISH_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_HARDSWISH_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { hardswish(x, dst, k, item_ct1); }); +static void unary_op_step_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_step(x[i]); + } } template -static void exp_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_EXP_BLOCK_SIZE - 1) / SYCL_EXP_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { exp(x, dst, k, item_ct1); }); +static void unary_op_leaky_relu_kernel(const T * x, T * dst, const int k, float negative_slope, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_leaky_relu(x[i], negative_slope); + } } template -static void log_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_EXP_BLOCK_SIZE - 1) / SYCL_EXP_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_EXP_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { log(x, dst, k, item_ct1); }); +static void unary_op_sqr_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_sqr(x[i]); + } } template -static void neg_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_NEG_BLOCK_SIZE - 1) / SYCL_NEG_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { neg(x, dst, k, item_ct1); }); +static void unary_op_clamp_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1, float min_val, float max_val) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = op_clamp(x[i], min_val, max_val); + } } -template -static void step_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_NEG_BLOCK_SIZE - 1) / SYCL_NEG_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_NEG_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { step(x, dst, k, item_ct1); }); +template +static void upscale(const T *x, T *dst, const int nb00, const int nb01, + const int nb02, const int 
nb03, const int ne10, const int ne11, + const int ne12, const int ne13, const float sf0, const float sf1, + const float sf2, const float sf3, const sycl::nd_item<1> &item_ct1) { + int index = item_ct1.get_local_id(0) + + item_ct1.get_group(0) * item_ct1.get_local_range(0); + if (index >= ne10 * ne11 * ne12 * ne13) { + return; + } + // operation + int i10 = index % ne10; + int i11 = (index / ne10) % ne11; + int i12 = (index / (ne10 * ne11)) % ne12; + int i13 = (index / (ne10 * ne11 * ne12)) % ne13; + + int i00 = static_cast(i10 / sf0); + int i01 = static_cast(i11 / sf1); + int i02 = static_cast(i12 / sf2); + int i03 = static_cast(i13 / sf3); + + dst[index] = *(const T *)((const char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00); } -template -static void sigmoid_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_SIGMOID_BLOCK_SIZE - 1) / SYCL_SIGMOID_BLOCK_SIZE; - sycl_parallel_for( - stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SIGMOID_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_SIGMOID_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { sigmoid(x, dst, k, item_ct1); }); +template +static void pad(const T *x, T *dst, const int ne0, const int ne00, const int ne01, const int ne02, + const sycl::nd_item<3> &item_ct1) { + int nidx = SYCL_LOCAL_ID_CALC(item_ct1, 2); + if (nidx >= ne0) { + return; + } + + // operation + int offset_dst = nidx + item_ct1.get_group(1) * ne0 + + item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1); + if (nidx < ne00 && item_ct1.get_group(1) < (size_t) ne01 && item_ct1.get_group(0) < (size_t) ne02) { + int offset_src = nidx + item_ct1.get_group(1) * ne00 + + item_ct1.get_group(0) * ne00 * ne01; + dst[offset_dst] = x[offset_src]; + } else { + dst[offset_dst] = static_cast(0.0f); + } } template -static void sqrt_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_SQRT_BLOCK_SIZE - 1) / SYCL_SQRT_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SQRT_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_SQRT_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { sqrt(x, dst, k, item_ct1); }); +static void clamp(const T * x, T * dst, const float min, const float max, const int k, + const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + dst[i] = x[i] < static_cast(min) ? static_cast(min) : (x[i] > static_cast(max) ? static_cast(max) : x[i]); + } } template -static void sin_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_SIN_BLOCK_SIZE - 1) / SYCL_SIN_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { sin(x, dst, k, item_ct1); }); +static void gated_op_fused_geglu(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + const int64_t j0 = (i / n) * o0 + (i % n); + const int64_t j1 = o0 == o1 ? 
j0 : (i / n) * o1 + (i % n); + dst[i] = op_gelu(x[j0]) * g[j1]; + } } template -static void cos_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_SIN_BLOCK_SIZE - 1) / SYCL_SIN_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_SIN_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { cos(x, dst, k, item_ct1); }); +static void gated_op_fused_reglu(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + const int64_t j0 = (i / n) * o0 + (i % n); + const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n); + dst[i] = op_relu(x[j0]) * g[j1]; + } } template -static void leaky_relu_sycl(const T *x, T *dst, const int k, - const float negative_slope, - queue_ptr stream) { - const int num_blocks = (k + SYCL_RELU_BLOCK_SIZE - 1) / SYCL_RELU_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_RELU_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { leaky_relu(x, dst, k, negative_slope, item_ct1); }); +static void gated_op_fused_swiglu(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) { + SYCL_GLOBAL_ID_LOOP(k, item_ct1) { + const int64_t j0 = (i / n) * o0 + (i % n); + const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n); + dst[i] = op_silu(x[j0]) * g[j1]; + } } -template -static void sqr_sycl(const T *x, T *dst, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_SQR_BLOCK_SIZE - 1) / SYCL_SQR_BLOCK_SIZE; +namespace ggml_sycl_detail { +static void acc_f32_sycl(const float *x, const float *y, float *dst, + const int n_elements, const int ne10, const int ne11, + const int ne12, const int nb1, const int nb2, + const int offset, queue_ptr stream) { + int num_blocks = ceil_div(n_elements, SYCL_ACC_BLOCK_SIZE); sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_SQR_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_SQR_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { sqr(x, dst, k, item_ct1); }); + sycl::nd_range<1>(sycl::range<1>(num_blocks) * + sycl::range<1>(SYCL_ACC_BLOCK_SIZE), + sycl::range<1>(SYCL_ACC_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + acc_f32(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset, + item_ct1); + }); } template @@ -558,7 +405,7 @@ static void upscale_sycl(const T *x, T *dst, const int nb00, const int nb01, const int ne12, const int ne13, const float sf0, const float sf1, const float sf2, const float sf3, queue_ptr stream) { int dst_size = ne10 * ne11 * ne12 * ne13; - int num_blocks = (dst_size + SYCL_UPSCALE_BLOCK_SIZE - 1) / SYCL_UPSCALE_BLOCK_SIZE; + int num_blocks = ceil_div(dst_size, SYCL_UPSCALE_BLOCK_SIZE); sycl::range<1> gridDim(num_blocks * SYCL_UPSCALE_BLOCK_SIZE); sycl_parallel_for<1>( stream, sycl::nd_range<1>(gridDim, sycl::range<1>(SYCL_UPSCALE_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { @@ -570,7 +417,7 @@ template static void pad_sycl(const T *x, T *dst, const int ne00, const int ne01, const int ne02, const int ne0, const int ne1, const int ne2, queue_ptr stream) { - int num_blocks = (ne0 + SYCL_PAD_BLOCK_SIZE - 1) / SYCL_PAD_BLOCK_SIZE; + int num_blocks = ceil_div(ne0, SYCL_PAD_BLOCK_SIZE); 
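// [editorial sketch, not part of the upstream patch] The gated_op_fused_{geglu,reglu,swiglu}
// kernels above and the CUDA unary_gated_op_kernel from this same series share one indexing
// scheme: n is the gated row width (nc), o0/o1 are the row strides (in elements) of the
// activation and gate inputs, and when both halves live in a single tensor the caller offsets
// the two base pointers by nc so that o0 == o1 and j1 collapses to j0. A minimal host-side
// sketch of the same arithmetic, using hypothetical names (glu_rows_ref) and plain C++,
// for illustration only:
//
//   #include <cmath>
//   #include <cstdint>
//
//   // SwiGLU reference: dst has k = nrows * n elements; x/g point at the activation and
//   // gate halves, laid out with row strides o0 and o1 elements respectively.
//   static void glu_rows_ref(const float * x, const float * g, float * dst,
//                            int64_t k, int64_t n, int64_t o0, int64_t o1) {
//       for (int64_t i = 0; i < k; ++i) {
//           const int64_t j0 = (i / n) * o0 + (i % n);                  // activation index
//           const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n);  // gate index
//           const float v = x[j0];
//           dst[i] = (v / (1.0f + std::exp(-v))) * g[j1];               // silu(x) * gate
//       }
//   }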
sycl::range<3> gridDim(ne2, ne1, num_blocks); sycl_parallel_for(stream, sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE), @@ -578,22 +425,11 @@ static void pad_sycl(const T *x, T *dst, const int ne00, [=](sycl::nd_item<3> item_ct1) { pad(x, dst, ne0, ne00, ne01, ne02, item_ct1); }); } -template -static void clamp_sycl(const T *x, T *dst, const float min, - const float max, const int k, - queue_ptr stream) { - const int num_blocks = (k + SYCL_CLAMP_BLOCK_SIZE - 1) / SYCL_CLAMP_BLOCK_SIZE; - sycl_parallel_for(stream, - sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CLAMP_BLOCK_SIZE), - sycl::range<3>(1, 1, SYCL_CLAMP_BLOCK_SIZE)), - [=](sycl::nd_item<3> item_ct1) { clamp(x, dst, min, max, k, item_ct1); }); -} - -inline void ggml_sycl_op_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { +template +static inline void dispatch_ggml_sycl_op_unary(ggml_backend_sycl_context & ctx, ggml_tensor * dst, KernelInvoker kernel_invoker, Args&&... args) { #if defined (GGML_SYCL_F16) GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); - #else GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); @@ -606,14 +442,14 @@ inline void ggml_sycl_op_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) case GGML_TYPE_F16: { auto data_pts = cast_data(dst); - sgn_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); + kernel_invoker(data_pts.src, data_pts.dst, (int)ggml_nelements(dst->src[0]), main_stream, std::forward(args)...); break; } #endif case GGML_TYPE_F32: { auto data_pts = cast_data(dst); - sgn_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); + kernel_invoker(data_pts.src, data_pts.dst, (int)ggml_nelements(dst->src[0]), main_stream, std::forward(args)...); break; } default: @@ -621,11 +457,11 @@ inline void ggml_sycl_op_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) } } -inline void ggml_sycl_op_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { +template +static inline void dispatch_ggml_sycl_op_fused_glu(ggml_backend_sycl_context & ctx, ggml_tensor * dst, KernelInvoker kernel_invoker, Args&&... args) { #if defined (GGML_SYCL_F16) GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); - #else GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F32); @@ -633,52 +469,66 @@ inline void ggml_sycl_op_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst) GGML_ASSERT(dst->src[0]->type == dst->type); dpct::queue_ptr main_stream = ctx.stream(); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - abs_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - abs_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const int64_t nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2;; + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_is_contiguous_1(dst->src[0])); + GGML_ASSERT(ggml_is_contiguous(dst)); + const int32_t swapped = ((const int32_t *) dst->op_params)[1]; + void * src0_d = src0->data; + void * src1_d = src1 ? src1->data : src0->data; + const int64_t src0_o = src0->nb[1]; + const int64_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + void * dst_d = dst->data; + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src1->nb[0] == ggml_element_size(src1)); + GGML_ASSERT(src1->ne[0] == nc); + GGML_ASSERT(src0->type == src1->type); } -} - - -inline void ggml_sycl_op_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); - -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); switch (dst->type) { #if defined (GGML_SYCL_F16) case GGML_TYPE_F16: { - auto data_pts = cast_data(dst); - elu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); + sycl::half * src0_p = (sycl::half *) src0_d; + sycl::half * src1_p = (sycl::half *) src1_d; + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + kernel_invoker(src0_p, + src1_p, + (sycl::half *) dst_d, + ggml_nelements(dst), + nc, + src0_o / sizeof(sycl::half), + src1_o / sizeof(sycl::half), + main_stream, + std::forward(args)...); break; } #endif case GGML_TYPE_F32: { - auto data_pts = cast_data(dst); - elu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); + float * src0_p = (float *) src0_d; + float * src1_p = (float *) src1_d; + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + kernel_invoker(src0_p, + src1_p, + (float *) dst_d, + ggml_nelements(dst), + nc, + src0_o / sizeof(float), + src1_o / sizeof(float), + main_stream, + std::forward(args)...); break; } default: @@ -686,7 +536,8 @@ inline void ggml_sycl_op_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) } } -inline void ggml_sycl_op_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { +template +static inline void dispatch_ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst, KernelInvoker kernel_invoker, Args&&... 
args) { #if defined (GGML_SYCL_F16) GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); @@ -695,52 +546,31 @@ inline void ggml_sycl_op_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst GGML_ASSERT(dst->type == GGML_TYPE_F32); #endif GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - silu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - silu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } -} -inline void ggml_sycl_op_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); dpct::queue_ptr main_stream = ctx.stream(); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); + + const float sf0 = (float) dst->ne[0] / dst->src[0]->ne[0]; + const float sf1 = (float) dst->ne[1] / dst->src[0]->ne[1]; + const float sf2 = (float) dst->ne[2] / dst->src[0]->ne[2]; + const float sf3 = (float) dst->ne[3] / dst->src[0]->ne[3]; switch (dst->type) { #if defined (GGML_SYCL_F16) case GGML_TYPE_F16: { auto data_pts = cast_data(dst); - gelu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); + kernel_invoker(data_pts.src, data_pts.dst, (int)dst->src[0]->nb[0], (int)dst->src[0]->nb[1], (int)dst->src[0]->nb[2], + (int)dst->src[0]->nb[3], (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], sf0, sf1, sf2, sf3, + main_stream, std::forward(args)...); break; } #endif case GGML_TYPE_F32: { auto data_pts = cast_data(dst); - gelu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); + kernel_invoker(data_pts.src, data_pts.dst, (int)dst->src[0]->nb[0], (int)dst->src[0]->nb[1], (int)dst->src[0]->nb[2], + (int)dst->src[0]->nb[3], (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], sf0, sf1, sf2, sf3, + main_stream, std::forward(args)...); break; } default: @@ -748,7 +578,8 @@ inline void ggml_sycl_op_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst } } -inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { +template +static inline void dispatch_ggml_sycl_op_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst, KernelInvoker kernel_invoker, Args&&... 
args) { #if defined (GGML_SYCL_F16) GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); @@ -757,6 +588,7 @@ inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor GGML_ASSERT(dst->type == GGML_TYPE_F32); #endif GGML_ASSERT(dst->src[0]->type == dst->type); + GGML_ASSERT(dst->src[0]->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors dpct::queue_ptr main_stream = ctx.stream(); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); switch (dst->type) { @@ -764,14 +596,16 @@ inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor case GGML_TYPE_F16: { auto data_pts = cast_data(dst); - gelu_quick_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); + kernel_invoker(data_pts.src, data_pts.dst, (int)dst->src[0]->ne[0], (int)dst->src[0]->ne[1], (int)dst->src[0]->ne[2], (int)dst->ne[0], + (int)dst->ne[1], (int)dst->ne[2], main_stream, std::forward(args)...); break; } #endif case GGML_TYPE_F32: { auto data_pts = cast_data(dst); - gelu_quick_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); + kernel_invoker(data_pts.src, data_pts.dst, (int)dst->src[0]->ne[0], (int)dst->src[0]->ne[1], (int)dst->src[0]->ne[2], (int)dst->ne[0], + (int)dst->ne[1], (int)dst->ne[2], main_stream, std::forward(args)...); break; } default: @@ -779,593 +613,320 @@ inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor } } -inline void ggml_sycl_op_gelu_erf(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - gelu_erf_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - gelu_erf_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } -} - - -inline void ggml_sycl_op_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - tanh_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - tanh_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } -} - -inline void ggml_sycl_op_relu(ggml_backend_sycl_context & ctx, 
ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - relu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - relu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } -} +} // namespace ggml_sycl_detail -inline void ggml_sycl_op_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - hardsigmoid_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - hardsigmoid_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, 256); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), + sycl::range<1>(256)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_sgn_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - hardswish_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - hardswish_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, 
+ [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, 256); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), + sycl::range<1>(256)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_abs_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - exp_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - exp_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, 256); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), + sycl::range<1>(256)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_elu_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - log_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - log_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_SILU_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SILU_BLOCK_SIZE), + sycl::range<1>(SYCL_SILU_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_silu_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - 
GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - sigmoid_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - sigmoid_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_GELU_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_GELU_BLOCK_SIZE), + sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_gelu_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - sqrt_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - sqrt_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_GELU_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_GELU_BLOCK_SIZE), + sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_gelu_quick_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } - -inline void ggml_sycl_op_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - sin_sycl(data_pts.src, data_pts.dst, 
ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - sin_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } + +static inline void ggml_sycl_op_gelu_erf(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_GELU_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_GELU_BLOCK_SIZE), + sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_gelu_erf_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - cos_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - cos_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_TANH_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_TANH_BLOCK_SIZE), + sycl::range<1>(SYCL_TANH_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_tanh_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - step_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - step_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) 
{ + const int num_blocks = ceil_div(k_elements, SYCL_RELU_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_RELU_BLOCK_SIZE), + sycl::range<1>(SYCL_RELU_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_relu_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - neg_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - neg_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_HARDSIGMOID_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_HARDSIGMOID_BLOCK_SIZE), + sycl::range<1>(SYCL_HARDSIGMOID_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_hardsigmoid_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif +static inline void ggml_sycl_op_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_HARDSWISH_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_HARDSWISH_BLOCK_SIZE), + sycl::range<1>(SYCL_HARDSWISH_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_hardswish_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); +} - GGML_ASSERT(dst->src[0]->type == dst->type); - float negative_slope; - memcpy(&negative_slope, dst->op_params, sizeof(float)); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - leaky_relu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), negative_slope, main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - leaky_relu_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), negative_slope, main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not 
supported!\n"); - } +static inline void ggml_sycl_op_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_EXP_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_EXP_BLOCK_SIZE), + sycl::range<1>(SYCL_EXP_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_exp_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - #if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - sqr_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - sqr_sycl(data_pts.src, data_pts.dst, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_EXP_BLOCK_SIZE); // Using EXP block size + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_EXP_BLOCK_SIZE), + sycl::range<1>(SYCL_EXP_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_log_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); +static inline void ggml_sycl_op_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_NEG_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_NEG_BLOCK_SIZE), + sycl::range<1>(SYCL_NEG_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_neg_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); +} - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); +static inline void ggml_sycl_op_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_NEG_BLOCK_SIZE); // Using NEG block size + sycl_parallel_for(stream, + 
sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_NEG_BLOCK_SIZE), + sycl::range<1>(SYCL_NEG_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_step_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); +} - const float sf0 = (float) dst->ne[0] / dst->src[0]->ne[0]; - const float sf1 = (float) dst->ne[1] / dst->src[0]->ne[1]; - const float sf2 = (float) dst->ne[2] / dst->src[0]->ne[2]; - const float sf3 = (float) dst->ne[3] / dst->src[0]->ne[3]; - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - upscale_sycl(data_pts.src, data_pts.dst, dst->src[0]->nb[0], dst->src[0]->nb[1], dst->src[0]->nb[2], - dst->src[0]->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, - main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - upscale_sycl(data_pts.src, data_pts.dst, dst->src[0]->nb[0], dst->src[0]->nb[1], dst->src[0]->nb[2], - dst->src[0]->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, - main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_SIGMOID_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SIGMOID_BLOCK_SIZE), + sycl::range<1>(SYCL_SIGMOID_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_sigmoid_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { -#if defined (GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - GGML_ASSERT(dst->src[0]->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - switch (dst->type) { -#if defined (GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - pad_sycl(data_pts.src, data_pts.dst, dst->src[0]->ne[0], dst->src[0]->ne[1], dst->src[0]->ne[2], dst->ne[0], - dst->ne[1], dst->ne[2], main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - pad_sycl(data_pts.src, data_pts.dst, dst->src[0]->ne[0], dst->src[0]->ne[1], dst->src[0]->ne[2], dst->ne[0], - dst->ne[1], dst->ne[2], main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_SQRT_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SQRT_BLOCK_SIZE), + sycl::range<1>(SYCL_SQRT_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_sqrt_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); } -inline void ggml_sycl_op_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * 
dst) { -#if defined(GGML_SYCL_F16) - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); -#else +static inline void ggml_sycl_op_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_SIN_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SIN_BLOCK_SIZE), + sycl::range<1>(SYCL_SIN_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_sin_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); +} - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); -#endif - GGML_ASSERT(dst->src[0]->type == dst->type); - dpct::queue_ptr main_stream = ctx.stream(); - SYCL_CHECK(ggml_sycl_set_device(ctx.device)); - float min; - float max; - memcpy(&min, dst->op_params, sizeof(float)); - memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); +static inline void ggml_sycl_op_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_SIN_BLOCK_SIZE); // Using SIN block size + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SIN_BLOCK_SIZE), + sycl::range<1>(SYCL_SIN_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_cos_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); +} - switch (dst->type) { -#if defined(GGML_SYCL_F16) - case GGML_TYPE_F16: - { - auto data_pts = cast_data(dst); - clamp_sycl(data_pts.src, data_pts.dst, min, max, ggml_nelements(dst->src[0]), main_stream); - break; - } -#endif - case GGML_TYPE_F32: - { - auto data_pts = cast_data(dst); - clamp_sycl(data_pts.src, data_pts.dst, min, max, ggml_nelements(dst->src[0]), main_stream); - break; - } - default: - GGML_ABORT("GGML tensor type not supported!\n"); - } +static inline void ggml_sycl_op_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + float negative_slope; + memcpy(&negative_slope, dst->op_params, sizeof(float)); + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream, float slope) { + const int num_blocks = ceil_div(k_elements, SYCL_RELU_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_RELU_BLOCK_SIZE), + sycl::range<1>(SYCL_RELU_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_leaky_relu_kernel(src, dst_ptr, k_elements, slope, item_ct1); + }); + }, negative_slope); +} + +static inline void ggml_sycl_op_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + const int num_blocks = ceil_div(k_elements, SYCL_SQR_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SQR_BLOCK_SIZE), + sycl::range<1>(SYCL_SQR_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_sqr_kernel(src, dst_ptr, k_elements, item_ct1); + }); + }); +} + +static inline void ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + 
ggml_sycl_detail::dispatch_ggml_sycl_op_upscale(ctx, dst, + [](const auto* src, auto* dst_ptr, int nb00, int nb01, int nb02, int nb03, + int ne10, int ne11, int ne12, int ne13, float sf0, float sf1, float sf2, float sf3, + queue_ptr stream) { + ggml_sycl_detail::upscale_sycl(src, dst_ptr, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3, stream); + }); } -inline void ggml_sycl_op_acc(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { +static inline void ggml_sycl_op_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_pad(ctx, dst, + [](const auto* src, auto* dst_ptr, int ne00, int ne01, int ne02, int ne0, int ne1, int ne2, + queue_ptr stream) { + ggml_sycl_detail::pad_sycl(src, dst_ptr, ne00, ne01, ne02, ne0, ne1, ne2, stream); + }); +} +static inline void ggml_sycl_op_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + float min_val; + float max_val; + memcpy(&min_val, dst->op_params, sizeof(float)); + memcpy(&max_val, (float *) dst->op_params + 1, sizeof(float)); + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream, float min_arg, float max_arg) { + const int num_blocks = ceil_div(k_elements, SYCL_CLAMP_BLOCK_SIZE); + sycl_parallel_for(stream, + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_CLAMP_BLOCK_SIZE), + sycl::range<1>(SYCL_CLAMP_BLOCK_SIZE)), + [=](sycl::nd_item<1> item_ct1) { + clamp(src, dst_ptr, min_arg, max_arg, k_elements, item_ct1); + }); + }, min_val, max_val); +} + +static inline void ggml_sycl_op_acc(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT(dst->src[1]->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); @@ -1381,7 +942,40 @@ inline void ggml_sycl_op_acc(ggml_backend_sycl_context & ctx, ggml_tensor *dst) // int nb3 = dst->op_params[2] / 4; // 4 bytes of float32 - unused int offset = dst->op_params[3] / 4; // offset in bytes - acc_f32_sycl(src0_dd, src1_dd, dst_dd, ggml_nelements(dst), dst->src[1]->ne[0], dst->src[1]->ne[1], dst->src[1]->ne[2], nb1, nb2, offset, main_stream); + ggml_sycl_detail::acc_f32_sycl(src0_dd, src1_dd, dst_dd, (int)ggml_nelements(dst), (int)dst->src[1]->ne[0], (int)dst->src[1]->ne[1], (int)dst->src[1]->ne[2], nb1, nb2, offset, main_stream); +} + +static inline void ggml_sycl_op_geglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst, + [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) { + const uint32_t num_blocks = ceil_div(k, SYCL_GELU_BLOCK_SIZE); + sycl_parallel_for(main_stream, + sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { + gated_op_fused_geglu(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1); + }); + }); +} + +static inline void ggml_sycl_op_reglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst, + [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) { + const uint32_t num_blocks = ceil_div((uint32_t)k, SYCL_RELU_BLOCK_SIZE); // Using RELU block size for reglu + sycl_parallel_for(main_stream, + sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_RELU_BLOCK_SIZE)), sycl::range<1>(SYCL_RELU_BLOCK_SIZE)), 
[=](sycl::nd_item<1> item_ct1) { + gated_op_fused_reglu(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1); + }); + }); +} + +static inline void ggml_sycl_op_swiglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst, + [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) { + const uint32_t num_blocks = ceil_div((uint32_t)k, SYCL_SILU_BLOCK_SIZE); // Using SILU block size for swiglu + sycl_parallel_for(main_stream, + sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_SILU_BLOCK_SIZE)), sycl::range<1>(SYCL_SILU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) { + gated_op_fused_swiglu(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1); + }); + }); } @@ -1509,3 +1103,18 @@ void ggml_sycl_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); ggml_sycl_op_elu(ctx, dst); } + +void ggml_sycl_geglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); + ggml_sycl_op_geglu(ctx, dst); +} + +void ggml_sycl_reglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); + ggml_sycl_op_reglu(ctx, dst); +} + +void ggml_sycl_swiglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1); + ggml_sycl_op_swiglu(ctx, dst); +} diff --git a/ggml/src/ggml-sycl/element_wise.hpp b/ggml/src/ggml-sycl/element_wise.hpp index bd40113f09705..86068b10129ec 100644 --- a/ggml/src/ggml-sycl/element_wise.hpp +++ b/ggml/src/ggml-sycl/element_wise.hpp @@ -3,27 +3,30 @@ #include "common.hpp" #include "ggml.h" -#include +#include // For std::numeric_limits template T neg_infinity() { return -std::numeric_limits::infinity(); } -template +template struct typed_data { - const T * src; - T * dst; + const T_Src * src; + T_Dst * dst; }; -template -typed_data cast_data(ggml_tensor * dst) { +template +typed_data cast_data(ggml_tensor * dst) { return { - /* .src = */ static_cast(dst->src[0]->data), - /* .dst = */ static_cast(dst->data) + /* .src = */ static_cast(dst->src[0]->data), + /* .dst = */ static_cast(dst->data) }; } +const float GELU_QUICK_COEF = -1.702f; + + void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst); @@ -73,5 +76,9 @@ void ggml_sycl_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst); void ggml_sycl_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -#endif // GGML_SYCL_ELEMENTWISE_HPP +void ggml_sycl_geglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); +void ggml_sycl_reglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); +void ggml_sycl_swiglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); + +#endif // GGML_SYCL_ELEMENTWISE_HPP diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 9cb36ae99e7f5..ae5e062572e32 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -3676,6 +3676,21 @@ static bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct gg return false; } break; + case GGML_OP_GLU: + switch (ggml_get_glu_op(dst)) { + case GGML_GLU_OP_REGLU: + ggml_sycl_reglu(ctx, dst); + break; + case GGML_GLU_OP_GEGLU: + ggml_sycl_geglu(ctx, dst); + break; + 
case GGML_GLU_OP_SWIGLU: + ggml_sycl_swiglu(ctx, dst); + break; + default: + return false; + } + break; case GGML_OP_NORM: ggml_sycl_norm(ctx, dst); break; @@ -4212,6 +4227,16 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g default: return false; } + case GGML_OP_GLU: + switch (ggml_get_glu_op(op)) { + case GGML_GLU_OP_REGLU: + case GGML_GLU_OP_GEGLU: + case GGML_GLU_OP_SWIGLU: + return ggml_is_contiguous_1(op->src[0]); + default: + return false; + } + break; case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: { diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index aebcc03915f5f..4696f1fe46e3b 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -437,6 +437,10 @@ struct vk_device_struct { vk_pipeline pipeline_tanh[2]; vk_pipeline pipeline_sigmoid[2]; + vk_pipeline pipeline_geglu[2]; + vk_pipeline pipeline_reglu[2]; + vk_pipeline pipeline_swiglu[2]; + vk_pipeline pipeline_leaky_relu_f32; vk_pipeline pipeline_silu_back_f32; vk_pipeline pipeline_diag_mask_inf_f32; @@ -661,6 +665,13 @@ struct vk_op_push_constants { float param2; }; +struct vk_op_glu_push_constants { + uint32_t N; + uint32_t ne00; + uint32_t ne20; + uint32_t mode; // 0: default, 1: swapped, 2: split +}; + struct vk_op_unary_push_constants { uint32_t ne; uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03; @@ -2757,6 +2768,15 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_UNARY(sigmoid) #undef CREATE_UNARY +#define CREATE_GLU(name) \ + ggml_vk_create_pipeline(device, device->pipeline_ ## name [0], #name "_f32", name ## _f32_len, name ## _f32_data, "main", 3, sizeof(vk_op_glu_push_constants), {512, 1, 1}, {}, 1, true); \ + ggml_vk_create_pipeline(device, device->pipeline_ ## name [1], #name "_f16", name ## _f16_len, name ## _f16_data, "main", 3, sizeof(vk_op_glu_push_constants), {512, 1, 1}, {}, 1, true); + + CREATE_GLU(geglu) + CREATE_GLU(reglu) + CREATE_GLU(swiglu) +#undef CREATE_GLU + ggml_vk_create_pipeline(device, device->pipeline_leaky_relu_f32, "leaky_relu_f32", leaky_relu_f32_len, leaky_relu_f32_data, "main", 2, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_silu_back_f32, "silu_back_f32", silu_back_f32_len, silu_back_f32_data, "main", 3, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1); @@ -6473,6 +6493,24 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const break; } return nullptr; + case GGML_OP_GLU: + if ((src0->type != GGML_TYPE_F32 && src0->type != GGML_TYPE_F16) || + (dst->type != GGML_TYPE_F32 && dst->type != GGML_TYPE_F16) || + (src0->type != dst->type)) { + return nullptr; + } + + switch (ggml_get_glu_op(dst)) { + case GGML_GLU_OP_GEGLU: + return ctx->device->pipeline_geglu[dst->type == GGML_TYPE_F16]; + case GGML_GLU_OP_REGLU: + return ctx->device->pipeline_reglu[dst->type == GGML_TYPE_F16]; + case GGML_GLU_OP_SWIGLU: + return ctx->device->pipeline_swiglu[dst->type == GGML_TYPE_F16]; + default: + break; + } + return nullptr; case GGML_OP_DIAG_MASK_INF: if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { return ctx->device->pipeline_diag_mask_inf_f32; @@ -6933,6 +6971,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co case GGML_OP_CONCAT: case GGML_OP_UPSCALE: case GGML_OP_UNARY: + case GGML_OP_GLU: case GGML_OP_CONV_2D_DW: { uint32_t ne = ggml_nelements(dst); @@ -6973,7 +7012,7 @@ 
static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co } } - if (op == GGML_OP_SOFT_MAX) { + if (op == GGML_OP_SOFT_MAX || op == GGML_OP_GLU) { // Empty src1 is possible in soft_max, but the shader needs a buffer vk_subbuffer subbuf_y; if (use_src1) { @@ -7566,6 +7605,25 @@ static void ggml_vk_unary(ggml_backend_vk_context * ctx, vk_context& subctx, con ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_UNARY, { (uint32_t)ggml_nelements(src0), 0, 0.0f, 0.0f }, dryrun); } +static void ggml_vk_glu(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) { + const bool swapped = (bool)dst->op_params[1]; + const bool split = src1 != nullptr; + + GGML_ASSERT(ggml_is_contiguous(src0)); + + if (!split) { + GGML_ASSERT(src0->ne[0] / 2 == dst->ne[0]); + } else { + GGML_ASSERT(src0->ne[0] == src1->ne[0]); + GGML_ASSERT(src0->ne[0] == dst->ne[0]); + GGML_ASSERT(src0->type == src1->type); + } + + const uint32_t mode = split ? 2 : (swapped ? 1 : 0); + + ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_GLU, { (uint32_t)ggml_nelements(dst), (uint32_t)src0->ne[0], (uint32_t)dst->ne[0], mode }, dryrun); +} + static void ggml_vk_diag_mask_inf(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) { int32_t * op_params = (int32_t *)dst->op_params; ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_DIAG_MASK_INF, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0] }, dryrun); @@ -8778,6 +8836,16 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr return false; } break; + case GGML_OP_GLU: + switch (ggml_get_glu_op(node)) { + case GGML_GLU_OP_GEGLU: + case GGML_GLU_OP_REGLU: + case GGML_GLU_OP_SWIGLU: + break; + default: + return false; + } + break; case GGML_OP_REPEAT: case GGML_OP_REPEAT_BACK: case GGML_OP_GET_ROWS: @@ -8870,6 +8938,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr case GGML_OP_RMS_NORM_BACK: case GGML_OP_L2_NORM: case GGML_OP_UNARY: + case GGML_OP_GLU: case GGML_OP_DIAG_MASK_INF: case GGML_OP_SOFT_MAX: case GGML_OP_SOFT_MAX_BACK: @@ -9013,6 +9082,17 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr return false; } break; + case GGML_OP_GLU: + switch (ggml_get_glu_op(node)) { + case GGML_GLU_OP_GEGLU: + case GGML_GLU_OP_REGLU: + case GGML_GLU_OP_SWIGLU: + ggml_vk_glu(ctx, compute_ctx, src0, src1, node, dryrun); + break; + default: + return false; + } + break; case GGML_OP_DIAG_MASK_INF: ggml_vk_diag_mask_inf(ctx, compute_ctx, src0, node, dryrun); @@ -9138,8 +9218,9 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr if (!ok) { if (node->op == GGML_OP_UNARY) { std::cerr << __func__ << ": error: op not supported UNARY " << node->name << " (" << ggml_unary_op_name(static_cast(node->op_params[0])) << ")" << std::endl; - } - else { + } else if (node->op == GGML_OP_GLU) { + std::cerr << __func__ << ": error: op not supported GLU " << node->name << " (" << ggml_glu_op_name(static_cast(node->op_params[0])) << ")" << std::endl; + } else { std::cerr << __func__ << ": error: op not supported " << node->name << " (" << ggml_op_name(node->op) << ")" << std::endl; } } @@ -9218,6 +9299,17 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor * return false; } break; + case GGML_OP_GLU: + switch (ggml_get_glu_op(tensor)) { + 
case GGML_GLU_OP_GEGLU: + case GGML_GLU_OP_REGLU: + case GGML_GLU_OP_SWIGLU: + buf = tensor->buffer; + break; + default: + return false; + } + break; case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: case GGML_OP_FLASH_ATTN_EXT: @@ -10016,6 +10108,19 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm return false; } break; + case GGML_OP_GLU: + switch (ggml_get_glu_op(op)) { + case GGML_GLU_OP_GEGLU: + case GGML_GLU_OP_REGLU: + case GGML_GLU_OP_SWIGLU: + return ggml_is_contiguous(op->src[0]) && + (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) && + (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16) && + (op->src[0]->type == op->type); + default: + return false; + } + break; case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: { @@ -10746,6 +10851,12 @@ static void ggml_vk_check_results_0(ggml_tensor * tensor) { std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl; GGML_ABORT("fatal error"); } + } else if (tensor->op == GGML_OP_GLU) { + if (src_clone[1] == nullptr) { + tensor_clone = ggml_glu(ggml_ctx, src_clone[0], (ggml_glu_op) tensor->op_params[0], tensor->op_params[1]); + } else { + tensor_clone = ggml_glu_split(ggml_ctx, src_clone[0], src_clone[1], (ggml_glu_op) tensor->op_params[0]); + } } else if (tensor->op == GGML_OP_CPY || tensor->op == GGML_OP_DUP) { if (src1 == nullptr) { tensor_clone = ggml_dup(ggml_ctx, src_clone[0]); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/geglu.comp b/ggml/src/ggml-vulkan/vulkan-shaders/geglu.comp new file mode 100644 index 0000000000000..f4268ed24f44c --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/geglu.comp @@ -0,0 +1,13 @@ +#version 450 + +#include "glu_head.comp" + +const float GELU_COEF_A = 0.044715f; +const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; + +float op(float a, float b) { + const float val = SQRT_2_OVER_PI*a*(1.0f + GELU_COEF_A*a*a); + return 0.5f*a*(2.0f - 2.0f / (exp(2 * val) + 1)) * b; +} + +#include "glu_main.comp" diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/glu_head.comp b/ggml/src/ggml-vulkan/vulkan-shaders/glu_head.comp new file mode 100644 index 0000000000000..41a29889075f6 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/glu_head.comp @@ -0,0 +1,15 @@ +#extension GL_EXT_shader_16bit_storage : require + +layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in; + +layout (binding = 0) readonly buffer A {A_TYPE data_a[];}; +layout (binding = 1) readonly buffer B {A_TYPE data_b[];}; +layout (binding = 2) writeonly buffer D {D_TYPE data_d[];}; + +layout (push_constant) uniform parameter +{ + uint N; + uint ne00; + uint ne20; + uint mode; +} p; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/glu_main.comp b/ggml/src/ggml-vulkan/vulkan-shaders/glu_main.comp new file mode 100644 index 0000000000000..85cf65a9ecac8 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/glu_main.comp @@ -0,0 +1,29 @@ +void main() { + const uint i = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x; + + if (i >= p.N) { + return; + } + + const uint row = i / p.ne20; + const uint col = i - row * p.ne20; + + if (p.mode == 0) { + // Default + const uint offset = p.ne00 / 2; + const uint idx = row * p.ne00 + col; + + data_d[row * offset + col] = D_TYPE(op(float(data_a[idx]), float(data_a[idx + offset]))); + } else if (p.mode == 1) { + // Swapped + const uint offset = p.ne00 / 2; + const uint idx = row * p.ne00 + col; + + data_d[row * offset + col] = D_TYPE(op(float(data_a[idx + 
offset]), float(data_a[idx]))); + } else { + // Split + const uint idx = row * p.ne00 + col; + + data_d[idx] = D_TYPE(op(float(data_a[idx]), float(data_b[idx]))); + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/reglu.comp b/ggml/src/ggml-vulkan/vulkan-shaders/reglu.comp new file mode 100644 index 0000000000000..0073d8f766610 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/reglu.comp @@ -0,0 +1,9 @@ +#version 450 + +#include "glu_head.comp" + +float op(float a, float b) { + return max(a, 0.0f) * b; +} + +#include "glu_main.comp" diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/swiglu.comp b/ggml/src/ggml-vulkan/vulkan-shaders/swiglu.comp new file mode 100644 index 0000000000000..a28e7c6cc8660 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/swiglu.comp @@ -0,0 +1,9 @@ +#version 450 + +#include "glu_head.comp" + +float op(float a, float b) { + return a / (1.0f + exp(-a)) * b; +} + +#include "glu_main.comp" diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index a207b98c60e51..23fc50bf29503 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -585,6 +585,13 @@ void process_shaders() { string_to_spv("sigmoid_f16", "sigmoid.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}}); string_to_spv("sigmoid_f32", "sigmoid.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); + string_to_spv("geglu_f16", "geglu.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}}); + string_to_spv("geglu_f32", "geglu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); + string_to_spv("reglu_f16", "reglu.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}}); + string_to_spv("reglu_f32", "reglu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); + string_to_spv("swiglu_f16", "swiglu.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}}); + string_to_spv("swiglu_f32", "swiglu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); + string_to_spv("leaky_relu_f32", "leaky_relu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); string_to_spv("silu_back_f32", "silu_back.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}); diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 1262236c0347f..14000b55aca1e 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -982,9 +982,11 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "CROSS_ENTROPY_LOSS", "CROSS_ENTROPY_LOSS_BACK", "OPT_STEP_ADAMW", + + "GLU", }; -static_assert(GGML_OP_COUNT == 84, "GGML_OP_COUNT != 84"); +static_assert(GGML_OP_COUNT == 85, "GGML_OP_COUNT != 85"); static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "none", @@ -1079,9 +1081,11 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "cross_entropy_loss(x,y)", "cross_entropy_loss_back(x,y)", "adamw(x)", + + "glu(x)", }; -static_assert(GGML_OP_COUNT == 84, "GGML_OP_COUNT != 84"); +static_assert(GGML_OP_COUNT == 85, "GGML_OP_COUNT != 85"); static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2"); @@ -1107,6 +1111,15 @@ static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = { static_assert(GGML_UNARY_OP_COUNT == 15, "GGML_UNARY_OP_COUNT != 15"); +static const char * GGML_GLU_OP_NAME[GGML_GLU_OP_COUNT] = { + "REGLU", + "GEGLU", + "SWIGLU", +}; + +static_assert(GGML_GLU_OP_COUNT == 3, "GGML_GLU_OP_COUNT != 3"); + + static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN"); static_assert(sizeof(struct 
ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN"); @@ -1209,11 +1222,19 @@ const char * ggml_unary_op_name(enum ggml_unary_op op) { return GGML_UNARY_OP_NAME[op]; } +const char * ggml_glu_op_name(enum ggml_glu_op op) { + return GGML_GLU_OP_NAME[op]; +} + const char * ggml_op_desc(const struct ggml_tensor * t) { if (t->op == GGML_OP_UNARY) { enum ggml_unary_op uop = ggml_get_unary_op(t); return ggml_unary_op_name(uop); } + if (t->op == GGML_OP_GLU) { + enum ggml_glu_op gop = ggml_get_glu_op(t); + return ggml_glu_op_name(gop); + } return ggml_op_name(t->op); } @@ -1730,6 +1751,11 @@ enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) { return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0); } +enum ggml_glu_op ggml_get_glu_op(const struct ggml_tensor * tensor) { + GGML_ASSERT(tensor->op == GGML_OP_GLU); + return (enum ggml_glu_op) ggml_get_op_params_i32(tensor, 0); +} + const char * ggml_get_name(const struct ggml_tensor * tensor) { return tensor->name; } @@ -2609,6 +2635,114 @@ struct ggml_tensor * ggml_exp_inplace( return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_EXP); } +// ggml_glu + +static struct ggml_tensor * ggml_glu_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + enum ggml_glu_op op, + bool swapped) { + GGML_ASSERT(ggml_is_contiguous_1(a)); + + if (b) { + GGML_ASSERT(ggml_is_contiguous_1(b)); + GGML_ASSERT(ggml_are_same_shape(a, b)); + GGML_ASSERT(a->type == b->type); + } + + int64_t ne[GGML_MAX_DIMS] = { a->ne[0] / 2 }; for (int i = 1; i < GGML_MAX_DIMS; i++) ne[i] = a->ne[i]; + struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, GGML_MAX_DIMS, b ? a->ne : ne, NULL, 0); + + ggml_set_op_params_i32(result, 0, (int32_t) op); + ggml_set_op_params_i32(result, 1, (int32_t) swapped); + + result->op = GGML_OP_GLU; + result->src[0] = a; + result->src[1] = b; + + return result; +} + +struct ggml_tensor * ggml_glu( + struct ggml_context * ctx, + struct ggml_tensor * a, + enum ggml_glu_op op, + bool swapped) { + return ggml_glu_impl(ctx, a, NULL, op, swapped); +} + +struct ggml_tensor * ggml_glu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + enum ggml_glu_op op) { + return ggml_glu_impl(ctx, a, b, op, false); +} + +// ggml_reglu + +struct ggml_tensor * ggml_reglu( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_REGLU, false); +} + +struct ggml_tensor * ggml_reglu_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_REGLU, true); +} + +struct ggml_tensor * ggml_reglu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_glu_impl(ctx, a, b, GGML_GLU_OP_REGLU, false); +} + +// ggml_geglu + +struct ggml_tensor * ggml_geglu( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_GEGLU, false); +} + +struct ggml_tensor * ggml_geglu_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_GEGLU, true); +} + +struct ggml_tensor * ggml_geglu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_glu_impl(ctx, a, b, GGML_GLU_OP_GEGLU, false); +} + +// ggml_swiglu + +struct ggml_tensor * ggml_swiglu( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_SWIGLU, false); 
+} + +struct ggml_tensor * ggml_swiglu_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_SWIGLU, true); +} + +struct ggml_tensor * ggml_swiglu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_glu_impl(ctx, a, b, GGML_GLU_OP_SWIGLU, false); +} + // ggml_norm static struct ggml_tensor * ggml_norm_impl( diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 71ee431a977ba..010300df6098e 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -560,12 +560,20 @@ ggml_tensor * llm_graph_context::build_ffn( switch (type_op) { case LLM_FFN_SILU: - { + if (gate && type_gate == LLM_FFN_PAR) { + cur = ggml_swiglu_split(ctx0, cur, tmp); + cb(cur, "ffn_swiglu", il); + type_gate = LLM_FFN_SEQ; + } else { cur = ggml_silu(ctx0, cur); cb(cur, "ffn_silu", il); } break; case LLM_FFN_GELU: - { + if (gate && type_gate == LLM_FFN_PAR) { + cur = ggml_geglu_split(ctx0, cur, tmp); + cb(cur, "ffn_geglu", il); + type_gate = LLM_FFN_SEQ; + } else { cur = ggml_gelu(ctx0, cur); cb(cur, "ffn_gelu", il); if (act_scales != NULL) { @@ -574,7 +582,11 @@ ggml_tensor * llm_graph_context::build_ffn( } } break; case LLM_FFN_RELU: - { + if (gate && type_gate == LLM_FFN_PAR) { + cur = ggml_reglu_split(ctx0, cur, tmp); + cb(cur, "ffn_reglu", il); + type_gate = LLM_FFN_SEQ; + } else { cur = ggml_relu(ctx0, cur); cb(cur, "ffn_relu", il); } break; @@ -588,32 +600,19 @@ ggml_tensor * llm_graph_context::build_ffn( } break; case LLM_FFN_SWIGLU: { - // Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf - int64_t split_point = cur->ne[0] / 2; - // TODO: these conts should not be needed, see https://github.com/ggml-org/llama.cpp/pull/14090#discussion_r2137437217 - ggml_tensor * x0 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], 0)); - ggml_tensor * x1 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur))); - - x0 = ggml_silu(ctx0, x0); - cb(cur, "ffn_silu", il); - - cur = ggml_mul(ctx0, x0, x1); - cb(cur, "ffn_mul", il); + cur = ggml_swiglu(ctx0, cur); + cb(cur, "ffn_swiglu", il); } break; case LLM_FFN_GEGLU: { - // Split into two equal parts - int64_t split_point = cur->ne[0] / 2; - // TODO: these conts should not be needed, see https://github.com/ggml-org/llama.cpp/pull/14090#discussion_r2137437217 - ggml_tensor * x0 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], 0)); - ggml_tensor * x1 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur))); - - x0 = ggml_gelu(ctx0, x0); - cb(x0, "ffn_gelu", il); - - cur = ggml_mul(ctx0, x0, x1); + cur = ggml_geglu(ctx0, cur); cb(cur, "ffn_geglu", il); } break; + case LLM_FFN_REGLU: + { + cur = ggml_reglu(ctx0, cur); + cb(cur, "ffn_reglu", il); + } break; } if (gate && type_gate == LLM_FFN_PAR) { @@ -743,12 +742,18 @@ ggml_tensor * llm_graph_context::build_moe_ffn( switch (type_op) { case LLM_FFN_SILU: - { + if (gate_exps) { + cur = ggml_swiglu_split(ctx0, cur, up); + cb(cur, "ffn_moe_swiglu", il); + } else { cur = ggml_silu(ctx0, cur); cb(cur, "ffn_moe_silu", il); } break; case LLM_FFN_GELU: - { + if (gate_exps) { + cur = ggml_geglu_split(ctx0, cur, up); + cb(cur, "ffn_moe_geglu", il); + } else { cur = ggml_gelu(ctx0, cur); cb(cur, "ffn_moe_gelu", il); } break; @@ -756,11 +761,6 @@ ggml_tensor * llm_graph_context::build_moe_ffn( 
GGML_ABORT("fatal error"); } - if (gate_exps) { - cur = ggml_mul(ctx0, cur, up); // [n_ff, n_expert_used, n_tokens] - cb(cur, "ffn_moe_gate_par", il); - } - experts = build_lora_mm_id(down_exps, cur, selected_experts); // [n_embd, n_expert_used, n_tokens] cb(experts, "ffn_moe_down", il); diff --git a/src/llama-graph.h b/src/llama-graph.h index ee2197e892b5a..ceddb6021f114 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -38,6 +38,7 @@ enum llm_ffn_op_type { LLM_FFN_RELU_SQR, LLM_FFN_SWIGLU, LLM_FFN_GEGLU, + LLM_FFN_REGLU, }; enum llm_ffn_gate_type { diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index ec088bae2a65f..16c4268579756 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -1106,6 +1106,107 @@ struct test_unary : public test_case { }; +// GGML_OP_GLU +struct test_glu : public test_case { + const ggml_glu_op op; + const ggml_type type; + const std::array ne_a; + int v; // view (1 : non-contiguous a) + bool swapped; + + std::string vars() override { + return VARS_TO_STR4(type, ne_a, v, swapped); + } + + test_glu(ggml_glu_op op, + ggml_type type = GGML_TYPE_F32, + std::array ne_a = {128, 2, 2, 2}, + int v = 0, + bool swapped = false) + : op(op), type(type), ne_a(ne_a), v(v), swapped(swapped) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a; + if (v & 1) { + auto ne = ne_a; ne[0] *= 3; + a = ggml_new_tensor(ctx, type, 4, ne.data()); + ggml_set_name(a, "a"); + + a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0); + ggml_set_name(a, "view_of_a"); + } else { + a = ggml_new_tensor(ctx, type, 4, ne_a.data()); + ggml_set_name(a, "a"); + } + + ggml_tensor * out = ggml_glu(ctx, a, op, swapped); + ggml_set_name(out, "out"); + + return out; + } + + void initialize_tensors(ggml_context * ctx) override { + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + // test extended range of values to check for NaNs in GELU + init_tensor_uniform(t, -150.f, 150.f); + } + } +}; + +struct test_glu_split : public test_case { + const ggml_glu_op op; + const ggml_type type; + const std::array ne_a; + int v; // view (1 : non-contiguous a) + + std::string vars() override { + return VARS_TO_STR3(type, ne_a, v) + ",split"; + } + + test_glu_split(ggml_glu_op op, + ggml_type type = GGML_TYPE_F32, + std::array ne_a = {128, 2, 2, 2}, + int v = 0) + : op(op), type(type), ne_a(ne_a), v(v) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a; + ggml_tensor * b; + if (v & 1) { + auto ne = ne_a; ne[0] *= 3; + a = ggml_new_tensor(ctx, type, 4, ne.data()); + ggml_set_name(a, "a"); + + a = ggml_view_4d(ctx, a, ne_a[0], ne_a[1], ne_a[2], ne_a[3], a->nb[1], a->nb[2], a->nb[3], 0); + ggml_set_name(a, "view_of_a"); + + b = ggml_new_tensor(ctx, type, 4, ne.data()); + ggml_set_name(b, "b"); + + b = ggml_view_4d(ctx, b, ne_a[0], ne_a[1], ne_a[2], ne_a[3], b->nb[1], b->nb[2], b->nb[3], 0); + ggml_set_name(a, "view_of_b"); + } else { + a = ggml_new_tensor(ctx, type, 4, ne_a.data()); + ggml_set_name(a, "a"); + + b = ggml_new_tensor(ctx, type, 4, ne_a.data()); + ggml_set_name(b, "b"); + } + + ggml_tensor * out = ggml_glu_split(ctx, a, b, op); + ggml_set_name(out, "out"); + + return out; + } + + void initialize_tensors(ggml_context * ctx) override { + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + // test extended range of values to check for NaNs in GELU + init_tensor_uniform(t, 
-150.f, 150.f); + } + } +}; + // GGML_OP_GET_ROWS struct test_get_rows : public test_case { const ggml_type type; @@ -4094,6 +4195,21 @@ static std::vector> make_test_cases_eval() { } } + // glu ops + for (ggml_type type : {GGML_TYPE_F16, GGML_TYPE_F32}) { + for (int v : {0, 1}) { + for (int op = 0; op < GGML_GLU_OP_COUNT; op++) { + for (bool swapped : {false, true}) { + test_cases.emplace_back(new test_glu((ggml_glu_op) op, type, { 128, 2, 2, 2 }, v, swapped)); + test_cases.emplace_back(new test_glu((ggml_glu_op) op, type, { 5, 7, 11, 13 }, v, swapped)); + } + + test_cases.emplace_back(new test_glu_split((ggml_glu_op) op, type, { 128, 2, 2, 2 }, v)); + test_cases.emplace_back(new test_glu_split((ggml_glu_op) op, type, { 5, 7, 11, 13 }, v)); + } + } + } + test_cases.emplace_back(new test_get_rows(GGML_TYPE_F32, 1, 8, 2, 1, false)); for (ggml_type type : all_types) { for (int b : {1, 7}) { From 0f5b1fde0a0c7d143016af077a5993b40c17d106 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sigbj=C3=B8rn=20Skj=C3=A6ret?= Date: Sun, 29 Jun 2025 14:38:10 +0200 Subject: [PATCH 171/192] ggml : fix unmerged GGML_FPxx_TO_FPxx refactoring (#14443) --- ggml/src/ggml-cpu/vec.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/ggml/src/ggml-cpu/vec.h b/ggml/src/ggml-cpu/vec.h index ebd4b75613451..d5507d75646d4 100644 --- a/ggml/src/ggml-cpu/vec.h +++ b/ggml/src/ggml-cpu/vec.h @@ -913,8 +913,8 @@ inline static void ggml_vec_reglu_f32 (const int n, float * y, const float * x, inline static void ggml_vec_reglu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) { for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(x[i]); - y[i] = GGML_FP32_TO_FP16((v > 0.f) ? v * GGML_FP16_TO_FP32(g[i]) : 0.f); + float v = GGML_CPU_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? 
v * GGML_CPU_FP16_TO_FP32(g[i]) : 0.f); } } @@ -927,9 +927,9 @@ inline static void ggml_vec_geglu_f32(const int n, float * y, const float * x, c } else if (x[i] >= 10.0f) { y[i] = x[i] * g[i]; } else { - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); + ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]) * g[i]; + y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_f16[t]) * g[i]; } } } @@ -944,8 +944,8 @@ inline static void ggml_vec_geglu_f32(const int n, float * y, const float * x, c inline static void ggml_vec_geglu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) { const uint16_t * i16 = (const uint16_t *) x; for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(g[i]); - y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(ggml_table_gelu_f16[i16[i]]) * v); + float v = GGML_CPU_FP16_TO_FP32(g[i]); + y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(ggml_table_gelu_f16[i16[i]]) * v); } } @@ -953,9 +953,9 @@ void ggml_vec_swiglu_f32(const int n, float * y, const float * x, const float * inline static void ggml_vec_swiglu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) { for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(x[i]); - float w = GGML_FP16_TO_FP32(g[i]); - y[i] = GGML_FP32_TO_FP16((v/(1.0f + expf(-v))) * w); + float v = GGML_CPU_FP16_TO_FP32(x[i]); + float w = GGML_CPU_FP16_TO_FP32(g[i]); + y[i] = GGML_CPU_FP32_TO_FP16((v/(1.0f + expf(-v))) * w); } } From 54caf5d4f4486d6a721ff033bd7c454dcaa2ec34 Mon Sep 17 00:00:00 2001 From: Akarshan Biswas Date: Sun, 29 Jun 2025 21:07:58 +0530 Subject: [PATCH 172/192] SYCL: disable faulty fp16 exp kernel (#14395) * SYCL: disable faulty fp16 CPU exponent for now * Revert "SYCL: disable faulty fp16 CPU exponent for now" This reverts commit ed0aab1ec31b4eb4b0f275dd7acd41d96a375202. 
* SYCL: disable faulty fp16 CPU exponent for now * Fix logic of disabling exponent kernel --- ggml/src/ggml-sycl/ggml-sycl.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index ae5e062572e32..4ecca4165bee3 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -4215,7 +4215,6 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_GELU_ERF: case GGML_UNARY_OP_TANH: - case GGML_UNARY_OP_EXP: case GGML_UNARY_OP_SGN: case GGML_UNARY_OP_ABS: case GGML_UNARY_OP_ELU: @@ -4224,6 +4223,9 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g #else return ggml_is_contiguous(op->src[0]) && (op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32) && (op->type == op->src[0]->type); #endif + case GGML_UNARY_OP_EXP: + // Disable FP16 until we find out the root cause of failing fp16 sycl::exp + return ggml_is_contiguous(op->src[0]) && (op->type == op->src[0]->type) && op->src[0]->type == GGML_TYPE_F32; default: return false; } From 71c0d60a44f9a624ec7dce8497c653fbfb229eed Mon Sep 17 00:00:00 2001 From: Renat Date: Sun, 29 Jun 2025 19:29:57 +0200 Subject: [PATCH 173/192] server : fix appearance of the chats list context menu for Safari (#14322) --- tools/server/public/index.html.gz | Bin 1913886 -> 1913892 bytes tools/server/webui/src/components/Sidebar.tsx | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/server/public/index.html.gz b/tools/server/public/index.html.gz index 0fb01665ae5ccf999a186e13ea09b8f67de3fa1e..53b71079c1e2a3ffebc5fbc70d6b30ca3de8f13c 100644 GIT binary patch delta 1705676 [base85-encoded binary delta data for index.html.gz omitted]
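Note (illustrative, not part of any commit in this series): the GLU commits above replace the per-layer view + silu/gelu + mul sequences in build_ffn with single fused ops. Below is a minimal sketch of how the new API can be used to assemble a SwiGLU feed-forward block. The helper name, weight-tensor names and shapes are assumptions made for the example; only the ggml_swiglu / ggml_swiglu_split / ggml_glu calls and their semantics come from the patches above.

#include "ggml.h"

// build_swiglu_ffn_sketch and the ffn_up/ffn_gate/ffn_down names and shapes
// below are illustrative assumptions, not taken from an actual model.
static struct ggml_tensor * build_swiglu_ffn_sketch(
        struct ggml_context * ctx,
        struct ggml_tensor  * cur,        // [n_embd, n_tokens] input activations
        struct ggml_tensor  * ffn_up,     // [n_embd, n_ff], or [n_embd, 2*n_ff] packed up+gate when ffn_gate == NULL
        struct ggml_tensor  * ffn_gate,   // [n_embd, n_ff] or NULL
        struct ggml_tensor  * ffn_down) { // [n_ff, n_embd]
    if (ffn_gate) {
        // separate up/gate weights -> split variant; replaces the former
        // explicit ggml_silu + ggml_mul pair in build_ffn
        struct ggml_tensor * up   = ggml_mul_mat(ctx, ffn_up,   cur);  // [n_ff, n_tokens]
        struct ggml_tensor * gate = ggml_mul_mat(ctx, ffn_gate, cur);  // [n_ff, n_tokens]
        cur = ggml_swiglu_split(ctx, gate, up);                        // silu(gate) * up
    } else {
        // single packed projection -> fused variant; the op halves ne[0],
        // activating the first half and gating with the second (swapped=false)
        cur = ggml_mul_mat(ctx, ffn_up, cur);                          // [2*n_ff, n_tokens]
        cur = ggml_swiglu(ctx, cur);                                   // [n_ff, n_tokens]
        // ggml_swiglu(ctx, x) is shorthand for ggml_glu(ctx, x, GGML_GLU_OP_SWIGLU, false)
    }
    return ggml_mul_mat(ctx, ffn_down, cur);                           // [n_embd, n_tokens]
}

The split variants take the two projections directly and avoid the ggml_cont copies that the old view-based SWIGLU/GEGLU paths needed, and the swapped flag of ggml_glu selects which half of a packed projection feeds the activation.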
z-|3P@b_iAGn7c@>HcRCo7K*jc48N%OFk;y57$`2CxZ;;4$+*49i5MTel^){+#l^94 zJ>HqC#BCwThEV9$J$(C?4b`4>UTZaOram%=0; z5>6u2pXQKr?V`r4=q7uHxI$kjdTRD%FKrsEM}d$Sx-L%CHAn6`&Fggfubvsa=m2!s z2kLS*4HI8^IaE>>(B#(YRi(P42Qh_Ny~2RP?{gS4upMhUI0Qr>RqvGHMk%g!P z+76|YS^z98r}!Zt{bgzWML5ZnlgS|3nU5x@d}-wtEwIw(bX@f#3+6}Z!Uatmf;*Lf z9^>bS9%B?3=puRa2- zBX40U)1mW|yuUcFs-FmH(2|2R4pMbU{Y#~yu_(svn3o&}O`H?d0dr_ISFQT? z^%&xrbcNHOky&0T89F?{dZ4}94*i5_`D}nzCkwq%o5>5uiYIp8ij{DCo8QRlgmuPtbcI3}^}$Kcg_PUOH0`n-kp&$T6Ypc7~uKp8iWAWG&q zF>HIkzlFK*8(+64@9dEVO!I9KX7uwu zfIUN-Vs<-`t{908@N6hmj+AoJm~5h%7dA%0xbAYMIkX8&>)tEb`IVEi6gPc~T@~~s zy>0|YKFg)a&@uUyc@2{VxaP|y|{5W~HbJqlLeu=#*C4Z!_+!{Sf?>aXjwCQoXJi9P;$=?{<* z;tKc_Z#0#g@-09 zZmjMrSbn|%4h9oYzbd?xa98QpQJ`2#3KyP{?<-5ai+;P!jvHqr{yW=h8+z&;-(nX8 zXBuZEhlzooGbn1?7@Hw)9<_l-As;AYiCquEmqtYzm?Pjq;lwLk2BTOFFj^_W1DBq)X&Pjx)5up`D(LlXxP=eH~X=|6iz3&xt{e{B#O zx&TFdwJKm{#CIs@=CVUpQ1WUD^^OmKZC4Qf>Gs_m54QP$A6$Y*-)(x*Zm5|$0<94n z%r!j)FtT_AEwq^F2T)a4f<>SVj=1w*SLS7$)_^r=oW4qOby@R|qA860X+x36ynP5@ zPkY9)eg?`~a(lzKRAO;i_G0yXEEXMVi{OeJDmuMJs64s7=IBb+!*yg+2HcsxW$k}~ zdTq}~G*U~(Oj#3{+-aLB;Ueu55e*df=7-z>Kc@zghYbo5uud!g8k$ef@6NsPgp2<; zI`c4Y-5nY(?CN&;U~c3H&uv@1rCKTe8Op@dM>2NZX9U&0Oz}EB(l%G&32(^_e;pVX zGS6$iH0JjK7O%#Wg^gMv4s}3=tF$K}6w{z&j72!a>h*|T(>f<_=>O_RIN_y;Y>4j& za4zs`uSxX0rY0{3IL9VLLZ?=l(8!1u(;`jP(*9DPruw&3UfHQ0v>y_cA9ok&uf9^y zv7*|NNNOCbpBn?T6L0G@nUV_NF7-!c3hhS7Z&ZfsOOg zOM*gyC#2njmd}`-VO&6shxoyXVvbJ=$Rsyl1{lg{NL<_H?T>=hka|hki9U(S6;hh2B@1vJ~?&^rKSjm5~>${Ozm4;F#cF$A6#L zL@z_C2P=_CTcoAU=-Gr6aThoTsjl8$Y6`_O9b8pgxqvuFb9?_jSp$_RXkQ*F!>|_-)$n3ukK|V?2G2U7I8i( zjy8s)iT6*`ewgPp-ewMIarl^rCLXWduRjUYD1(zM+j1%fRynl1TObbOdkh-iQ)6!k z>bLA6a0!+nCrNqDa8GA)Bant{03CAClk1}4^Q-bzU$F&ow5x84z0`7^TYyQDl7eSe{1ao+(6M!}t8r+oepvjQ&$G#J&RP2ca5uD(MSn3a z;{>jWJ}yh@k~&diU<7@Z-S6}^EPeTfiC7*zu3Zp3h8>N~;J%i=7Q{##1)QL9y{7xr zcMEM~eRjNClp9q9p~f3wsh28DOC!T|xwa10$40X^iu6qn9els^T2B~Q>K}f!iJ=Sy zrUyWmLr$y69(@1Yh|2blQE*K3crKB6T1v~VUtdXDPD}Y zCd&t}=gQMI>iUoHJP_R-7`8rV`x0{UyWS5W`(?#!J`1HLw@Om9(2qA?)JKM(|L})I z`aRp44v%c82+w3~)(i9(79)q$2N-;fx5b6D1!xQtK-lvO3Oa58@z|;0xZ$Ne=iX?K ziyyI#a1PZPDmqX2OITY8%ds;$16^rV>KTFSK_2YQ35`8IZgs7< z3`s6Hq+{jMsgTz7b1#a|o8sCW7jV z8m;HLFHP!7I}7*3kAtlCHLbGeOR3S#Rye2=oDo?GhaO&C8}zbzlpO_g?qi0<9e*YV z?+|y*Zyf`Pu4!n7H2k{vf`p|F46o87`=`o0O?)25|L)7*T_EX=ByK;SM;6PKj-xFf z_i(??5~R;jWTTHq>^}$mKsGizqRH~61q?xgel~g#SHU^uKS-_T)`$qPH^Jw)Kh5gj zVr*@0uBTrR`IlMn2MkJL1W>8(K#snA7yMzTwg?4mu@a2IP12NIz&O{PIfRAKd5mpc zoMQgKnSwPjLyI2p#TdnU(^g4yNZ}X{(Un1nHhG)L%qy=5(9|RFLMdfu!-(3XlGM`=ls-wooOk z%m4$Z?eSljb9ORXERB<#S|P9{ld96J??)E88|c|!AIv3=50QU~!5Dlo{~L27C@rB9 z&JNiTActhOcPdfN82&Ve={w^pqc{vt#^MCM-xGllBc!@~GeXzhz|03QxewP`%Fhb&WMPpT>091UJ-RHnB(1}6 z4>XV_{tVw)?y*)Oloq}5$w-pgMG76_oJM zA-^!pcerAJmxOcF6~HV}1K*jX zH;9?IYKZY@8Y0}zEV|iEaAKgJPA4niKlcg|ob{GWiDG0ot;lS7Aft~#-i}? zG{6rz*@t(xw8)0Q=9%}J>WU`iB1lOJnR(d}x-2)+$Ha2BmVUl>*Ta1cDNzZShDuSm z;R+M`)uX2}`bIWfDxw&U@(s;^Zw~%Xnd0Mi$kefjG16M(Ze1>}h^9f+neJ6$TeIFN zIh=DfYd_g7ghx^)N2kU`CvrUpr-c)^9fqOyK1aXSP{*8nUeC+WdSuGU(9%#OKys)? 
zij|4MDQ$0_av(hgRIW<1RC9sqol+CJUlsiubf!J^ z+>PnE%5I07&mw|3_7~XKhgjo0C~IQNgF3(A0!oDLYwSh;GciY~aOcI%=>X5p2eGL< z+rLsJ&#{ifFcWP0X~lYE2Ua`ddMEJ#BiCD*`Rh{LPM-t7Gd772Rsz6({H50qjWBpZ zFg$BH`oWLTVzt4{DR^Z&E5oG$+HFm75 za4g9XuCSWOSD6CSVn(>vi6p3jP7nrR zx@r@5ZFug=vK7E?^uheEK}Ksfw{k7^RqDy4glH==Q`kL;+~H*~fOifa+ZtyFZ38_3 zYfgM;f4}}e_->xkiKopR3l70mag`~Qk0Fk3Cc)F(bguy8i3q3hmz4h}WJ=S`+|Dd|~;!O9)W%eO1jDi#u}V z_m#tmMVce$U5e38l75+MnHCj;Y2>+#DJ~`IC^{cy|BN-LAwo7$rKa1qR>$FFCmFqF z%7zOH`w4ckq|GdpnFX(`N@Cj`E6E>I<%YOnQy1}@rcy!+nO^EerkkXJ=Rp_;T$1pF zGpw_nN2h5u^^kSnH_PGnic!jI@hwP¨resT1j)Y4|D)m{)bmMjTPGJXw|vv&(wS^0si~% z;sqikldZ*>GBQ7S-JTn;YJF6=_o)t3 z67Z420|?pLqD{5x+7UsOl+5iU1QyS)A76lrEhrvY$Qt@f1IrQIhP=XH`=RCY>!%-| zta}jTYK3mn^g*el3;Iq)N8kI`>le6zuJ<_(@1e4rspZ5kBGGuL#js>a43IVf1f)jy zNXn|Ze)iUx$Wn$nWd%8rm-H4iBE<(jn`pBZeF36VkAhGnFjEtt<4FvcP4D;1uem@@7{KI=%% zD>+@87@oh4v}K%e9fBWK2k(JK$An+PWi{ZPUPm+vPK@Pt&vVP@*)ou`~rghnz{FTxEM2wnjoL3m!}^C(k9i#bJ?(QDmH&*ck$ZXv#V%P*5nW z2&SDEh`9t45fJjW2;bmSpE1Lm$=Qqhprd&kzf$7sFG=S0<4K+8wqQqB{0k5~vlKkL zQ2OwbCqI8u5|SP#`)3S+18xE3yd)fl_eTX>;FpmU;bfiZzf%8tsMza_7gpO!rOIK4 z=VsHexO;rcA~dPHbI>uM)$Gkg*q%bnIYsQ+h#%9k?7qOM8|JObVkrx;zgR8SSqSa* zL$6SOhTwCW8w_0@>9}x;Mg(=Uztf$GI)yEYKwhOGL_*_P`j78MJs%I~Ib7y#1rpA7 zb}1plvK_&UmN$J^0crPwwUs0?_Z5`S@__(|KUO}-`s;}eOFtB#?%~kznhBRin$nyt z;$6_=4?`kP@pXi63=Ea+MYFnjk6%vUWQKJ{c2Ajxp3~jJ7`Ei;ik$dT8 zrs<#A&JEEyUEdN1h7QSh;#j4PK&uR6vACVaH@$=MwI>6gsT~79-UXv9N=uT%nrtFU z#6zTLMnP4plxTav3bW6*D;V?s#AbA4Jw7s9*IB{8A1JKc*WF+WI7W{wEcl~V!5%1q zWgs;Gng*7uo%DT#C_8yb8wHbUwdZIWtXj!t=1vp7S`5|=jh`$#nj7z8=`u^zSJUU) z^5|{pr|{}V z4Fat?gO=3>sl|6WS7JD>d6k4LkgK~ffYb317JPU%dUl5tVXHl>EI?&gW%D3`iEWNE zzj#O}7^BB4$^=eHM@o-7&uqEq9#bLEOQ(B0$Aj6=4FiUMT!TKX9`x@lr zWB&;>yaVnk+CS6-ya_;xpr?ZXFh;mi?fREPms_f@fZjNvoaOHBkKq;(PPE3LK89Wc zDi!6GR-NRf^T0BWJ#@}L*{_&v^6z9?_-KW$)JG&Y7!{DwKaLWmha9ue#y^ebY}Il~ zb{IE)N~(s-=qr!zia=g@v#fxINDZa>hC#b!h5;1gsac~OqRg&y`H~}?<^)N{sG(5?iLDTzva5QyGNtCudj8mrHzPAcM_Gen4&FBj45TqEr;hDg`62AUQ!{S>}AYuv~2!@V9KOXK7q_edVq z4v)u0N}pueAFHKk=YkD{PD8fD*58@w#|EbEaQb)?d_ceVTrD-DUVOdwe>w-mU6Zju ze>P*H9Glv;%?t93yrs99<2rhdgjOzU`DW~|;Az%*?_)!Qb9)iGrp<&z?6$Y~*TD#k zIl7;DpB4FdMd9U1{}*36fBE}nVuKHt44SaF{z?A^sx0c6{q}@Rvv{Jg12_nmr`BaSsj!jF3MVmeo*LE3Y@cc|VLE z2OqcQY2LIo<+pj}$2?I9;Gt?BUPFPWQo_M2syT6~GmE({Zn2m=|7RRcb1}f%!x<$(l0U_M+7=>cd zI+{dT9P2nq5Y6;*HA z&P$`++MzhW8>X~b|J}Q+1fppVXuU19ypJ3G1BNS_6y5&rMNs-J_}?nzwx{&Rl^C3} z*nB8uT(Gz;hh{V6j^3cA>5|L$Jn`h|?*yU!^%+m>bTPrMtZ-B7t0eQK=hc;F<{mY? 
z484Q3CYn`pNj{V(rUh_k4Ba}*^`-?f2(+sD+@nOY6j z-w%#B&-9j}om0pp!h_17QZhZd$WA0_>8t2syNF15eR02d;@BpoXWGxclSslG?k0Ay zUW|Ba>B*xR|4?YUJzWs>>pLt;9NU{VoRk)79-`{84S!DF9X#VZshjXQsYCUsvb)L2HAYg;JQm`XVxE1GbK<*O26zryX6dv!A5^nJ`fV=dIfQ{E8{O=bBz-L9G= zKQcv3mlaKQUMFG%YS^ZEfnF9udD-^QUw>bF)M_c?A{R_3`-LjW8fdk!WcVKdJwU?0 z>OsbmToA7`3uTC^e}qgGgOe=_i%n=+tYSrFV6z)HxVyB5q-mWRS}@Au3hP;yI4VkQ zR#t!^?uB;-jd~%EMG2Z-pQf}pLw0Z;r4!3in2EU)(GplDR?>6v`I0j42x*zWdMmS) zx8?Gk4wj@UBunwB@f5xGkxm%e?i%!jJ2M2VJ;Tf5+lm9 zmdXVtX?Gc-u#hwP$jh8N(({%J^1Sy*rW0|$t6uF#{5oXOoPIf`GZ6m=uIq9UT2`X< zBS`PUF)Qm)>`!DmQ9Q;^!}^KHz84&)LCk0e5cp5hj z>E0>cgz(lfe|ObY-IYTzQG*qFqUMqs*(LD`Y=hf+OGx(k;Jj^6Tg+h|#&ViXPR)X@ zr%{fj5Sgnz>+u}*#GONSaAC;>Zfq!)ULr^pOC-;_Y1q^CgXVw4q-#azSUf6QZ^$JV zgL0EGn=z~3n`W-D25wm!X9cM=T-wt+#|o;B0j%Pzf9k8(ulzHUax+<$vKZT`_r#(m zT-W!c_wuq9*YfBdrj$$ffDwsm<*X3%hPsWwnxw!oJQm!L*=~XAs7WHcJ73r9G za&~ZW(rG=S`QHyZ_GnRVh=z>sV&&=c~U z$#iLnf6HJ}X3xN+erBFbPh>VbD}o~Cd3lIXpHNzQl2C4y2gEv7Y$x^1)R!nN(SR6- zcpZ1&;ysu-ml47;$({aG zf2=>TD{+GdFeO8h^ZuI_g>`3I7?z!Bp|3i#GGBD2g;BeXGLvjIofxO2z+bLp(#i#` zjpULSF8REA_IXJbh{AKf7Yd#^!~)OEM@@gDQDAPPqf%#zJmB3VII@= zyl_LEChDpzjd{pKjyY*1wmk7%`BE6gOOu^<_qkkD6DFdvar+!+6g6X>ur~?TWcw{$+eh^mqAlfudSxMBo~Juv2P?Z zrRIH})SS(dX85ebs-e_i$>$N&f5@S8hLUXkC$6t$*-|_m1eaG$o;rRYq2zBAknQ;- z6CN*Go$RBe?508UK~1N zRdMA_N}V#_XauZa2dX>9M)f@T{OXcNesQfIefA@{JQ{EEg zZ?#mMwMDOUa2dK%T4wGVX_3v)R+Jc`N+tD!PqJ7o7n@c^iw|Lz^ob*uoWRLSZIPkK zCkv?aE={Ja0~nwM3$9p!wLEre$x<}ZfkpG_0s+H>$hES?>XgZ@24W)MsUUe{L*{^8%uW#u6-fFbQ$x6U)l?4UGU_Czn9P?FmYYYdouY z!GsqXGx!yb%veDCdMNSnOmdL%5Z}~-FycK&ilYj5xFj@Jx!ih2xtK_pBZ|?*`BsK@ zne0$5hx+8`Pbh}@`eYxLqHu$dj+f>0Du&Wz#LRbOH>Bt$Gl%4Ce_`d>d?l(yQKFH2 z+V7+wD5W)*DT<^>d}PeePYMXWf&P)oRi*OsqnOwe(k_<9g7A#~X$GGL`RMx>gQL?r zy=3&FX_Y*U(TxI4%!f%g7KPieBvdhE(zz)qb;EsH6m9fLX@X~-ln&d%Wv3}Q49v8C z*ltq&B(5509(}8le+v$Xh(DnRZtM8i^Q!F$Um4NZ(_t{n`yiMXx*{PjwSiF!2*hI&Z(q%;c;L}=bLbPW0#V--nr zsgV_fl&XPzw7R0|80s9o9eu@vQb96N10h-KxAef_8?;`_e_qBX`5e|!pD!@04*Q|} zGXz@>vS7;&a|pHUengbz#Ycou0+t1i!tlpN6wYX-$? 
zU`joWP=Vhu<64IZ$+;&rTWm8PG9(<0bmjBYfVgFP?(vFscQiDjy^4c2yf!Uk5#Nz~Imj zh7YRu>^kN6|B)v?MGVdOgI4)u&_h*$I>#3`^e*6huVwDto>d^Ab(S8#IsEu}+>d;e z@=X>Dqb;te1I7GOnAczt#xKqTF-MPQ_=#O&J}n01e`I9G)D#o~=La22ADm%=L|lo@ zIz@Em>=&3e3feRHbM$8+H_+LPJD5;exac3&yJv-pF6yVQrd%shM|Uggg(r}}SVp$M zg&Z5k%@(~61KhiSVzk{XqP3&wwFqj^#~x7v%jqIZ7W6}ve7ApAa53fCcJqY45P1;R zK=qVuf9lts_=E%%upe6mCLPdx0^GwEjD_eB*dQiz$`*0d7l60|lSI+E;JAhj1u%#P zttQgc@L+?pI-wpN*Z}nQNYOv503-)mn9q($!jJL#i-b*$d#QxwdKf8GMrI?Te{KyQ9FVGNgw>wPK{;_`)Q$QHYTftMO3_U=$TWx9x~v4RBS!+%AL@l1RDn1q*K6 z{qj%D-8$z7=#+}f3JM9P%E+cxYdKC5Ws2!3%c@>nhd;X6DV+g8}+H13&F+l4BycnAvRh+9F;M-Y@R z4*`p|JM-VYD@McaPu3P7n(aRS?(W1s5JwOPnxWDYmxSySz_S2&e2zztui;e4VdLYR zj==Ir!lvtWW6A6ULfO%=%GF`We-?5KiW-e8ZU+XS%7|D$9`rN9WIK%QFzEb=xrj}9 zz~ClD!$ye4k2ZCXPJ|V$qctdtYq^@;=f|Na<;$}dXrZi{jZv z-RfUyB+D&&2d5@M$}k2v+h`0<_B;@1jvzDY4n<7Wb|rwlZOkt&_Bf)90|&j_{{kyO zvKx^6i8Z&cDxvC;coK)K$NMXAs5 zbQeTNL<*5+BLf@dK|%mxN_ox2i;KnsA5;hjR&h$pa~bd2D+$g9bCI@ts@AYx}V zp;23bcoXXdX(sQzveoZQlIqy>QA^C|fQe8hA!>(pY*NBbe4ywkiYld!MUqng(soBx z^@-BQa^@RUe=)-`Fgi_}LsOjmkYr4c=o-zi@E>?k0PaWUiI~y3Lh|7aNiNLzNiI1U zi&%N#lJQ{cm}$UTfU!;=MvDFA0RzdR36<_i1q_=FlcSuD5%YnB8e)_CE49V)!VQp} zA#0iUkk;``L>ERN`qoB{koI9iM>G5iajGnaQ(P}@e;W>eU|C2<7oemtcM+6`B{xuV z+MyOo9dM*QP*K$kr@{-JA*&@Ep#kmi#b-_f*S!ni{cSrt6Tjz7hW9c5D8fA(BuL;7JWN31XE`@bs|yE2aTl+G^D8y ziGrn}e^K1GBDi44M;5&?1ecNnp@k|2LwpFkSEMv*tIV>mDIZuZi>D^IcX9wrIHYPx zTVcSVNX`xYNisY>xKuqZ8Xlzb9#i3&;O08|^LtQHhgjtZeb&;xV8J=>s#cQ`r{Wr5 z!m0DSB-n8XpqAH=Wc1@ey#Rdv@q~5rv_`Qaf9*1XWMs6k!CDHsgpQCD!aQYA*UYDI zt=^|_sou3F@mU(Ju%TH6Tsuu@4HauJabBPiSHvnNWqj4q8o%*lGxt$&vzVu1@8Dz0 znS$LmrRoQXQ>B2es1Xo}B+(L!ZorO6##dWG`dn}3S%m*`gOLk^lBC2W3q=$+YNbq8 ze+fMer>BPrK4LKzThfi5f^u8Z!;F|W5&L`BaNpEICS2aKoV8ZK*5H;^v7DII zz_SxM0xH%~zEEt~Y#H)lWR$6kg0PA0f2pM|q{HKQiCgv5k&x6rI$@dyy;C&GAKNRC)& zVxC8}%q-^o3Lrhw;WA)3(!|y{90038*XkWLD;v9ah()(WJ8ftTS5hpzeC?Soer zXp2Ev^Nn0$rAr}OrTnE;NwWH=6p9l8*5ybS9?LSKOf&z#i9M-|W0Fl-XsMP*M7VGk zj~ixknZp0h#CMBXE=*AWtCLJRC*&itkC-~-v@^RnnctTU~bb-P1i+a z0OTZLLe&@)-EjeEIa=;bvK75qxCE4R;4e&6Exxn^>zs@OQ068cJCQh0Cs(RmNXEx& zOBl)tr0iD6%67~(e3~;opZKbWT{Om^16Y0^)DY*Of*=affFMN8eQ*x4f8LXC=APd< z2b`whvGyk4fV%?R2Sc1~DZ?5O_iYQrMxvh#5uP;d6{Fa z$BO)G^3BwjAin`|e2NqA)1|L-Ux59m*FEabEjbGL%G^p(?Cwu#)q4H8=e<5DACFE^ zwE~FecV}uvu@YZsuV0(*^`9Q~CH2SMFTMWP;^7}pk^7okM}FP7f7wAqSXm2@E+=F> ziCqbt46uAmr70Zdb` zK^y!#05RL*j*^?}D&13BQ=e0N%KuzN^U&DYyh2=$oz2a;&1C)zK!h5vqS&#Kk?dV^ z>ekuhIIK9JH=2TLe?~1Hg&pWY$vxzSwdS7V&aGO_%xviyhq;Ld-Z$D{U`Fw`Z`JIh znkGqn|GjiyMc};Xg6~WL$mW5Ji||x9R3YKlnVPLl^=nVRB<*n2%c@EGgh{Cgs7)^! 
z7dbyZ{&XCy6E)B(@CiEd1fB2%?v$eS?*2$HO+34NOK12zf3EJ%Ov6V6NWFkUMIpI& z@Ugxle$OlTnO`Uh(+=P8zTBbvV{Ph}4ZIzn7#t7+3r3GRL7ptncDntDHl#7 zL_Brs9e8S*f2b;Y^-M!+++g;I&31EJhW}{94@_(oO4c&iJU&*{He&5)o)maQ2Jjo( z*xn>&7rF=uRCgpm3nn8~aiUX1RV7?qBsS21>Y=YNWuxKYk)0M&3$SS0eQ8T3*qWwt zL|%2U@#bCg2Hr^A$29t z`Op(s90?H29HBS%;c4=aSRDJflo=$wAIM7Xr~zpf4!!Ton;P`_uJ* z1!n)DLe?et{Q(RzTuq7Wre=VAF0Y3O=%`FY_BWcqnE}_IttRRVP=8i9mMGA_JcN}6 z)XFRKf2l0t{L}rCU3q`%SAN1NZ=EfSA&Urh7vwyJ;7UH%_D5B0pF)+>>h8d0Ceg@p zm0(4tfK-jo{?sNfI3YgQ8#=LIC-Z6;n`rhf=zSvNWegHH0DD;*xYs}f86I!(lX0z(A;B}akb4pkcQ*h+2F}+ zd{YI!CUDvs6dJ1a6EON$3=3igwAnH;K;#Fjg4_@v_-_Xrx(aUh{>>NrM${2!*C0Dk z-Jgby##NzdZI=zzj%uY6J(l=wUoFeQ%7z%&?Y~R)vyEyP4mEp ze^8E!8d`CskquXf#cT{Za0n+A!#12#e!w|T@3|G59N0@ z7W^c?OY*zM-<2o)UGu9xdN}(N?t@J%A#WZxvE;FqJoY<1diVot_vqn|k3-3iKWa%h zX7(R)*5kD`FZuDIlstLzqnAAS#}6!7ipQ+#F{@9!4Kl1C3if5~Gc zYg4~To9nk2z_W-kHza@5BOO!v08YY>@3)z3*+T$uXu!G@%lCMSTaBG#FZ!Uf0h12U+KzY zJkQ#S>|R@8?bcS-^wrKXvnt(uZS{vA!khnruV-y_4bQ%|rdL^8TVXfoyyaWpKv&yq z+8c=dE$eDoFB|Gk|2blJ!gV@twLAU$ngugLbO#W9D}5_-r+^{K7)^gkI*Q1k%Hvh^ zDF7_3aCVI?oc_P7BmCJ~B6rEPFB zu!0L$zGp8!i}>~8-|KMKrixQ9w7!HXywd-0O19+^pB>fX(5k_TzkHn(3MbTB!if9cp*Jd4frpCfM( zyAC({ylQjPpr5z!@>Oxm1h^guY@nWjCxab<6>M9yJZiC8!m3v7kR&d0y9V1zlW*3C zs|#r&zyT-50de8wT(|C#oD>o;1=3l9M&70|5@Skv$xhPVLcj-qlUvBtW{U+#)N+Ju zM<26H$pLz|JU8O_e=9DO87~* z<+rP^wFiH&f0aJZ?Y@}$eIIH|@X}F7+PDwZ$keuFjnhL5p(?yRD{r_bS$Q`CD=$(d zR$eJFM&5W5=zH#d*$$^6#nUcd-EbNj*Z6>>zCu2d>=F{8um_L+uQ~QaXtPM9GEC?_ z^epoJ_AskKL$v#FZ?E6!j^D#9819lD0Lti;6J>Vle`hd8<5+&aZ*IPE#ohM$W8`#t zy1Ul}Z*#$4_L>^lcW=CuFjPEM6$W4v-SIoUwrHUP6?koGDZOGioJWqsV_Fa8`OM)0 zDQWb-a2TFWepmR*hlL(^ZACQmVmFlL{@m+ge?=$Bg>EMm2FHQ zg}wl0f6xk=)DW@)Pg9bb7h=j#+&@>(|sW*9bCKo*HBo)I@(+D(R^tP2{s&Qsdfa+`d z{@pK!ov*b&v@&=3ZG4^DLCya!ZSTI6Xk{v#z76MvWR*UnoSB z&0&2KAV*g{FzrF4ULRzE4Bh zD=4NoRB8+1rH5on+H-qO}gV(PH zuh$2!xA@sw8@$;ZY?8kDYVdA#@W-pc_H%$EIv;*|(Lgk9!G8VMffsqy(fMiIZ)#?UKD4sBf{ZSf7*S5O21}8e-~bx zkizoJoL%%z$}cNEj__$SS>Hy6_a0Vrnt~Y3qI!xwKcLLGg*8?HnD)*`Lp^P(cO5on z?CQ1txb>%}4f{*~)%&cOw>NJ5o^Mrh?D-*BSOu+E0Sgrv7lvvJIHoWO$tQoDVAb~h zg*dcLSj7A8+#VBq+hzH5Xk^PBf6yEY!IlSIfLjhS0~S-WHx%MNmmcbbhg66=jVN-W z?g{k^Yd=Pd1419XL{pIHtrQ9EACJD?MB>-XKx3f?Q1Ki-vp>%&5?Ne!7CL{i8hPs2 zgSPtGOpC5d&dZ5e-pKXAfh0n z2sORB{(+j3?y@@bdDeoxw%g6W)@&B5v$yoTgE?8~xz%lfn_V120X}(kcyl-~8t3yt zA;ACG@P|Yy9F6Pv>o?lwAp3!OspN!8V%$O>sxU}v239Na<^0Xl#WOTnVqB4aKhl)d zvN^WBLL9^Kdxy1Q+h*8Se^oKy#})18E#a9tue+Ox%C(?%M7z$4M zLx+U$UWg~z`lB}CSO0zX0XrbRz>8SDGTB#5@cxRl+haV%9>s}7$n@eY}&~Oqc|}UYR53((XPlb=v-*; zBviVWlA9dLX~_-;e_M|-%_vy*7Bo=D?%j5zX%RcE*S+kkAU9-Q>NT+BAR@%bdQ*R6 zp|^RLEO9TpQn?tF?bjTH&!RQpW(c51;5!Zag%jz%32K4QTF1olPJ=0LG>?}}L3GLM z=Mn#|-v8xM;q|YN!z-o#9-iN^qlz!j+bEI;UIL6&k5LBMZU?1#dUo?$Dg1LpN6+qo%xRPSyOfA`HxWKD;1llgQy6JA*}Pl!fF z-q39?+n~L@z!yBeVmC6AiQA;@JM7xaT+MFGNoDm#{}zUa&-1FLXyQ}v`#W=ZV>11w z`EYeFz3+2A5?qwOcx`%nRDQ+vu5zzdo1TY`skQ#d4caH3JoSzfp3GW2;M(kyoMwSj zwqM1Qe}PmBMeEn|3&3cm$a)z4^mJgwTfHrU8C<_bDFyVgRX?t3cyuipmK%}N)M9vU z2)d+(LG8TSTWB4XN#Rs}GBLpOik?tAyLmwt(-Q@Mp$Hh@aqDtCMM2M^^3~aR+o6#VQT3BCRi-m93mr*L8UxnKfl ze+<1>_fkojxU}zivA%39ss~qqr$PmV0_K@%r1zE1i2J6woiE!q63<%onf+VkVMk2i z;UWjmRAk;~p1&(w_pP%1&1d`DUD@<$%6}OmyowS4&xMU;@9*y3#IWwS2}#&Z$T=>0 z*KxzoEGi`NM_(xCR@p1bZk;Xjc5nWhf80pP77FeDx?WTB$SK+ShcF1Z7n?mpb*?tRaAyFr3? 
z2Nxtb$y==b)8pX1DV*wjpJ&3+mO6dQOAmQya}2n+_zEizeB5;1S$UgWdD9&mlzn}{4Fog(K>97ABQ~?V z70G~pi!#y(7~ohL&(8WrbH5=}f07_*9Lwy^iic;sxE{te`|cLeP%4Wpus8zSp%WM# z{3DbjMVRyrQ?_qWVL^gGU({f47zWp~a+y^53!D zbM~Hu;MohE`+Za(0O!uX%N1nrW1e<*EAF8(^f;Kn?Bv~>7t0%)&);q=@2|eld9|@@ z-blH6aEU`^nIkC8|9};R+sJfCV&7&c*xFCoxKYC6sKuw#u-ON1<{LiJ{SX=Ca2CP((0t)|G`%9p<0(-SRv-xuQ+0vVpazI+RygRVq>&`@+Ta2kp z=L_a9rZX_kL*KVZgUwOmfP)~4&*c#?u&_H*=`3JwG^7gzf1@>$cv=svgUzj_jV%_1 zEd{PZ2W4|S>OhW1SeTo-6xkw4(6{H9mMlZb@jk~>iJJ^=V|itHdwtYlaK6)BuQvO` zeiuD;=oaXB(lgK%BX`BI>-|nTQXJ55ny}@-PI4xQFA2Jl(24)QP%Y*XFjSe{C2Jnh z$V<;K??!UQe}1hutr`Bt;=R~F-RHqxw(4BKku<_cu`23T#h6RkC|U}&#$o|hqeP(_ zE(#w*(gS1Bms$u^j7QEY(qFOfi#PDadtsea?nGk$9a40s6PJ*oRrjs4n&s?iI_v?r zR=3uc{krzIk8hl3x69C9|IKG_IFH&hVFCQOL{X}uBVsHupv@g@E=Rm$8{b4%vawJpN?0PttW}U?&-kNf zX6T$@e+G)q>VJ&9b=0jVY~{_;o8%;;$PML+W_5twL^}{`&Nj$NmxAXztfPXH4jB`9 zXHeceGO_jRA#>`PH!OL>yn~e(D6vMa!CGW0h72u8&O6wPr(?hqz4cCY*e|uRj>xr~ z|GQb+F3Mq7#StxU)ShlcQS4XazHag~JzsGre;8uHk_j!!2=Y1>PC$iA3S&^}cT3)C z9ocPCy*CTq%;xFA?Sd1H%{Y6!ax;o}vkR!sxCn;G52wX*8ZPH&H6z~>G4gip>Gq7B z^7LXjxNecCg^bcl-~2Y0R}7MVGsrB5iLh?#R@pdZ<^X%HZ%{JCmbcB8YLW^=*AI_ z_I$`+v3#020^GU-0p88cn}xob7@$-MM`_}@X%C@0@r~@cQ{Y_Rh3n?%ZNPg7Zq=}o zL4)_vE&RRY;W7LmBrx_g?LUNHNnapfe;R{%l#UZVwe-9KH8s2!2C)rH4P6e)8D7-S2#_f9LyfTgK+` zNp+T3DDHIee_V4s5QYcxNZmjIp8_Tba z=;2;WUB&z1@2wtoJf&xN!#XU@f4o`VN_YF^fqapVam^9CzU1@9OP!-l5>2F>1i(X$ zvxYt|SraAPLxCIQ2S;ImN=nhym%mQ>UXqbn$z2ai2l^j-c>W^NhWf>&y1M4+VCxYM za8Biz&^f}<^ST zGs3@%(-J#bsX75m7DN%MrlmhR;&||CG@{N+M~a-);D;VW90~WMk#yOs!^q0<>D zdFfQKEL=tQ97vVY>9xWkfAHgg83{<9yo(Au_&ZQ=I`VxTB1H7+tFc$qzHpvNh@n!@ zIpo0x9^4@ig$M&R^GJjWLZfF-vAFM1uVlAX^F^gc`Iwa#cTCSO<<8xRHGUnlNe_-@ zD>;I$gexSxCEEN13XKQN_Bk=xFNw(ImZOa3fRR5Qor%2sB9&YOf3u!`DOb{+4T4fS zMEo@M40PSa0z*hoYnHeVmb8#%_lSR{&yYj{nVbiD4ggbzzEjBu=;9ZbWhGNMD#>k) z{!pE}@!?R*Zk;^fS4n?d*v^t-M>TCsUK1)(i`r_6+rl&`;1mB?$=#A^L9x)~NJDn# zT7{y9^~<$2pYIk6f2?P^PW+0vlXnpIk-XAs71`2|>6cc6LP;@^oCIWNR93UM4g12E zL)~fkd(BiG%I#F?P;S5Ydq6UKvV@aQHfx(99&*{iHH^!P-%5O99rlz?`|M)NcVQW! zyG&ur-1vlR6SzSI)rK|UU01WE&Hul5H_V=(26wG0xwc$We}X^R9I;VSYnJ`O@`nZk zb#5wQB?ly6fCs{3&|VB`H9HG;=!So{sCMG!VQ`GC?15N>!q|f=5(h+LxL-|wx1aITmG;!Fok#4*^~ zp-^A>5EgVve?o?Z%7wRF<+j!f7ii?L=_UDhqY*uA=m}ZTin$!NyugXMAmiADVI_XY zC<*j$YIr%jpi;P^l6|GF*xt2FFhM@BJ2npdq)y0hu!l`t@w0!Ts~HxNyvcd5XwgU@9H@w7QN!+o}gWP zc95EBQM!j24i$?i?%qN$@92-&bjCVXGI}~Td^^_hZQ^e>>7-slFM^BL=ts5LjNgLe z)Zh*Sbj&tYJ~MZXYSN2UQ_a^z*q=BXQ7c+J7w-2J^q>9{Nr4&1enSkdYIm=o_#mKQ zc?9#Lf88?tv<0+gDizrXBy3dGeBm9Z6u3@BCdD{HIatn!>@_2R-3EX57fm$C+f4F~ z$s{&|K~1OdVKDavf;Z^UC_R7m72Wauh4gZ);$V$gs2J2R*oEvmvfoaS4mZjj6d*S1{`z) z#kZ2HR&?sW5JYF2o623JLIV%+L$Musw;0w|e%B&yswA~+5ua~K@@L^H8UGAW)LqX$Bq4_JaskZwaA|e78bL8z{BtZM-e^!rX)Y{}+dF7gKR6eA0wVNJ6Yj?0I@+^3r75XX%M-o6=7msQ%GL0U+;Hb~8UhQ0$)8Mj z&Yr%Ic;w#LV3EBGU<&MxaXZN8{Ap43ViCY!RPz+BvdA{kjkXQ|0-wG&)>MK^e`q(! 
zG)5{nhG9dYMxoH?yo@geQV^3^UX{j}bYTtmeH|+&=?h|pv1&|CF#G+g0{f|`88v$Q z9viHxl?boKM!!lk=xCdGP^8NJ$n}g<;qNwUl()7taTg17wVu3xM zSAbInm{;}j=Gp&EJ89K*Rqo|zDM|U|m*@AUr@<%Fa?FtEjLgrmA|R`cF8I8)l~B|ROqoR%e|g#%Jh-$S zV<+ktjtfwoBHs(-MQLa_*iM$o#u^nk=L+&oTo>Eol9*FkNO$+jc*j4`EyrX2UcW5Y zw>$O;Rfp)G&|U3`ssBTreJb~|u-fj*k)H{mp0OG<9Avp(nZ{}Vz|ttu1m_I1S{dUP zw+nym8fz9Xl83~_H?dUxTAk`NL~34+s{{J(U(aP7usISkJi<~(VE_rui@!* zy556z6Lt9A-K(D#W*4jfoUBdV&u0GT!w2OGiF=BvT$BMgd_KCdpf4W&bI<;ks^tno zVx7v;P%Z8D_t0ofSfaEyCNlw#$L4-8Hur;KZWpaEofu+I$(87Ie}2Un!x_IxbW+hu z%^UqWRaSM;4aXft=~Vn+XvN?+LHv=KV52i+anIG~JB@wo)Jr(_n?hROR?XJAmy#5m z20t1vGJGU;XaL{pln&SI=9U5U@_y49IV~l;hSx){=d7s6IIa*k=wsr0iKGm!@WyJ{qO1NX{ngp zFADXCVwM-p06-Okr+a0yIa$;_{7E*S1AGxe`FZE(8u#E3)b?kde@2I@pxSeOp}39T)?m6*@4leQe-E@bb&m+^-^%lZ?^9S& zeJjs*su=xVfcWH{3PSgXS}hclbF&{UcmSpsXUP|xHry%9IkYs|GcSc23ibj+iYNVq zb_iV9KBsz-e^XlA#EFx3r}s1*NMA~WP>k}PWv*bN%k#AvuHX0^;#WLJWTDr&6n`y9 z$=@%D5FbZ8=fwH7sE>S)d<_QMUoUGnI6;KZ+^wE3X9~VsZ7gSp)eHWdomEfyk>KI1 zdZMRZb(>pk>*%QZemOhtR=4=;jAg37gG@WED$(^te*;xl-LIpCs++tKvZabL-{)>0 zAAZ#%G$@e{RG0a^s8=tSVYYn6kh$HgzS8sgXM`TOB3jNIlbjJ9R$rpbfUKtaFMdzX zs?V0WN1U7yU*DZ}6V)X?v=w^-zDaeTmuvjbcB+3Wqdh9(dwZgKAEm--|1#5Vsy6jY zn1J>rf2R{&y5Lu#kgKOWFtQVVWP4v-xinV4AJhBG?DC+~>{qugvnv$WtG{1n5P8+L z%j~+&@9O340%u97zLG7i5@AY2^vs+n&#YWL=E3znQfY)q!PgkCiO<&&NV z-I$y`&L$?Yl3&2JVN88usqKCt@u?*q7ZWEe@z0BHG5LWd|Mcb2-$>58(V-i%?+_&7 z40uqwfH5Zf$BH?+*jSfTi!N6Vn>*{xe?83?KLLjUr8uo&0j)Oyb1c8rR(aAWzt!HW zt=o@$zBPke2^l^k7VPLWq<+G-+|!a5EeW}(F5fsm_0=P1FwF*22D`V2N-4*%m}pps zjh8?<`J6}4oJPLmDvA+BCT1v}E=|)Y!zhDvKjIzCDCougo5SWz?*zP2uH1eUe@J-BvE&I9ws75c6*No}8s<%Ivm9KKk&ffxc+CE4snyNLGbgRe$Z{Wt*cOY}Grza0i zFA4)Hb!`|oIbm9cyMYRbC23M^e@j}x=UBJZ%f>78p;fuHz)$R^sLCEc-&##0{r>nw z=}t4FHeoQFc=|D%ehn=h?dphbz&tmHh`bS9BMXXruToJ#4Nwc4QJJ5Ki^5F!KAd;E zeT}Hr;0Sl4#dY!L+D_r?-mUa7kl_yv%ksLc_Lv7mu>vYSR3g-=L(ZzUmWQ-Ee>5VL1;ONN#E2YQPpu2FMbMxYRssgf+ENRBvpDEyJvVi} zS4aKa|Lx?YiWmA3+qGEI)EFJ-ro*XdJRxh1GdC|60`}~&GDb(C`fe^}@}AilTVa>k z=0H`+sap?i6Wi#rMGM=*i8p#2tOZ1*!GX$%_7fJQRtq@iz=Z?^9kuo#|rS6IeMTR6O zl$Pk&s8Ig2=D1JlQd}sOOCd1STCu|x7|J>{8W_rtMH~5p7Qt|6s3huI3ISGzbEBlL z^t;ZtQkf!@Ly^0AC}_k!=Cy2y)+ylZ4n#Y3SQ$P9$Y|k$e{rI75r5+ShN|$6gzoSl zU_qH9jfcDZmUkD4P>1=AxEP*Gq_4C2wFHm6&{-OuC@MQ6ZlvNT4erov0uFY_6t|i{ z<3V_smzA}4%>b2M!+M=CGIC)pgz0)Zw-APQ(!{jgMzsdDDPrS<4SG$TYwZQ|@^$N8 z5+5yd*@+)Ae|P3~@mu&Zb<)h8&ERlWZ~U+8!|pDthlKgPMOCf$DL*sL9Tt`Fu`&hu zRxK-wm3DQgf->b9^;96*U^U-5ALIhaNBosQ@KrwgH6kSC#`=O;IUqC`rtuR#gvK7+ z8Lam(9}g+}J_(y^@ep>=1Q$n7zic<`r6YQl9 z|HBc9YJw?(F|+fdav)dN@+N^R;fZ^MF695(;OD{5wZSAI4F{7os=+{g_n{uJE8m@} z4YI-XV7fM#<^Mr-urPS?g#T*;{XYOm{2SB;PoF;3AO6`h{|5LA+Eur9ywX~kfB)7$ z)jiNZe|ymL|D2Fx;W4z^?7x4yXS}!B*8ie+O2Be!p3R42`2Ae7TOk3x=l60leSA z>Vwk(8k!H#0_?iKO?$htH^R9MKEYi<38Z_`ZlFjaea``bz|Y#(C zY;e^-8uYs8gdCm>6i{n$-M!Gib2WH9koxBUU9zUBfyhmByj2=z_V#wA>in9ZnDm4BM=e#;brEzAL9J`$2{}TOo}hDRCI^WZW+tO2^RDK! 
z_AH2qUZpY?oObO^?L62kHwKq7A|3#8e`af7sxNtlY41BCNwfrcm_R+!Z?;juIY@1z zu~JfDdF|MeEBYB&kP*SPSgb zT%MnN7P+$aQ&r@_V#8h7Q+RbAn{U6EO&*%Z=EDec#4YoXpY!OjeY~&so#WQ&f5kO= zI)$zFpZJQ`#`GSVQ)j86bZz=nKkD37r<@giL0@7?Sx z5=_6lzwZpQhjnwR*sKoA->QuG+3K66jrVC7(<)A?i>VvSg1BeQSMlaH&bLqX70DyM zeQ=>ia$5V#{GEFDv*kB0_BWSae=qMZZSFr?URix(`kx-9DVhBz z9s&RZuc>2#>IrW2o#kK^o8{c-x{4>@F+v+H!z4M$--2SwY0xj?jT-@MTsgT0v5OEqW(To(F^BhLV94 z(~TDz3^I%0NDMW()Bs07xWC6i{~8Bvo>F0M+pn=p9VsCWUjD`@Ww)yMHL2?)*5&xE zrZ5m*tfMv*cF(BsuYE2}nk|4Vt7HrUTS4mSn9G>Kid6@LHcmrmA_0fJCx3UqPP)>* z4;t9E(XX^G=-AL=RJ+--#Tb~SDT8|4m|_8J%Q_m;0VX=VFd$bhnjhpW`rNn})D_H| zSq??{ie1m*(8aAckx0IVKt0hEIw6`#PI{K<3h%y&W)7n%Izq~it6~qKluf%bSKpDR zh2m-pvr}xA!&ZhB=8YKztAFS|+DAh(;>O;U!R!3XAkR5*by|nUfI%RN!|$l8n#3hf zXvx)1LQ9U5gA_Rt1vboiF<49+DRg~Vn5|t-0!X_yewfsVMH;Ld>v_6cYklsGjDS14I=_e zK`ieybOGM0(TD6qr)+P`ovz9(tdw^Zw|Nu}E&7|aSTG;z29LRF!tiiuA^$-q7sVLk zB<;E1fd4`j>NYc>sJ|Lwmo;N{lB$Yu1bEnuLaroByL(*z_{Y-5n`+6)S$vk;Y}GEC zeoz0aB?%ATXHA^&aer?xFhN$V;srsbo9Mf-SurPNQCRL7ew4kEjZ}ajkVZbi#as*d zB8zQ;EXnqHc7HcSbu!)sPqcy(X!3R!Ecp~5$>Uf!2B!KQPz;}kU>K4|AsD`V8-DUt znAZI!5g4>2us0frDf`r&%@1-MW9)~nYLUlEv-P{PG*g2;h>^&i7&fLAh zwQ3q8!%HkbBSz}K*_XA8*!Ov&v<^l@;@;q$w$#&?9(>~SIc zQAYI#KH>M}6kfQ<;30LR#fcUyafnuZOqKQ*CocSl6EYp!v*dqfD&HI@H#JJke84mz zCP3vTqf=eH@9epE%e2(5&AV43*jtQk7~M!?Hn1+FIp(MWnUjAkWSyEyYVyrFd8@B` zuZBo5ZGYT*S|JsL>197Yuh1o6o0{hkIz2;+DGGsoQHOiQi4S6J@wRJaP83H+M6P$C zn2u>q+^6m|brMo*vU$ECDOLHM#+j`Afp!MxiR&Fu#*x270CTs-OV`vED@pPci})VV z`cQ#Tkul`~aabx0!bOPx)CZd(mU4FMoPZ8UM3Zyl@fhs-4syrs1L;MLbmG zg*F(2;Jm6x{xBDx+fmZEK6X9fH7#8@I;d=ACW(h#%-P-pp8xL6-)h1F0$)>Lkz1E#8HchZNwE`0M3z17S?uv%h0wW+Eh<+?TnQ&ageEiS(V>_cr=7O3*hMJyV~Eyr}(`YP^*1o~ruc5?DUs^LzAR;)N` z0fqIOba1Y+@EM8L9bKB5XC2qb5IPiuMSp5eXVAg^nCs~GC z=8l@Y8c2r-2#3Rbj&)bPJK{ibI)Bhkq>4c$o*fh)k)1xnL>zw$VR5=*;f>>vtgJBh zw0lVG7F?AgBV#Qgvnt>rR%Rr&p|5lWBh2>S1_RXX#x$TCc5y?fL?LOa=6kWo`EA6~ za3Q}!!sbv7{>Ak#|LAgteeB?rham$27vF;(gp=W`-hYI<&HV*p z@~V#CVwj;P$!W6-`dsmi2Gi2?DSuHMp5FPNgW4%Gh1V@W!UwK<$g` zlF{|Cf4MfNbK)_|e_P(zsG930fD{nBcJb}kZ6pTc> zW~g+Gj>jIcy$SpqzsEF3@TY0I2Zb`l#gcG$oR}oqQ<)-?m?&B=BQ=^@dm*Lj#<=LJ z@ivHYK$aW_XLUygTGaUt#mUe|dGs8IO0|jk`5FB`;dM8!(7pEhX@55R+J8InLg7SwChwYO2_I<<&WJ;^19Mg>>$1Zzr&Fmi0UEWQD|O=} zH1B+Pu7;qgNG-M(M^Zc8m#yOyJTjd0H<~!OD|fonizgwea(@`Yx;d? 
zJ1M!cIE^#9D?ATKbD-sXIXGqH>cmO=Q*WAn_IjJ8Di?$Uoov3DQJ=yqEaEce!V8M~-IM>nyTr9lh zxj8R_7oxUUv%_%kN>q;UZAP2eR=c*};@$3*zSsCK8I->?VHf9$@$h*vm#YHjv^?=E z9&zNF>HuY4t$!0HBF=;Jy())u+6N``cr-JSG0Y0;C5u*#e5~gsCIa!h7I?}ojJ0zj zN*}rF@WbIsXZcOw2l2EHzq?8A92Ci|FhH>MCl)-Z1YjeDw5aMR0hDOWt-e)vX7*Hy z!~i<9yH??D&(F>LZH55M6S2Ex<;ZMVWF?MFg30wNxPQ!JtW8{|H6rI1L=V?Euj1o6 z#x&R-%NuZBo2w_Ng_s4oGk3r@a2Rh$oW1&yI{CS3#R8`2 zGRUzODRE^qVSQ(>b1oK~`9ON^yhhj+bAFQUx!4%z2_by*rW4h=SK|O~&6}Z9bsQm6 zQ=Wl~CD*Kt%Vt}VIoR)KFHvV}l=1f3^cf4HkgBC+ zM_eDc z=wOloU0&}L>}>X-r!*St!h#%?`C0q5XWbv7Zs-cN*RdXHmtC)a#SZ1xe7)t>0QYY~ z9e-GT6W+ID;9JRx1J75*LD#K{Rikt#g2>h{0K1XpTyL6940IE1dRteBP>LBa$G&TM z)mZJt%-O20KG}!vNF`&^YOL~oXi*0{s(VwXeiV1oT%HfPSWLBZO+Cq_uj13$6jN(V zG?0zx&Z}L2B4X2-Bheh7-Ino|(kAa29Df%D>Z*1jLj}}%5tf7B!)@{6rT(vlnrQv= zEuBF>p}E$7s`b&!E9ds6ck#Zv<6 z*|=rhZmI}N+dZ&Lw3K;TIBG94ICdc0k#nY9<1vST_8(lZM4Ax$o+GoO7$YR45`O_8 z5se|nch8lv4tfcOCNCE{0Pn&(CMt*(7R&Tqt7?3m!`WT+!>M7Y)uC_3v7}v_uH=hG z?~5op$`JAgIya6ks&dyru3tTe^j5}1&f$RwaNQbU;y2WYUGSBv9Sxe)q%&hdRf{=~ zv#jf(Ei({7o`GekiR9mqbtmJXk$-#Wh4`9nak(bF)&u6K2q)}up~8mE-{Mr?srE}_RST*cEf2}UE59bz{PVI7`-gy?}wxqZj0_ofw*;V8K0itzz z*+oJP+yyvZJaBY;04bJvkqJg-gP7wI?y(1=@h8T4GT>-e7-wDlDy;ad%6|&~hCmt( zHyIjBB1XPt0VBp-)Hq%@2`?+Wp9_bn(|y5t7cX8WwzC1fG`G2N?SE{V+0(g0kGU5UL2n%B7BB%==bFePp-dd-D>kOzW zG0n%Q(ca8ZfW&oTQtR0saR|zSB=fteWH=EpaNaLrrGsg+$ zrh|>jH9P267~ug8N1Nv@es!MsmU;3+WXdl(&K5W%3X4QW8O5Ha7=K$pGJ5m51fV%# z`>Y<$0@oFm)@!PFG?Ec4+R0jRPR<@ylICJg8J=SgIwX9dLm^2BZE})4LF5rA(8)7x4#Ne6Y41%K4&oyR%h=7^=e21aMqhvM)81VCXz7z*H=X9+?CR$6{7F7)?- zzaKK^UvPw8ns_k9>HJ{AuDEvBCBSt;tb(Pzc3 z2=Yk}KqS7qOIldnh!ZG%*BHVK`J9@hkN@LiHm_RV*PqTe|9?WB`qMB?4}TfudY<9o z6WWIH;&;_TwVUhqtaOFzbJ<*YAMd)Ya>EC8yCX%4h)J_xOt7WjL2gXvEioJ50~n4w zZ{->w!>k756(lq8&V?0<{HLlwzTS7xb(OE4*U)32QlrqXjSB!?5Hn--+~@g#N8DTJ zLz`TfB$0<>5`RJMZYyj=Mz(<(LAKK3&-*%mbRy0#;8k3UAHv6#-Rgek3RW3rxmcMF2K1zq~;=pbyj7Oeqc=&?D4&clcNnz_>QBXd`ykRj#uK)hDWP|oKB z%M=cx6?kUxEMob9UD!h+a2R{|!h*l$!%#uclb>+VR)22AngM(gf*ou|LTo=g z&F-)DbAQH9T(x|lx0)!hS*l?+0hO@Gmj=dd_ak()$%50Oq;c@O29LyN83 z$_WjS@SzY@pgV$*ZIU*f=GUz99tMLghx7sby;zll4U_K`HWva0oanqg0*j}a!y%^{ zDU^OH7D}sgg|Hj;rR7?5=W~;5=3cGW$qu^kg~CJ~ba5R7BpT1)P}e^7_?lR5A0#~1 z$bUCpGuz^wHQgSY?S;U)UPHIqY~hWa&P{gYL$sftzJJqdqYInp`MM?Px#5W}5%cXh z<$?gT5j{zC=*O`umtD{jLZ%Z9hVI8r)kH+3PRZm3u+790TrCP(z!Sa-#re2wbuW6v zw@5P2l(xG0J0WC{ZDfd6&ZYVUXRcrRIDe*zkB$=__&s!9@Cm*Yg8eg8t}K;DWh04} z@m2jC3z4@Si*X{;NMlGlcDur|IBZB)eDRXX(`oVK`M+L@2R-8=j)_Ld@8Ja=5D8Aw z>cO-(C!I-a{|QU_s`a@w8|oXWX$$F-!Wam?rc+w%ry`&h3K&HodH9H6C>7jZ{eKq_ z(R^@tbeiW@8zL8oO~33MauJSyE>zF`8a&Z#FCuk-Wu7ByF{TTm1T$r*g4*C$Yiq>% z*?G?K!yQYoHi2W(iOg*|swI2e&?@3i5yjh%?E0MH&idyy;-3U@r^z_-goPG&yIfny zaai2_g}U?fw3(#ZBtzapWgKIFcz;KFp#iAK4pC4+f(f<`4HF)fu(kB^ds5`wk>5Un?^H_;j}vR=Yi$5j54OEyuEaE>R8x__)FJVLKc z7Fl`X#oO0@wW^Pb8|hBIOdSY58g647e_r4XCI~#$X?9s60?kzqKfscY{k@N~6L)HB zXrO09)*_EhvSw-uj%t&{MvQVu`*4X@+$%xUl#RJWN&3uQ4dQ7Oz!kiO@xbnxF}GMl zR|9Az>eJqGv3?8zwA#F(ynn>i%4GB4O8>-$mV1w#b+mceQ>32PuU^DHXpdw3-*EBF zq#;cX?=^99@?~4q_}6^To6uEP)*L2(h?0@6v1v-V_7W!7n&G9Co{`rRcZ4#%>+Hm6 z>ZG*1;=cF}MGMqG59OKM)hc9L$5>?@?GHH+DM*Dp9N^i=Woiw@)Kx1fcm<<-ihY(@0dv{>T(7U@n z=bam0`w#Rjk+Yl{EPowHQ6X{Kx&=jrm7)2PHw)c^-im_JsR?yFJ#Stvyrwn;BYf|i zii~uABQGPI+oQGp;l)qO=*L$5`FY zqT~7MS?0XqSJ$!-IizJF3X>QP`O&gXBNKE+3h)LVVU zl-Ryp*5pOjQ>w?8wLdS)UoI=z{+gIvo0m1O&y-Uf8wF?h$Cqh0s3>qrCnpgo!M4d) zGup|8#oQTMF!=(h_KItzT95BAC%Ortg%-Ku%Z2`YsGXwSvxXmZ5AqK)&S_*~NgI-? 
zo0rDfGJhAcp%=uf9_3|@@(n*uOf{w^q8ex3Ik=am9O^ck{8$7I$LGs}n%G(NxpGZ} zo?MORBJ;{@$sHiSbv7s^6?A8U>Xu7yGqR zyUy7za$twQ^cA~oggI7FX0NJnozwN(iP`maa(|6jNgcy7Yx$g&FEnZ5K^!Pp(g6C^ zIH+eF1-OZa6XvApQ&VS8J_O6tP<*@gfna=NW^x;<$cxwgh`!=_bX$RwDhpq2Ad{2s zx4@hUN>0O{p33Xfb!|8qC-h$<2Xh4lTW3`ufT0AKyZ_8oOmSuXRJvx8ThR4w!c}uh z4u4Klf2OcaQV$e82gh82q><>17&a^5Xv^)`$pY`GLG@HPs!{QX2lX?&DU`1cH z2&S6xqJv#(4MGSxp%kI9?JJ_2GE`eV^y;)$kC&VY`2la#PT4@y^NaOr-HUt(q8Sg7 zIOT-Rt=z0U;y0g~`0*^e&Q@%d(Ehk?U zM1@5G9U7QZn_;RqYMTbT$$!b&VWM%?sqJqB`5y=9Q@vD9q-Q=-VVpscPX^0>RtfXzBbbm>xj1y0kpW zR}ZuH9_MB&ZuGxYj0+0{rp?_#EqwY5EXiWlf5fZ~e1+`)$OY#ej~f46BS_~@Gkdeg zIevT#oU-Jg0YjpeKz}GQXFVg$1TKSa_^l^F*MhEx z>n?U3lH(}y{%{whrqx@-u(XC#MUL23qt+PqS(IQ19m2(x)36gXM-+{{YQ5ITT2r+~ zbh_JBBekxy<6615+`CS+`T&QNg9)PJ7LK(};VARa>=7U8)_;?jA-+gr7;q)HrgR6} z;N2x~$@8>`JzAINAC7K!gm9s`&xiZII5srin%4)uU5qG{u!1Mac+9^2-qHYdeGEjVL08 zh`FFvQf_=99EvHl#w7;rveU7vE}G500nkq9k<&6S_J0cPn%FD9s@cnl%B&jb=|<)2 ztJ7ESo;0?hw4wfU6$v+@G6DD;K#^~W?=u9K9Dk<{{|qZ-Fx~ROo&JL~Q)W~z*(r7N zZ0EH5(;H?-nAY{tk{t_<7T?t0lxfD}^?I%2_DWLhpo=URns$+o)g(RK2tlqQ7&e;< z9FI7Te1FX~>a0TD;_|niepvcF`SA99b4)?YlE+`EfWGdpfh}8MgIoiEVU$===YlP@ zisZS?TH4~CF8+3@*8RzZme`0VCw8v~Z8s>Hvi*8bIDL!fTVte8^_?C&vCH3DgL`2H0lA&`o!RUDh>dZ#+C5*01po5*f=6}6fJ>@KHyOS$D#0lSEt07+ui7Y8@ zQ>BFk0%dT=Wi(zkJ)=Q5b%aJ3Ll(Gx87&ZsCGAGut_xi!yH(gA0ht+MT68t!8-uWJ z)=4BFi6k<$A^&nlr3fB6X62{9J&gUZLAEhurDJGA=*2$tVjB5?iS>t6-pOIRB7~a; zs(;BzNy`d*$@|k|B7^QwOwnXe)bA%uGiG4VpG&sfY&>&`I$fM1>0q$&figVv3s7?` z5|r@sF0-x^y3QLR0e)WE$~=;-#V{%dUQ%94%@9WY|>QoQS%wyGpCiV2J9QoTVMS9=~=hK ziRjA8*>p(dP5|L=Z=K6-`%<1TL$Mcxw z;l{h;nfGH&-G)shcFRPxTRBIU!hd-K)l*YIGIe!zHFNdL43_n?^Z)qAKW2S|bYh;5 z%kpS$Zf;ib&=Y>iy1x$uW`oFBq(*l$t`m%GnUlN3OIKIe0x~0;F*$Nvv45i}wr#Uy z!~05Zh>K|Ixfr`Qnp^aRb|gcUHnUZ~?_0CfUqb3LZDW`A_mH-fr~MZN5qIe~-E;9)o(a4#IJl2U4_SVSQJbV#Ir84W8Ha;0aMgU;`I);%sM zXk#uGj6n*%SJp!|zayHyi+@o1J&7?vjK$_z_s@zrJZ@)9!3xS z4wEg(&lR^o!BgJT?^Ptiv<{1XiRjoZ0zMc=UKWi~y7zpz97Q)0YNoqW=K=vzE)*{- z(fsK|XDPa^W=xN+PRV)TCoBPi&7}oS0tEgfY9*}~uFHYCW{)&?T7US0t1R>qX;Ng0 z=680T?`TyV)_-2%uTx|TDSUp|(qa&o4(Ai}vE8FmPJ z#|A8psC!s=?{taMfdA-1VqcT6)Z1vb>tBi=RGkNhYQp+xWat{jWt}MZT||xUtA+l` zBcc7P7;QA~t*NS|?M0WB#Yr(Al4%m!Y_A+2@hPs-< zGlUpN;(8cNMG(RngrO}sKY`~hAj@Nq$bUBz&ddHni+490J(mr1g3@`gC&w^vLjI8C zgk3jWm7&FINDt$o6J<;!@NLgm>uZBb0z-<>XfZzN&c9)a5!GYz|NVbBWnsWPo&U?? 
zMQ!4L|F8dhVt*OEcPP0jx$GAFsya5G zpRVEn2!FAh4k6t1@Evh{-n}@}tu!VQ)sQ(}#366DfyzYi<2t`c9hL?g6U0=_OB{ij zIUxOKz9Drh0#Ds();?Y7_81UnEY9`eL=NdOt|$m@htqHkcM5d6#i~uIVvI+#{5IN| zHf`4uN3GpKV~{g2Rem$d&c}6m_TM|ZU*_hfcYnV;nj`j+!pP13w7B<>z+=_HvZ>wanZ1Yiy#U6X z-@2nhk3$s8#6IG7j-yM6vh0&Nw(;(!`j2rQcLX)qe4IJW|7!hlhSY?p0GzGW&SR;& zJ%6Jbox9afR~bGSiFRHGQs>XQU#f?X#c}R;83`W6PTvuG{QNN>hr~xem34M4UA3yG z-2$VEMqP|>>BmiMfD@bGBWBf9AzV2#AH}lrq=g|Y$WjKCIM*7=WPGDixz{c2?s7fC zTE4*(Zc8oF1=k~VcTA7E`GvhDQ)bI@2!CCGrd(d7=wF`Cq=79SH_e%xiwhA}5Zq6R zijVK_>1p5aw0fttb2PD~sal3hcwFTbiUx_NMPJW=CM=}f!%CsFEu1lAG89-LM(0JGgZ>VHow zBw6dI#eRU=<=CP6Bjx(ft3PT(Bhmu?^{K@6-CO4kZcJV7Zi0508)jd_L?&gWh?_ob zC$$0L#h7;OYp+xme+rlxVk#Z8?y=&!rY8G7ZOPG@lmjr;%V$@;l%HC1`rfqqS%lQU z^;u42QRGxc-tCNGNcLcN;xM2PLw_!eXX2;BLvF3;(7ItAYFJ5UN>A*E;N8OmTk?Z& zHTtpm;rZ!*aDII7Ky0%I=jX@A2R!5Zt@KmH+cP;83d{-$tIX9Z#+d^dmpcH+Yy&5K zpN79_1>K56QiN0%6Tm7e?8v>QP?SEcAD(wno^V6<96~?9Qw`8El%4>DF@FOy69f9e zGV7kQP?Xg#b&ad^V0ZVZ+pQW2Dvee~CB0+w#lSu;O>2iA+s&Zotj)t0*DqGAYtzjD zib#cF*lnctloQBqFz9qDj0NzK4Lxu>{0yYx;6iA#!de#tMmt3IViY7;*{Qs`xsu4Y zSs|zVs&7X^n0Bga0SE8{ek{Rq-Ceg- z51J?SOE5ao4NGex-5>NykEX*CAe#sP%PP>vogK2mHcT_SI?X}BA!hoRNdJH2olCP_ z$93Q5_fs5kVmUnLdcPl(V3h^gBC80+F%?o0%_$`D80>Wn$}xUO~5;rO%cBw!7x%Dt>x37`EXCa{*=7dU;qBz zu$qcN<5SREU_LnMZH3L4P+C4a0rQL;UAuh%X}EgZ6WiPRn1AN`f#&P|XB$S@=9pnG zr}@6gbX0yG5wFeof)gsnE;(r+yL=v>USFwv5GeS?{WO}NYc$p^_=e;@fTGr$&_`JQ zncjbtbpIYvFGR#wPhUKTs_7Aoytup!+w`-O$dm2#^!7Km+MR#GFR^N-^!YCqm0X+j1-Mz6O?t%AdY?^6b?MZfWH( zh3Oz~u%j56SmuAYXqcxTezCcc-c!~*g-}-Mbr&;F>l`!hKO^JW7RHNeXE@89_$w6i z+)B*&^0H?Ebj(*fC*UsJec?^$1`IF*_I0qA!tXi!4}a7&eP-%MBHHjvu96Bu;vmRD zq6HQu66b5t#R53GLiIJmsA$*I#o@g5BPeDx^Kb5-KaqPIi)O`()D4>;y};DW zDGM$IOi#C`#0PF@;$<`A_UVIVuC_v;VG~54vv@KH=CKhi4LE6Ra_fg_+C&UvnQr6=1!tS z`OCd)lM0nRc2S-`T8QWjxD+r8pPrc9!kY!BS@dX|efiAyTlO@eo$2`lJbU5QQf3W` zqM!d%aoy)QiL;y=#UIUN{Nlm$pDN;}M)@Wi;(w_uzu;!T!x{-X{q0#>@96U#$dW8F zJg{Q%KeI(-p3%XnE~ac*0TS9%n#IJ+ake~r@GMj#XY2dldlvG)f@t&CC+W%eiU4t0 zm)O$(2FI*z;R{KQ^P7=eKpCHONoSH5{>B>vG?ZE$I6RZY|{is3()M z@qbnR;q?0td2tT+EM#ZrzfI?=xzRW1{io=TZr;*Kv+MKUd;z6>cKVaI-hTErz8xTW z|6hNN0F&q8`s&BhexA*4>o;E2KebzVrginyP9B7%i}@~#rT!7O?QAUckG@yfTxfD&k@J_kN4&6SSZ9}z z=?0~#Fz$2b4KW8}=2pq#{%C&8NZy(M3Vx*TeUJE_^Y+=ZBagJm0kJ~FV?!hdr+aNJgCn< z`PGBw{Fk3TXwOfcJm}7!zvRYxq~XWni9dev{mrmL zUg+-`{*zyws}%m1pX%=!{^ZHIPJiLpo%~(==_lvUn+G3Ln+pC(Nr3kG$@vZ-&!&Gs z;=fPMPXID5hO$xfR{(+VZ=>e#05U`Vt%`hd{sTbJ0ni&yK1m(GH~EwIKKSeR|KgYL zuri;upPYaGm%sSAe&64Ha{l1q-@NzN_Sa81-sFGH&z?NdL6u)T`1p^0zkj|wp6GFY z#mPG#e_XZQxg9wg@^Rk{=b;~2?sF)6{R6r>F^;5oSyaurtg>RCzChzApCc_+SHrn% zsyZq5X&f+sd7$5y?I*6n^tRzjG==X9)6n~y(^q`V%#;88} z8Pl<0>DSxS$fi3%umtR#?w>yZ#UE{lhu(TLwr501S5=4$-gWo*=6b1ad-nRJzb*T- z{c$Pl+xC1Ju9x<9IP+1TFQcB66!Ra=&6VHPxe%=8qdZ@lYpK7~LRH@m`pL2UbJwdz zOO2+vtthSHowuy&U4Lir(rfEOHP1Y&u8j?C;M4l3&Si1)vg|3+Lt_4YRVT%{wqf=q zMH;G>^|ezyhDxp4#!wCIxhkx?rW$Xq-6$|ysAvpa_EadnKvk7D_mAIx^=Di%(6dORj1m&?QNDF|@kd|b&3Lp&G*wp{)f8o*nt=0iIM*Y;T2Neq(7HMILJgQNnQ7T- z1>jO3w5kWdz<;bjTeAFA(N96ks!(lFA!|W{y3j!r?nEK2p}Oe$X+?%6SrJ24uN4t; zAOJ0E8CE1+kh0*)h+50hi_BVEjQI0>|w6GPQEPtYA=1W>Ku(Dw6Rja1Muj{4v zEeRLZL)&4DhQ74{+v>_wjazheF1>%Kva=yE3mb)YPX&<~g!zl!rvF~{e6`S-ec4}K zVSjan6A{-Tvhr_8ML*qTJwq)uwxo))9$U@&wye{|q*o&vBXt)07)0U=((U~NIH zeBssD7?pRRv1;Ynj9Q$rd})7ivdO+)RuE{@R0=WRye-M;?Pk(FKP>*c+_f7+kYG3 zN>pL{!B_`GjBtQJkp$>dTB0$KzC`n-^S{{As?HK;)m=sE&b2t6YFDnq>9aAxL;GC= zB~wO?>T4-LZk(_YTjARYyA6bmYG>+#v^Zg)DkMzXb5}H1ltVfyY)|sILs!tR=6ur? 
z-B)kKR07&X5kKEbcR^tY8Zwk&m4B5*M|=)ddbwgVIW=~A5_ z1|Gp>Uc#8rHFz-8HCwjTLl&()l~Te_I$pZsSOmB%V@u7Yk!2xQMGCc zl!%^^Wp%oAO?|oyRPP~$OC!!j8CVyey){u%psertPhUL`+V*5$-L>mZSad3UD(mso zKx0(7smk%xb>->4DhK`e%8>9_$c_6XSuhYFjK06dC~tLnTK_9ghQkB+RtzsFE!W}* zgrpyl7W5WQzK)slZPU3e+m_i-Q7<3jA`Xz|V>8 z3I3tO<<^{zJD%TL@~y=r|Kacd+Y~8zRE;I%`EFD8rw^T2-{lePat9DXEC2AH{^#6= zh?=71Fib^xL>oJRsg3{j9ooQO`KX2rZH&bP;}Mm103=EW4E*=G=YOWPvMt(UEXyMr z*#X4S|NZ}%+o+*W6LND=9%&%kfT@iH0~kRnS~m&8yIL!;4Vc>a-^nN1>Kc_(Upb*DlyOxnz4K)~^VG`Zm^N}6~j>I;01AE+cS&E|GE@q|J=MM7U1 z7}-fGv6L^kuc=gW1DvX|gRkD4%I4AnKUM7o`ZhKUP~snLS$~WYc5FPmztpr{@k#41 zOP*6)ztP?WKB0e8hmNhzsT*Lx8dZh9eQJ7&%Vud$BjY_`6Vn`O_X*)WoM7lQQZaT$ z#CRwLoEprY=lbA}`mVc{)kX6dkRW`pyNU~=6&5d`gP{tCO(!Wa^fwnrESz3QzpKZw zuZJ(uu-=7@Uw`o$vZ~?#RtATVOc9|P|4n~sKwR0QVIY}ktvYqGmB8(#J=H465}_{E z*hU&-LD5DFJXlvlrLMlZ`J8UPY(n>iJ^Q>s$~23IkIzmvFXa7>bmL<_)nj#kqxnKR zm;v;D5%EZB4M?ntAqZoPOT?ouh=+x!Sr_Q*R26(^kbkrQ?OY7gq7|)%W>B)aV3of> zzceM(1rdv=hbls`Q4f>@erO%3Ts9{W4;JHT7;Z2C)tRWJWHeeZ6Xv!)Csu_umIOxb zm-4$^><=(*>j(KzNwlv@myY&CKP(^ga}oWR&D&xXN=eX=ML!qOj}!u05&b{|8ox$P z1#KRY4}Vo@uD-iSNv%$r3p+(0qM(x#aDM`83|EM3I!N?1XlDw!tSKemomO#>KO`p~ zN(^Sl3e_SW$QwkkFGe}Mz(U9Npkr2rt#6xAo)0#S_OvXU6T~C$w+h(6o?%?Kyu zG(iQdn(|$Y!+X$bjzZy#P9YBlfqlpFuw77Mz<&d_6{HOZ;saqPF2jam8LLvC%u_t6 z(i+%`MvkK`!m6^EH?FrCQxg6}2ewnRPH+ODz0oM}7qQ>L239UZC_VsJ5EZLzo4jh| z3`ioNA41vdgK8lz~|nF07U%G6`*GnSLyAX7-O{h%DXQ`2Z5xC(9{C{gbtq_~FQ^|fW;CK7{JIitph~>KFlJl{ ziq!0dQZW=+k8l-`ZH&W_e{;HL8`N6NVPwg%1+;KseMhv)d3}j!aG}oQN04SS+|zFN zC4QQFI9!P%gwDXT{5D|BW?myQY8J9WiStqNbs~(dynn$=zu*u2dUa7UA&84k+L4(qUVMBx7f5FyQX4 z?BV3M9U@2cd|R8uQ?(Pug7_5O8%3z;o0Fy`xuSW&1O|Sa+`xNjR40kiw$>h=K!2O* zt@JgXN$hAP<%CxOq$evZ64mMrR>v4A6r&D%FecPAZQ-b?M&TLscxx#D;@_z}6f!KD3 zwQ6Qkd&HVB2(=+t(SZ-=Y$g!sNCv4C88!j(A?>+9{PMD#vSQhyGBS`7y#X!I5OxQujr_AnlT^XBC zTByauE>bG72Z`dcJkTa<<;2xGi*E0Ef=O(uE}RZ>QIB5SHjA04QvT&Cwr_|8?MZPJ zWV(iMM_}`zkAJt4dVC{mx<)EPD7YR!r?I|?>nOSwrAu`z+h`gj)c%~K761PC+$t{= zwZ$o*Nb3TQPHs{7Lw|JhhUFe}R_C**mEw385!AH@1~pM63j)EghQ2rm1sJIIw{&A! z52KKbmeFFP^M#jJ)QwL-3Yo{o416(s;Sc*?otUL! 
zinY+mDx-WD@*x*#EwYykofd$W2?7OyPCi6K!ha|G1PXu_wbY>U`7_R+ntL2Pl2m~% z3m5FJ7Sq%2K`HXp(SEYuj9gAdF>!erWZWQjQ9%QkKQJc=#9IXg2lTtbH$w1|;3SGV z1|0~thH$M)9n)_#mRe~d-UVgBJW{uyELmUtG-&+IkdVSWb9c)aei>B8uS{&!6$mP07=z$OHNuvQl$$5LChuHE&4+%jKl$1V09spZFq`yvApc&iL zjYNNxz!rWR=B2ur8&w{kilhq&B7)`3Bb+JEky$2IIs7*0L)Zkv6Jaz=LH&XD&@-6n z!I`fxp;V~J%uc*%>hyeV~?`VcTLX5m|*xag$o9!a>o8LY9X3d&nYq&Q`V$9sC7ADZjsuffddBQ6Izw|DpTseV*o6^dI7b=sFrMU*`J* zm*z^8j+F)WCP(>745ddZwnk1m7vwMjU4JYPKYs+;i5b#X@1(Sngc%( zM{W(9<{+wc6;NI%qUUludyE9n67_!-3PCm@+E^%&zxkcSKS7T*&ce$5AR*}P(M#Wf zIPnLn1bxN4a^*dI2kCFSnhl0uMgArT9faWOVShlAw)_&bM&*ilm&3hK@ntRN%RK9- zS(q;Y^wN4q8T?vQ!Oz22!MHR#6dX+t=OMi^DvLLnHY;CtBfbVdDc(h5P2+#XlDNuw zh$%TRIgNuB)Tp#T8Od@U;<%?tnX|p3{|I=rE_9X2#h@L*r%;c7Fis%4%j78wj-_ke z8&R0*SHgJ_!B7q10d|A=%A}^Tm-GSOVl&WgaF&rsg|gVfbX(Ea2&;we%F*=MxyRXI ziF0hDU&u7{tFVOnm8&fF>rQ`0M6#U5?kl5PIm^_od}QiY-*dN_pKLZ{ar)S%)-o%s z!KdvIe#%cKEzY+ax)c%@wA$!e#v@m#lsi^azG-&1yLQwbO+BCakmM<2 zm>xrduT4{87ZAEMZw7yvtW1Dq1x>W(_(KDiwW1V13YeVoe#I;lsfPJ&`+3%A3Ir8U z)fep{sw75ITzyAWf$AAo-w{;^sT5b=5LLmEnxG1TR6hN}mAdI&b)34V%)LnNT2Th|t2G@k`QcLj-*rC?|^4+cNvzET#sHZ$WlUx@HScA03MGClOhS~C`KSW}&!crKt`!Is!2h;fEQMZ_e@ihqwm5_!JJ!MMvm02kv zi}i`S90;xm^Q1$oUi1q&%|xYOFveNu+B+@B`wcUH?hWFXl+X3F)%S0?A9 zdv9^Qym{Hs)#QKw3W^PmEmla#m6t`V(Hu+q1D6*lMcFZ@m*Tx9>gBICrPY1D=3$Wo zQqs(H0*j=Q1R;M0PA^tCp;Xie9tc~e4r9mTeUVn`rJc1}T80`mlf)V?YzgMo$;_^qcy$&?+yUAZv1pN?UQv&zSKgN0*Cv8KqY4 zMCQF5{K95~R-lfotQ#ASNW~K^14MxbwJ!>R7;t~-TaJr60K=eI`rark+i}T;(;6i^ zPHi6VxeC{qgpW|vn-!xCVOx%M;*whPoZ$D9yRkJj*7YP@1NH5Q9~O^D{Haw@h=*f# zFiW-#UX1tQ#WJrf9|FG&CI|G7iUn7`jQ3LZMJl3QWUHyyB*GL|wv6plcrwI;>7X7> zTtj~xmRklg+04e@>EqPis|8LCptUK$sr(J8*eoCJ4-1U*H%c8Y1mze>ts6+A0O=47 z>Vd>7Ry+p`pgcT!pi3u9a9Nk+Uy|i+Mn6)HFzxNos1%6P%-OvpM)=Q&Hn~=6ytSYW zdjSL>RgjacW{04XjisGqhP?bh%1;_@aYTPZza6NO<w zsol5YBXYzkXF(>B{}zJe?G)42H@I{& z`w9P0OVUax#s;$;0V&?fG!+fASTYGcCQWh|`a1l$d8iX0*mTBFb(BxYz}>|+rzbwz>>t*wDCC884_kkusBq{% z9Eso;>NK{&{?K=VK=_X#knary_9G{7LCjF#yfYOX?Z)yL8$_2K#?@Bo2_x0g>Dc7# zazzNtt1d2&r={0fx#_P7XW-8Wh**+g&3}WnFP~nunHea%&k}Ma|Ln|ylKISK^+9dP z>Ks2~nxqL?(BCo-C~2Qi#EpN8PLKWj5eE4`BvL|36uJ2$Z>zis zi3JL;2zQ7SErK>3M`S$g6{Vv(PAg zTuPwQY!w8$Ayroiygl^CVIhMn+GkdmH7XV^oyw5q&%%u4$GfX(Lx)Gq4za@`csKP) zO#Jcr@1A|ewJxtLUWBK-xNGlQiG-5e8OvaRhCE4>{^c1sF&G6z)Fwy`Aw#I{@wF*##Ay`Y1QG^}2@*$c zt79S}JkqY@3w7pw9ZR#$SqmmDT!sDXK{Yfy#ENZNf;?W2&$)jw*SvqI|J(JCqwbsc zFSfMkfBbg+bISUg_J6&5di`yn0S-uxP6aw)1{vu5GQV4WkPLqlj;Q-lOHPkL4mz1k z%V%sKmE&Byj?tmHUts4t+by=vc}<;RbGp!(xACl#jrtIXc9kmmd^aG@@k95r1!tMOBvuk8U3t8d#y${LI z#j$T66Gz4Mh|hm?P?2CbK0Xi1Xy)4u%D1@N&snoJI2l?NgGWjVzLf3Mpz?!suloNt?eF1<@hx;$-Qz@LQ3y zVkH*3kvy<$?UVA7ri5*~2E$UePcdhk4nxki$Kch$l(m0NDL9a&%bTYuCT?@SE@W;K z?H~xmu9G;m118G7_T+8;w(M=bmA=hbm+9NrRr>Z#Y~rO97SoaCRIn`se6yq|4J}soXqZaV(iz==OyoSB*AeHusG=Em-TN z`*+iYdE|fBH1aWGf~dYhkTS+7&B6MegOtHzdh?GIqeNT6==sNqQCf&n+xP#;808{F ziKnLS&8;(-AJ#L~!n%hCGy8bSHT$pfMCz8K&Ys69E~JYO?3A-k{Ue@q>RHE)M+p-} z5p{gtp87=zFeuCDJaf_>+W=Vu!x|Ai6-S~}!tQ^x(GPtf4{^!p9vmug2@ZeL2fg@z zmfb1dYBxC0HfO)dxA9AIBm?b)wOG2HnBfWmJ4>%LPYEBtOF-80I3N7`Gh_;AZW;6oJgzZuN=3*j6oBP>QR8LQeP*vkOVFmeD5Hss1ns4tC2+3{KoN zAP;{i%nK3r-&y`yhQ(DJRKiTv{X7=3N`8ckTkQ4)zfs3->yZXwNA3OAiEz&f|6!Rb zjD<8ZWTJ=YP6aPDjz}khS>PaE&m*~|%>*`eY+YyT{n)!Lgx$Ci?@p1#Qk!}_obBe6 zv#$V${yDUc@HOuzC*seFZk-j~;#9odVAFq^kBI9LL;*a-T1KHcdvwcL(S&hBU*vHO z_Qix)TZ<#W<8wstBsFJ>|L3LmjNEU&CB5&FyXAB5(mP4ck={k_fwc6?X>0lg>Qib@ zQoywl{20+U0tNi9$lfWQs}HCZDL(ANX%ogD{~{Z^H;Aq6n}rO zRE^&zr^4l#?{RW{h)92z+iXrBsz0pD5A8Ac4RC|V=}Y_LPG_^fM4IYSC>`0JYv+~h z#)4RBK7ajU(YczNsuOq85OfNBqicnu1g46@S82th#!kPwWDwG^y1m*j+9OeeB|nxYy08^L5R(3tN`q4!L 
z$H(GFkk}nTB}P-cV6JW?lem8|uf-#aEadW@r*999Ul*vGjnE7A!KqC&10}F)tJHq& z5V}j-=&CW@2qzRh@~5@C)9ssV7RqpuSsUx zg=m8ghuTGBx))4a)pZ_%H#bsU3@u}2Xc5$|b+a*X1Dvkl6e8^AOkK8hmQ`xknSli4 ze~&Xohh8N8T?qJfg~Dg~)#E_33o_!`CJ~9M*`KzHgGI+I?~MgklCf;b+9i%9*lJkg`> zPXoWY=?a}jjw9O-O)EEyWZ#drsqU4e(; z>34MbHuYVK1IqBkK9y^(^hINFt%&bdBVRTua~x)xdfI>JBx|2nVeo>6hiWN#^C9=Q z$9Lt~yGmnMdRx@1=X#4SH-*pl3oqVP`;+kJU%mL$(y@5qFTMVN*Y2mYh}RK6<=SN4 zE6rs^_Gf=nsjL$YT7tmva(dB_YCd_v8!rFi(Fd2mdCFzfpQ5n*v98|vSo>4|zpc3G zyKD78*Bo$@s6FML&8h;>EdeMRT@Rq&t1;{W#+|16Vg-O*aanj@uA1V10{|QU7NA}N zYW)Pzyz?<3NvW18l&SaYsfV`qrrxinJ~mCGruKiP-ZS-$-a|*j-uwsVe^70W1n+HF zA))1e9RQH9Iiq23`G*w_B4CLF4R>$)!)oONU|i4K2gKb1J98k*WkI_fI`XpIYkXTE z-POLH`tYJB8vGjj8`V@RkNnr=-K zKBa-2kG*B1l|4%0xZN%-6|2739@-y(_9?Jj)gog`TbB%5`no6wWgKX$W8l;71xS;7 zK#U9laV?=`-jW+KI6_6N9D6%}LHDF()wq8SNh9ON-UI|rc0i(z4G_6hzMa|DuPfj? zWnMpfS~(RhpVHVoeh5UQpwBSozdjuYYPAtcR~s|khd0dn5YDhazC8~zVzkwP+dS6wT2awKy>^?345X>=fDWj=wJo#*W3~2RQVsyQe6p_@WJuWt zG4Cz96-!RwR#eS^0?0NxbW;8%1iD+ctR@ zYXR5Lj^ILVq&P0rSfFOGt$zS7onn{mi(-dT>rN~mx6&;*_KwhxyAaWpci5{GbGm3_>LaUQ{qj~!sv z_}CHLFbC7O`RQu`H{>0_Z9@eA;m7bcFpQlIi5h$i?*gCRg$H$hweZn!SObCr zxdzPUSPqC*d&1O%-isXClVFIDB?#GK!UaygXE?)Q_d&a6D$HpYcj?^tOjzrr4%NIblTh)d74gT+h#}N*~rJBkDkqaZKyC3K$bTDqvjnpn&<=4wjot)YlcTS)ijs z)oVto0yc}i0vy7$E_U54DsbpWQ32CE_7o)*@GZL&8c=@$C;OenQUSND4+;p?ycvny zY)p53EejkSApg+*_E!PldfNrQKEL(w z)B9(?dGYD*?w(!WC@Kqo)id!;JW~CLIhllJ1w|SZ4LIYRGqUiu%vWUe>V=T>!kg2S(c#*!u zpXb3`AbOAEJRmZfr1F(^oV#MvA-$K2E9k2ttRNY|Q%M%qjUs zQzSKncT?ozt25LVGMS|DLwZRBIO0Dbmy6HTQHp=u3CaN_2A#x$eFVI@PrK6bI#flY zA~{|i^$}?!A~6XK2h^J`8CvP!OET|Pc-2?}!a__V#VJ4HFv!PGqQwE5LU))a*hr(s zR)7~WTK=8T-N_8d(da85M_55|Nf0@igvtEMJ%amF9OWrZ0?PrHgm9)oLZa%zOr4y# za%6w;Q~}l;pByZoHw4c6o~Bj-X7(#dB)=uy;%Kit9d)0i&H>!UbcU}#$EjYXhG=#VNmNm-&j*>MXXyqFK zW5IGTY(m`;x(9Vn=pJ;npb=m;$!@i%Hm|CA{BW<@3MIURsenpM){>S+GUf(Ua#S%6 zHl$)4MBTFWiC(>VFa$fK`$v(J5G4nWXJIox$Ky37c|bFOeciIsQLrR6YOVvB#Yuls zqccw6V%2AqwFk-0$w0}-2HYAhF^GkrRpijBV4{;_amG4~OP!gCwCapk^ViBeTA=|3 zVktf=JRnqxL>f#0Zu8)F3JqZc5p3w0|0jT|8|B$FL^wrcP?k9|d{~9qSgX3x$(jI& znaq4X19XxxD|%+G*3=pa+#Di_uqJ;7Vrgn>3kimv$>>k$3i~i{?v1(DyzJHa^%hAK z#t%Kzv|=WK)r@An>_S(wZn%I=zJ(C_O?M)bP;$rI4kQBP$}?(wGDn)Td;Wo2Gq=#^J1%Er@Da~1XqLL`64tee#a zpJum02yg%OIjk_!qllzB*Ts-0%*0!0feb3^e>5+{p7H~R5AjG0tDJ7MG&d9 zGZje0Wk%>Yj=g{<+-#6~6AG+J)0auGK%D6ka@&u?Hna)M2x)qH2k+&_&+k7w)bZrL z@;3D}a+2vO>`C&>oF9{yZGz&Ql*($Jk7#eiv=u4j=J8knVx73-rpA9{j7iCiqGTSK zI*ux%jK5}s%8Y;1Y>2sQPnYsp${8GpJoX>jaT`8cfh@^M)1<>SCwmyg4qTRskyyL=p! zy#!m16j$~_gl`2kuS$P07{%Zjj|dG56jVVuCB+_QUw8n!V`;DQisV~)m;kJ(KGui^ zA$nT?s8Hgsm;q!PsHtcH&H%a)AliL1fY7y?ik9;Pz;CA-lOJLRa5YVuN}70NfUu+~iG2+Q+U;dq1j->=T8TT_L`v|HF z6`!F&Ur083i{pPi(I#i9;dqYPWKtsrM`F^|vdW}QCUrP6XWCmd2b@3S^fwO>3W!Xn zpiQQHWnL>AziE^^oknGH$&O5!+z@PhATDiUCQyHh44H~qV{%Xtc!iw!ZgDYUWyd}c%SbHX0oQ>lhD-kqwSpI^n z$i>?TJUtmc=7%Zvllh*s_`xM8e0FOU^kG*KqgRG<}@1 zRFvyaF6w^_EL1OMCkH2xn2kTrFDyGA%)=f%%go)R_;F(p$<@xVpV#UIS7olI$UDIu zwAz=Uuhuib*D8xMfq0T;xpsZnw45z4E;>JVY_#%=XV15}GH2(vpDSr(^ZA|z&C9lT zXeJ)|S+%vc6I>R=F|BlgDaR)f`Oe-fO=lqs9l?K=UbBG6Y7k`ATA+VRnOUh8xa@Zn zFpB_h_a3dDjXG;RFV@*p^e{kEUcHc^K+_F9L7_r?Hs31P>;bW#@$fXf@CBT;nozqVaCFAhASBpX)30gO>WyIM*y^% ze~^DyQq(rLC`td79ERM_yw@tu=_M%WTJf~j%HEh)i>Ti=hHRSQK-XdY9Sn#1mVAF$ zf78U|d3ItawwqSWjvlQGbW+id3uLL78wIp`4oA;%gx8gj(f^tf+csfRLWb*WN+b+^ zgAO_wv#)7k6Pl#;a~2bMjCtH{FYz}NceRMp~)8%`UJ z46Q9pXW@XhP3*vwfqN`%7w)dUJ*f`-&_M%yQwa;Fk+z;2g@u5$6xDddDS&NZ3od_Q zS&W(FU{Ai?1KUXe?nWT|us~A~Hfvo1&{FB1Y2ff+lxK3R)Y5QnX=Cd0b>+-?T7`cQ z3f%;S?Arnn1EQzyl8T@C?1H~ZFw+WcLaAq>PZ>)wSYUD--Y{e+vT>IDFa9EYYK$pcssr? 
z;C>8h7aoqf?!&h?_6+y6+nr%J>bidm-zP54ZXZ_-r%dEJKj1RZmt!O~%m z;D;8IlE_~JKeT@ctgJMeC@?r*x)0iOy8~L84H3juzDIn}(lfWfapPKg<~|o5LmFGc z!Qyd^kiWJ8u?YJ%WW^67B&cJaBO@>9QV3g~_)!T@klxaI#1uSD`bgq@@n|7BXJ^?MY z;0a{s7*}Q@J~*Go9@9{PLx!fz$W7N^k%ivgdqsNW^|b*wy`sib(Re4dXf+GODY?r| zJEPZ`&9aWhq57eD#Kd=bGxC4Po8J6myK;i3d_a8NQ*6by)IrJ$wAdsRn#04OX&?^E zj$^LD6tiGhvimS^c`JAdoI-NYYslbhr3l2x92G=X{x#JOqsqc_THxr0acJuE5i_#& zCIQz&dm%=*u9lF_JefGCG*ky$a5vKgO(S+IXco9zL(}-(8k(MB+bMsNhV<6ZG_tpb zru4H@Bn|ehp=r!-4b9!X+>($|&pQ6MDzXvgn+ZG+L*_xy{xP3p96}b-!%bLm6ga87 z5!Z7-9_ks*#cipl${WifJ)~5JHa+Ph3-D1@=h+2SG;a~*(`bpZcnX_E$x_Fdwwp&b zFkbA&rPLZzj)V>=%IG6gpn2;?2H@{f)F-1`_ zS&ksc<+%|FBo%oP37kfQbYm_8XGmHOHjju+801t?NF-Y?4X697&G_KDeugLRR^q6a zL0v`35rtFVe0E%HVs&#wP3vRB&fW8NQzJBsAkGU&C{1C>G>k~U9(&|ih4xMwG!&wJV?|)Tu{zuc$+lYs5JwD`6rP5N zfRdw>LUWm<*sno#>~IlG)a%7O^*TWFh@yB$sO(MWVMWQitZsH_*uGFH+#ay zmt-w0_)R{sIlg~L>YK=`wtG4vyITYqy$tuqWJOYyKIFuw{b#6~W{;L;v7#*GI1exA z0(snV1_$cTgOAC&gvqUAkQ-%Qi@_`wnM{x~ddddO0+QkIkY&b&5MmmYWCpTCOq;SG z={D5`Woi+fT%k6PPEO!nXD^n~NqB>wxx(oYf|u(MWt@LF=e)EwS%5N_9+rF-8py+o zD@nH%Wz2~|2R70QYgf1qvMR#95g80f$RdN2{4%RWSlEdFB@Ebwt_1;?S}W%_eJzwG zmOCB*?k!2n@ zT!HiW;Ve>v8nG2bbY7JQ5m!XE8A%NB!dw!!D22&B&clgI)g%uq&iY%RjXbspj)Uxo zgUSMoh%Sz7XSMT@t!n2PMo}sC%ByhWLgKChjsbtJ1SKMatAOK*0v1un4R{uMl;HA} z4(xRie_YzK(O78|w zCqJ!=_S|~4g6QM&pe|OW3o0a3mIoWoSn?i+kn_4coQq}^j(u}QoX#F)=N*}~t5IAO zX_jSV(BZ(OyLogz!A#5SBrbnxSgvD*`Q(RiCm#?(-9%t^W)xK+2ww4~{NU|8S6$<= zI&*70&n@~KMR7RDnk;8_d$9RQ+X;aoY>^`=E=@pTZYFOxke^w`9qBWI>!)V2H(v(Z zkb`)B$?oO~g_X}w*Dj}(AFSnd<<7?=G)Gv3^7gqb$2MQTSU0Dta8iFiLi1C!(+cu_ zb%So2$-etCGZ}X26(evVmbh^>*>|&#2ntig0K<6}Ep%6PX^88Eh-Tc!Auy&hZw<7F2-AK3=Yell^2L2XI`FIJ3+? z-YfGu>_zj~Ve{nB-VB_}bWY{F^TM@>$vj5u)s+aS@Xq0)iMvB@$P{Zl6H_^2qjkK1 zLGYe4n_=$%F!v?QaUIvWe+9frNWi!N0TA2<98n}iQIsW%BD8IM5wh zDFFLDg%b|C=&|x|x`jx-^@*C5Z%Oxv*Pz_#Td>Nv!m@_+CzNjupKZahe5;U9zBMm6 z2CU>?CmVsATox*&5i65zoZoRcpES)a{hmp;%)h?FQldvZ>9>`PN?;I`gG`tLMJG15*LkJNV%WMaYXS?RPCI?H}%z zom95L3b?u|gR`;}aDBLaQI7>&Ur37u+z=iLxMm@UazrTK1|n#6yaFToK8`02yE@fd zWo)RKd=`IpU7970G}9)UrbMJGqHcj=6HV<_WTL4~pCT1ClOC98mM%PKDUdRq?^UTn zur;l_hoG^cbqzG*5|Q?7(mXS1oS8JuOd4h;%`%flnMsq(q(NrV95ZQ*8Hkzg@M96v zg^DR=+UI6M^z^{uT{K-ZTGOoLk}|_IC1OF;eXD;hQ?YKyqSAL}lnCcj0)!9o1W%!= z(AaYyW=jB$R*s$$RVBuPo}_8}>PeXDqePAomvS0M3>@Sq9&-@N#}O*2g=o<1kUWqk z%ML{&4g>gvsEB39WB{KMAXcWH#Fz}=6V8$+C)y4Nj+Y2Y7UX+fQV`Mq*yB=JU*NFH zhT4De1VY!LV`O=$+a$KUOxRtTyeYCBNrP5#Hjo5uf!OXxaO#PFZnT<&y`_mkYR7XH(w3z2^r@WeLv-1H?jzZ?FYqMZ@b4WGrg`$+bl zFRyw1Pl;+_<({bKt)YQ)M(=u4%ZcS`R`*4aKnE>d=xp_p2x8tK&SZ^|MwEsLhD zF+=gw9dSbP6i&evJ8d-yGt5xREO~*Hc&S0zJn_6eN5JX5BM(Tik8=R|*vHNp4ICV5IX zD(p1P>M(Yi>9T420$WxsG}F=HLf+q$O4w$Wf!Tc9Ou1rFAFjvY%`$DD8N_vq?rR+D z%#gYp>&!szw&BKvOQ?dfWoCcnsjilp)8GlFEoGPKSY?W$7$^(AWD+8j?72X( z$V>wzG86C-pv72YMjS+Gjp@aG{8VsMMRHd4ytMU$gzhN+bkd(7P@vSEMMpoBv^psZSH zR!6oHE6r-SSZVqjh@%dPb!Gw}R+$xXYt$yQ3Mv+vmC)2?E}>JSIv8urDKWk(7nxdP zRu>g(%qj558q-!CTVs}>>C{K8F{i<&N=RyrISoEl=1Ey&+K^PSHD(E_x?xXQV|u~I z$Q!J4VJ|V1)J!u;9F2dGW&|wbUuiVZ6j6qUIW~Ys(c}D-C3bW$2aywpL{aMY@BD<9)Pz?c>ngPpmiC)>kki!4 zBGsN#tBO3H>ruMY!<8sQ$rHG5qKK#Hld8(hRx*K!qH#AAa@(nz$+;{0 z&GpR_?eu@aJ-zXUpR5`-JE3`^oj%wOkWcy8)hJ9RD5Or}=?m%}q`{JKHF}IfU8Y=( z(&cp8)u=kTOej3!xkz-^V1-FhDU$w15l%_8mR8BBc*^0ZcscV80@Y_eg>p!l0>Z*Y zG~VcaJJNGRXbsV5g3-LhrSqWBk`uRVp!6xA*c;uo%X?SO_ke(j^Ks6)-b4XL>^hLFlL zlW;={_qQ~Llp9i8IoN9VY0MaoVf1?2BM(#IBUWf2Wp_1VkJ3f{V|rP1h!hM;Sx@Zo zTrGcMJF9IS;z@Idd;_VFlm`}4taOLQ|FS zF)=4_T6B@*IntUZt4S3Jd0c~wDz|~-Y%H&uN!CQy*fQ$Tqr6&FTAhe;n7@>3O4)yq z70zW5YL3oW5s5?0X$vQH%PZ>^b^zgyc&XWVJ|${EURaSNEnb51Gc@U{6fBl#l%)Om z3)XSA#;Z@FC6GiN&Ga#Z*W~Dt9J4x6?JB!1G@`@7VuPo;VcPQ&jmY39Mp_Q83SR}J 
zLh)mYGG9gZDviE$_9&#H#(oQR<3N9mmngcfiG&D6<~W`r9tYTNO0tl(ZdE)*d69sG zPiK4(Pmym+Y%O3@W3w&pqlsjP0r5k9GNvCMz?sqaEs`Lsl)(+@0y$K#Bj*F8fNmZo zkAe7vR$NdoC%26nMG;}N2qQrhMa76Vw2A{Rj-seIG1`KGA~Riy+3cj7q5moCFXJIWLp=M|Upo0VI;HqQ^-$_m&v*y}DY`oW{bP0^ zR<~N=8=F`jSzW=|5~<>ub+mtaY=vV-1wx5ki4sjSRLZlZ!TptSwE}Aqo*S(P0i#6B zNa*Mx^ertY<6jPvS6W#QfkRzoJ-m_|$5}u-3rU_$7J+Gnhb8-LXwwT7@^8p95)h`b3lIx{Y*$yG`U>Hl?#Gh z2M_(spc}EunvEj(somUgq``7tLP9{K$}(!T!AHfB6bM-^rg$78=z2m{NHn3Y%2l|c zmN6nEW3ZB>;n;NKt*9-y!F1#;HTeXiQ4Sgb$+-r9*?Lq@RoHyNE9mkGiv1i)Ujb&o zexBf0wv-6Q4H|!)HVFh`<*;B_xW8lS63Af|dx-hRo>LRTSeb{zPg{{J&}#&r!>5)n zl%;2>6wShg0K+{`JPXz&jD_jI31zgPm19si%l6z{z{ z458?@BrV+1CT?UM!Z9@yVF+nJ$uNYXRvd>R6z%so455Ff2R3dt3L$lxNOkF2R#d;l z$c=5)DC%*b)okcBQ4tSm#>g!$5veIUR&FkeOEWh=)!4ZOsj+&iW9b&8#_G$CcpRk0 z1k;g?gVcojTxsv-phz$wDu9C77%Dbxc-tU{4y(9~Gs+b*Es5}fSc%E-f#mGg2_F~( zV%1j#BprWulL=g`C?YINtR@1sl+0?2hUo~6xz*TfPT~(hEm-)%p91F5B2~z3Eh{{eI#-m7Jzh-KVo&*J5uGGH@RMo7V1zZi}w$L0*(h>S5(A`7-e zsR(}~K3KW309VJvde;iM&g4NGhx@6@gr{()WFI?;`?8{KqA{0-I}_swvjGU2J7Q`U zVAV(u82o#j24gVu+C{5)fTK35ZgS z1Vk520-_5h0kJN}B8YB@1Vnd40^)ybm#iR6ROD)d4HMECj7JSRP@RRFa+-l=Pl;>c zl;;e(wp3Ls;S*vp!~lG56b_q2{XwCD$8p@pJMrUuAJeu;>JNE{^xdMGvKsW&6HUmJ z!*~%C*}-$CQ?@I;@`M!h86J`x*B_pwQr2utZUYy}T2_(h$t+I0Ds9Aqi^_jlKGSY0 z<(qv^c8NP1kPL%Db)4Zl+p?3RvH-I)w4ADn2Y52YaSS+q9Lw++LRfs)903-MBM=Wy zc2FAR&3Cngn|sedDFqCk@xn&6sK20KR>rC$<|5o*PJqRIJdA;4+bw-P_KWhVVHCrP`K9<4o(=B=oC@kzA})&0XbxqN-rMl#!p?S zsRKmc6SSJLSNQY5*)Efw^ygtb)kC#4>CXdahE>+p#&uc1y)%1ehyDs<(nMHy1tXJZ zS$ubsC+paOV>x;w0|uU$b@aA{Ad0y5Pno1J#zENqz>78vF9fv}@eO~B7MLlW&~yZO z@bmaeLGc#h6oN4;qMeN9ByfJ{_Rkm9oWos~c|u+bpKrtSEbvbe-NN}o8wENOQXD#|NFTz! z_*R}Bp~ng{Qb+|Uw-x2Hn_t@Ju%gld!P7%J@?v4OO?k*fZvB51mT>4F$|9k^!x$d; zJA^`!ncc|QVJUuj!riBGX_yl1evB>+6Y|@S-=*PWh-nFjhSMdq(4k>zRU67>p+iGI z(UrzRhlYNlD}#kPkba^od4=kaexfUHg$@l%yi`b8o;yQV!1|012|txYyi z5+alabcv-Wy)%D}1t+)h#KTTPT=JX~MI%oMNv@Zm$CHm#e=8J?ygtRxIthWu3uPY2 zLSCV0=qU*(<(JSYk&t^75Q{w4Ct6bGvCx#z;~BgMPdcs`m3Yyq@=zGdI9__SH$q3W zNq8f4b-H7zMjv5ff#x8Ogefs*nnyzVJV1gu$RlA&coBaTRmv>_PKhl3i&gxflM+kaTO8C=*MnDXAds)-EtnObG-(R=0M< z{5OZ~!FU?yuUr#^P>SIoW_t8!Vc8}UPZ8FEFT_qm04(_?GUtFvnax^O zf2*q{<3vSZzNNt!anocuCxRfzmt50AbCZ}3854g&uw2!gR)W$hJI}E+)&2?E)1~n` zl{vkv0em3g(TZokrfd%Ff}zN99M_MQi2{h!g@qqLkEkCO5oMha$H|bsLq85=7zMG#y<$Sp1wVyPVCLcn{l35p&6 z3Jd^jVAImcGxtE0Kj2l34gM?wR-mmYFND}Y3Cd3^yZuJ7JZ)jKhK`WO6^6g zWGS%%MASY2Ap{$ffK(~-tX*A-j{9^e@xOl?7Bh#daC}KvbP$?Th)I&iI2$^jfS8S! 
zfar3IAeavF(R2EAu|SMH`sVpSVqcHT0`nPX_U4#`u%{glVrpaGfJq461DJPa1AI3+zc%Bpgc2LE|of~YGJ+WVW%*9~qGf>vl64d{BchG;H z6hBW!enS>gW~R9Ml^JNn(=xFB{nSR_t|>;0@x}+gOw)kk0RUV;qra*Gn3xO$6fNy= z`D=&7eN+{YJxEmoQ+rW>;7Q0;VYo&O3#iVB(q)Z5qADn8Tw?jKzPW%!rmF;tYwb#K z_52XDat@Rlt{g}WVdsO_Z3<-W-CZy_tb!4LIzkvc>siS*gqc|&L{*g|ELD^fnivAj z{>bW)W1Is2dK4V*#%_e3p>do=50laMZ`Wnj87wGp8kQU7(_D&?30rY=qaCsAv_LO-H^ zwW%>Agn(2+nSxY9ldIlDE)XgaEj6pe%4DJvEOa6o73sh#9ide|4NjC|8hqN67-CTY zS8{ReKym?_E(~HW2gsqB#LNKE4u=XfX zizMVgQek#yNHA=AKsL&BpLhhjJRG2ZUE$H-gwxX%UJ2zDUJXs+TX{%b;Ysx9@=m~X z?W`WOc0m2Kuyyw;aW!c1R0D|>41t!A((;Lm8oNF8VoFisORI|V3W}lRfQRlcX%$sN zD_0VnH5!M1;*3pKQk1SMDNdJ_6sF5ciic{JQruN@Av-xAH`LL_#lpe=8lPPB&+yFK$$VAE=&}A z3Yh$tR@cpkO?6=_BI#9~@idBO^%R?Bp-enTqu8ua3(|{0W4mCJX_AvIu?x(~me>hq zx+Q*SC5KZGOmLIvi(E>Es`rOq=3Gr7s)k(?f@#N@`F>;x3A;H-szL(hl~MCP-i znA%(VC8l7&yu*h>O=G~k#fQmy`x++e9ebFZo^kUgA10?~+`P+&$$E1fCdUq#I9a@g zGm=A?L|!ni51f~N2FkNN?rEtc)UC}u5d+0#bw$ba;LWRkn4BKGdC?D((}Op!L}7Az zip@)Yn4F$s^NJrPr$=djUhu=@^eD|cR+yX~rFpv#lhdO#@AhFbKitWJh+yfVnm79} zIXzVKULPi>hicyG!{qc(&GUSaY+mNWldkYDR7fw6xPNsj`mX}XEu03Q@ydV0ovCqiy|#mG}}04)ga>V zfSNvKoeQ^H33Q@5#K82TXC60|?g?0uR6!T@bziT&9cCtuZ>J|HUwNm|VqWm!MaTG1 z*K`w-PBdZ5^a|2{Zr;wLbEAS>n-}|t=vSpzoJ$o*vTylOdPRQo-W;W;$-aHVkJ2m1 zmwCUB(ktq(;EV0ceUx5tgxKfz7*JJ0+1Pk!;?qdH&aBV^ETQ}R`IWu;-BG8}8>_yT z{M*b&gGRH^#f@L%VKHv*l)ra|?cPoM^UgCxdwYkfSf!wUwC2xderwWbK|ta-9`u)0 zXo@@IVPoB^zc=Z!^m`J|tGJ7REJl4mDnDJ48a#Z&&>u*gNTtm$Dle!N{?~9%(kgm= zse^n6k+A^zYe6Lp;A*h*$RL~8a`a*}8Bq3B((Z%@H?0P#oR@-7$S^dODJ?M`u#Tp; zVNTs*)Y5Z*7MWOZrCSmff}V*fqZEE>*dI`>lmhopjMi5$NJh%Y9dfF;p1O9 z@jsD&1v&roS8LgMeEgbx{CX{W8XsR5*f(m~MSQ$h%f5(@Z`QJ7`1p-l_9c9LOFn*6 zV8125zFo_n$H#YS*)#Y+>o4HreS!T>ExUq`->qdA@bP=K>?M5sel6R>#~(;e^z#}% z{-~C{jF0cuvK@SUua>=vkMGOJ2hz_UNX|chmS2A&J^NEh{Igni6(1Na2K}FZQOmaQ z@t5-JuWH#4AAc>){!J|_@bR~`tc#DolV5*d%X;|u2l@C%`S>UK_-D!aFY-$f`EUH| z<2P&B03SbB%bNK3d4_%b3*6AhZ*eC+#^l_=$J;oEy(B&E@SnSG>{NT%%UnhYxpuHkLNqy_wGC!-r>)|MCVUY*rSV%c)RNuh+9N zNNaZZ@C&=My*U&%c}|{0ruH02_+;3-+bRaw3~khz?+D#!Z(vup*|;9xiHzMWcJU2c zVwZPLdjs2GpZY(%RKM$BXm@qCGk2E&W9oh9iTd%gl|B5W%J*e(=pTM@Pw%YOH|XS{%jXT82K&nj z{zAbQ2)!N&3>mp^irrMFM$>bJ(GBegB`Gc0R66)M$0!! 
zs?TbNjKSA@YB@LB-BL%Q8zR@R?%aiqv+BN(obU2-4KyqNno6(`TB^-7Z||BGMkmyT zcH9*#}D^)yh)&FzE zH}<7;f@mO!&6}{;RoT^j71ZA$2!_F^;)}$u1?t=ht7(1Et2N_JR93ynw3x3u;SxH_ z+>j+eOJ6&=3BOf+Pi}L73^It%-^qXbr=yf7(xpO|TWd7u2P9X7RCWp>Jz|tSw~Yc) z!SMdfCxwjfV^6i=*=mZgx_7BZG4xG3oApv zdAd8fni66-zM0uqa^Bc{p2VlEh+TLhmA62WpG^HQ=S#Uo?vt<4tnq&+jpp*%ux-=r zSgGQ*7x#!a%69_jwu&LU`8h%AUj_%u$yAz@2o3g;;~;UyUOwI(EW+2g?1$h^537f~ z$t%={AIAB7IwME1+ck6jxjdpsb++uey51=PC;pMbL?(R5B;rSH-EoZY+?+Wa^Ejk_ zUA|9lqRJT@sbCj#HX46{<6^VtKVn6XtIvbp*KaWW4MR3ixpfxjk;;f8+kl=|F6)?& z8(RyH$IaYI1(#ODXporR;hQa9=jaYoJ8Eja^<9(P_Bdt{#iTs>=E)DW=4=?QN2!0iZgtMs29>uT?rr#l zjDn|E%|SY9oP8%O=F^*hlvQ%T&U?AzMxam{{9ld5eZr%2KEgMwhu?C;15`&%1xTsjpYgg(CI5@aSE7r;{sh z#dCXY+O003JqKi6vL|xf*VVqWKC@zAph@b!6KnoW57P%i<2Ao>2w&{4Q)1WUp@~-c z>ify%JO3!RPpQ?q68h9%{RBq(8*;9O#~oWvb1q=@*>9IUzrB^r%-XYgP)c(3ZlC-9 zeLcP|$}oRPE@0Vf`>QftX=^y@@X~k-!q+ztj`i4mg|B`S-}06??}XhKL* zb#UF#`T1Rj&@f$R27lx#$g`2CtczQj%Po|XGUqD&T?x3v+n zg6<6zjX?V65o^jZi1vfnC`h&g-PEDkJk80Mi)ZPC_5CPvtT;t>dB^X(f_*CJh|U2@+S zv;4}YCfOSahX8#%irnd1M_^amC+}P>H&R_!M&}3#=^fXxz8{}ZO}t;A9jgV<{&#7s22{6`vvfPz z@m4s`*0$KM5%Q+BtrF|6qHHu*SImE{vHIo%(maWsB@))*K9#lW#tB$1SJC?fD=XN^ zT6us_^@AoK=4Y>79f6%P;o6F;^LwUN>0sdeG$(;k%y}C0Qc_W`nPIl?v~Si#=Dped zb`Tw&3HJr{vAR!c96L4h2JvmH4>nhuVZ*<@vL5!6^iCM!AVouLxs}q2rT%|T*>Ypv zb6P-#-3dF3&F(~KLzw4}J_^bZx}`9RN!!Kwk}?~vJdh{X`0#dT6`mnO8;X87wqAZCnNkv*&pTU)+Rxvf_?Cc) zon3tepuJ1k@yb4EAM%raQZs*ksz8k@?UcWE47G7rhlt@;jmlPxsR^5#BG>@S`qX-^_yf50&L$UE{|y= z<(k`W5^GmZpOtsZkA%%LA(7W<_W=x-DVgoVQXn?}pO?`YaMRO;DL2F_R~k{zBHpCtwZ*#f=sa06~E#% ziG8WZ6f_DmhqOOB*y```xu2r!GE6Q=R0Nx9$uG^6daMaL~~`2S;am|Gyed8AxoXUT3B%YS1HG5 z3n60ZR5b3)+gBq!CB-R2AjJHF?Bpn1-yOdKr-Ql~-;{2+s^j+OBZyX>4e1p$tKAch zgM->XvLF1qi6mkL(7Ao*>|h#TiMD<uC zmm<}5OV*7JV%0wN#5#oA|5!6wUHQ`W5Z=h^_KjS4eNVgH6c%qzpAXXAznPpp(bDV0 z9G%xa)a%2@AN$WPX`*{-jW|&oTliE5-Q-=y6z-3MR&%$1U}e9hq~NNW0YMU<=acdA zcLR;|O78Kd0LQ1#e)qKA{3U;ZJwk%uYaQLbQ?B*(xEn|jaMV_OhYyO4*L5k6y*1eF zIi!i*cnGZ%;t~gN1+P8sYQ;9#nexVZ`l-}`>LPm2+TKZB8eSAj)UbR!-z8KGFI#`C zL=2S^83^@%Vc6D=KIANL+p`Z7+Kx@OiO&^%*gil{(#9%7Z)fpaItB+SLD0FG=XDN& zHFd^8@B7_5pJxb?gk*F#*;z7+9nXKJ#=C}_u~xSI=*+|BG@53XJxItLa9-%OB{*4Q zhlsNGp$qT36pCNu{gCncm|y+j#YXP4WN$rP=s`$-m60&EG9pE_!0t3EQ?n00C0jaV z((g>1_T!X&ynbw3Nxb4XADuYbCmCwxznOMN4U-_45D7kJEEVyO_l=S+Eac0Ln`D@} zA<0ElK;qS5gHW~icWU-`-D|baGry;+?PhPMCw<(lbL-x(Y55(`;?vd}wVF5Iz=hy+ zVz3i`;h%y~s;75zL2?*!5*T??-~_&xcVcOgN!7Z!{iV5J;M+v*#uc4P_o7TN){gc2 zxQ1}S^+~BhyCcdc+zRXVweq$1wJ2srC^Y)edjhf%vOC$3QK@j-Ibl*~C)J02@cl|G zyBpp`e09Hwh@HVA>$uA5!HKI8x=MpRWQV7Jlhji5VvY^#&kSZxc(mHAqm{(;T6$Vj zvY@uU4^*YGza$HPovtbs}4&DiVol~?7#e%cD7%FDGcs!n^nPZSXl&RH6T zy7zA9EQaP!oYPzGEXC`9Vb-ApdY+X>3)@+Kthw8vXQzZey{-tsTxh9mUXwUGqH=8V zVY@ZQL!rF}HA&3w#hkm8Joce^`})3rMJ2^RU3KmuA&^xX$||waEB}PD*L}+$yj0+J zpBGya7M7lETRsY_957(W2 zrr(7v5ujWmrP=SF*_mDyWacR*xqtZcnOYSgJwE|#ml>V|`^^9%;K4km&rj75<#le~ zP8YkhT^0{8zgXve;TUN=IH@qwaH1&0@fvh%oe(Mro1ZJ&5|H$pSEw=CLleY|iY1rn&Oe)qvt0*Ixu53P^MS3Nwp*lYfOkHOp76@rb z5A;S~H74uqQs!UQJLs`(lDnY=4yyX0UdwT#ua@eue z7pQ2woS%!S2pRJ>*>_K~*E0W_1c5&!$=%?P=D~VLP^o9hca)e(B82vO-D@y-eK#D$ zw4X3e!k)da7@@o##duJTgXW+1-1src>C|qX74bN~&0s^39He6AY9^N6m`u!#pSM{Z=BFfG>GnL!f-$EXHZwk}V&LuuuHxnIW zUk+K0-sBA8WTzw1O97lQj0Y2`%^2fWipSM!6&xzP8l0f1j&3b>a*%F{Qr) z!_g$OP1Z%0X(M{rPLg2ICwFhnR=}3pF&tsAO#Y_V}2#*Ed6bnH0jb zqXPD#i!o?=Dv<<#Wtf}cDszuR55jf57EE(XZl|k6QBQAnO>RipZqM<@@JMm~H3xa# zb#)w1$k)EG>uPWRAfhd%RQT ziZgbwe=%E?y^(J<30B%V@C)+N--alBrBwdnMt*YvZPz2@B9WJle%tkWx*K*woA2I7 zdRU*f;i~wEr|*rCsDL>^>x7cdq6Gd7Kr*{|*S1~A!uD57G?T)>a#%Z>_+Hk2qMEyOnu}~N>M{%yW zB?^0b9gLNDjqKWnire(cx_Rv-pXTW@{QdB}5k4^KQdXDgt+*f@^3@wot=q)2?mQ?u5AcUh};EPCV~rfH9jl 
z9j`GnPG74YE?V#j_lBuYl97#j_{JroM$w>4KrWe zw)CbsyAWtETjK6hfUFF+>AImDrmUQWZ>Ok#IjKkb7fLNtZS?C_9W$Fs-}WuGKm)vgehBNnEn69~`Zm;$p!f;( zgqVtwJPTg?!&{E|E9cjHAaqind~e@Sz;XxvmfXViH{BR*mDzz2UdUvAo0H`Z*^}je z_sAQ)_WQF|2?y9OSJ6_AoqB8=&}bb zrfR(>Y>svue|&<$D9vf;&Z<16<%G*sU$Gha(NYKxYLH|XKdkVwQXkgDJ6|)GyeM&3 z_Un7j>0<^3Chh@NyL>R}=Yw2~=$iIx2G%%YePJ8e3uj~x&b+r(Mb#z#Ch z`>(XoOM=R3ihgVL`K5&#)(OI(N@(9P0|R=1vUkNyW8844! ztamv_?_y@ffh*MRD1yn0l(d}TC5+d{?pT(TYu0I%w4TXJ*xX3*Q%RJG&m0O$73Ss4q^2!ti)oXOq~_m9Er>ViY=84ioMny6)DRzU(Vm z=^Kt7i-aOxr))(^kD`dorf~az$1NXrZkj*y_*Hrld%5vngO8GpU@2vF(4GI0<$VmyG>?*x&Sg>sFj_ z7h0*ld@u8dw~)9cIQ(vvG~W`Y)DZF#PuRKr0e{o{w3O@qyC2x>vp?PJj9ewYUhA_hr+k$K`vvmoLn3V)Q+U|ERBA&b=nNEYs z58F!@G@a`nh(2oxTQYjkL96!(5rM~%93&(CatG_2>Z^_Rz6Hkz){hGO_5wVjg%Vjp zFkYaPn#Otou^PI`kbVYpsFJ=oD&*GHC|}I}UKeX(+6C^rA#Z+v>2qcyCz8*tKa+MB zboJ?SR=6<@7RQJvwZ)N=9_buMtobdH?al5Vho;oczhNMC8 z_1)@;yC3yioe4QY-Sj13{oSfT$e+xbmV&Oc18nK2kId>Q4w1$%>8k7^P z{1!NmkZO15TN)RCiLvUY491PA&Vv~rPS$OhDS{R=Nq#Y)PR>7LS-Je$&p$j&c0y9A zO_h7=1Jh8~%at3Du($P@akeThq`vpZ{B5Wk&I5{6O$_^%uxb|7N4|U=8xjQmM&n8+ zJKGz`06f29daW*P>6Te5#d7vuf6I=RPP=@rXSVIKU3z(clB&dW=NC~zlD}$bDmjZm z(TY;-WM!7bv>(3wyfVSGRtMwx<=w$%I+XNEzX~{L7_YDs)K4cc1NpUq-T8X^v#ISL zyqfi^OAXqq1zXHU}iNi@W1?XzKrm(ds)k zJZmIEx0A+OWm>-X#|@hax4Wh9RwvV!F1KfRTxG+4rCb#WOU>NbVKudW(_ATvZ&P{L zm0K}_xYrh^EbAO24pDUdRnJHgF8_%4#QCG{lumiid9CNh@pv<>9ZIIoU71!IV zJz5X*6sdH|ei17=pPJ1Pj?+X?UaNINLl@>NhzAV>g4vGf#)Olf5|fqEdH|+r+jt@R zs$j=|bL~>y`;A!i-l9e>@Kf#^Q=R-igl-g%`>oKG;G~I%2~4`inCo3DdS!BFAKJrq zg;|m+_G$B%-M-iAee)s*>!5E1IuFfx&t|I;=p>n6M>DxSd_`fg_=?->dwfU3`^y-} zS`EJOfi`xWb1wJZchC52V0t_u$p*xu3L^M_NW$=x8JoI?`#^2I9g_c3VkcN^pPzZg zwDsN4Kt;OTz4ZrvnU}sDmZE>#Lt$_m@v5*fGiR)(q#O5hjdaug{)&YiC=<-5^x$Or z&2Lkk#ilWFjSXxRM!C<1N8LNWB(;x! z!hIzZa=*KS)tw{c<5n7F9^gn|HiI>@Rn}DxUE<>z%SXe#Y>)_)QCFCcimYxH!zjHK zzb@{R2ZA%y$9uQy67xgCymJd=jh!&UpPR?%SvvQZJSHr1hM$YPWFo~oIK;+ZPfl^h^UWATVM?^T zV>|8mb_0$T*5B*?G2b>U{H02nW50V(y;yqFYrhh+q-?oUL2KCVu$ofi-Iuq2`nLb@ zS#uG-+x`jSil>v!6g6~js4wQ2%80q}=G?O<*?P~HhPev}U-hGPocY_`5w{R8kI7pg z*3oAhIWI8i?;rzh*O$2{w*>*=$gf%+_ZWq$XA26|ttcf^XG!RL4}vbGprJ?zF04d5~N`Z#MR@W1Bvrab_y#dmj; zCWd1v{U3|sq!v=-Z7(nxx;U*m4P z&wWd-s^LzW9LK5e;oF<^m2vH-XP3u^64H0K2+lb$l+WUwCREup{AgN#qMh!iGUiT$ z1@~-MG#V4ysquXZ%Jw|$v`6LT(?Rv0~lme1Dx@ z20JRBNG!?CrMyb;h2B@2*Vg^OE!p-jR?0qGa+C9$_XX^)>HD#N-Kl5zehwjlos%sd z7lG-6nppKhumc)Rj83Tn%+3h3Dm(4F&qFlfqB7ry)f+;G-A-JPQBnbX+SBTsh=JcP zH=K)qu1_5DQOqkq`UtqnNlIlC;WCo7wYdaBKYjs6D>!CB)Ym?(azNV{6s3417`3Td6 ziAHa2I+LCqM<0Y-`bf$Hs|6w+nF*)QZYy=>6n?b_^``Q5v^o=`4k!SVMIT>NVoMoM zEkGN`!hA6NRI`6P_B=zg!ku#iy-0;fVGPVVVjSYF;uq4+PXEjx_@i~1Ea=we(H6n-? 
zW2c#$3zF(c&1sqQZUfubcwQYDjOH#lWp71)%U+NZsItB&U2ky(|NRO$cBrf%2aR>J z3!d7w)43uKZf}x+=$sJZ8SYjAK-=w1Ryw2mvJyE!s~6s-Dei`vd+b#@UkMi;1c0G# zQR;(B@C+@m$XoNAm0sKkTKCt2o%GTcnyPu^->1qJ3svfKV*F02JWE#6=hRwN5vN0c z;?l=!r+oHXez12ykFT7_%{ZVJFPeOM7`9=Yuag6^26aLR>EgH!xTi0db)9S-mwP;~ zJO21&Yn71!j0o46nRENi?!b)6gj3j^S_5yV8TLir;a5c!;`N*0o}a7qGU|{kDztOu zo~0i7cSf=?&VKmey*h24l2Y@?UP5($PV4#!`EAj;TA2rdWhR?QFUSbQL9t8vgy*7!j``+&EBF&VGUVx)`e*AV&%hqgZame& z#T%q_g%*sXfb%7Z<&~Ox3XS0_JyMcLh|Bx4ZK%~rtC41yI4xDrdb)vHvsAo)!9nc} zxxhBM)88MS@(reh&4FD%*l;KDkIl-HJ2;cW@(nazUBv}v2{EZb4;wmp6)Lf<=fjp-SFQRSYRO2uwpAG#y)|i zr7Y%kZ5{Zy(-7ZVPd*cFx$lF2>-aNKZRgxt8CwBl0w!qTA>ii7Y!DHED4!6{j_>D- z{sz}uC^Euq2+Z@WU8OM|KHGEBLdt#(G>Zn}HjOs@%e>wP5p~mp1A9}F3r;z-wQ+0L zw!s|$5XC^6cV}~00l{BX#9`p0Q+FB#1 z^G_Gj`#ksi)#`iYuWuib6M49j`cA%NU(-z1mtoi;&b*@qU&!q>Z>??*)|I<9oN~MS z1m-O0pHyoN zG2BFp)6$Jh*-Af~EF$DIw5F9Hg%3!SP42h`H;7~8LA7ZbRelE8viX|6Id-z4zmG_-}|W}PFEXYG)R{3!tcg0cg+wUD}*Bdu{C%#g6C z*p2Ey2JC#(cCnX#jk$wNm#^?x-C}*$-b`rsU$y3|cA@Em!}}FND7QsK|G59kpKGU9 zkpFePvpFh}b-aZaGrUQn;5lh4-dJQNxmj^XIu`*KJVSmb`&tmLGVGW@I(-3|N$P?u z<~xzrd=j{WD{Sog2oSj7gX@1NXQq+kC_y5WCh z#X<%^TvpD1=MN`1(vI=Bi<@(YCa@5XS#oXfKMB|2l8L>Y9&c^MModgD1RMwCG2*`; z+Uw(*2GQ%L$@F|!?)SqY+#}N;wBpfK5DGe9gb|kd{nY3;r~BvDPE3Efc=-rU=(P+X zvh^D4=>|%JTtm)(uMEbsrE1FqWoPR;)-BN~5(&ZU zow(dog^Vg*YENvH6Mz7;wMY{+KO9AeiUUC?ruW_eg&3_d_rOP8P-CA8Dfv zNS|LHemK1K<$mf-2Z5*k{G^Cfr7C9rIK8^8 zusW-MfC3MDuY=s@AU}g%sU3Z50G7?n# zbE!m-+^5@>Cdz((m__)D_0tx(5wMjP#Y%%z6QuJEu?{?UmQ@$bn(qMHY@2Y1#=1zu zyNASTziuzPus^ym-hlb3!Qp;$Rom+8-CZSr5nJ%O2V`Y-TNtvYn+_SgAz1roKawjOhF3wO^S08oF^wgm;Ek1|dtSjCrj ziNrgT6+tO>tIX}`n-s^X$Lrs;{kHy&ki1OlBrMK%6U<2Rvp{_eH2 zZx1Y+5}@NnysJ02?=>)ZdwhCDS=@>8R;IAHn>+TrXSMOmE!2n)mtEl&{ASctLv%WO zy`krA0YxdN#DV{V({N3}0P0ic?Fx6oVOlGXzo#3SOWW;nEyWk5ybwnmog#XFNcK~; zW9DdDOYhf(9gkOkH7mLYy``H_^-_6N^6!lL$?kww5PDjqt^N3tjCfPcJP2Be?7O1( z)`g>yMrhLAUD<384|yb_4jhzzJW*Hr8~2G?4o#h!t3d0xoJRjZGvxIuBvbklwzWWW z)A<625wfSm>#@Bs4ae_7P2G8a51sOScLPv|cIl?a;~wR~y04hCT2J7EN;F|tn7dWP zegN;LM<0Q$@l1ZT?PsJu(-vpj$?mD@XF7Pi5KLw>Z4WD5_xy75qKPOkz)?G%aK%n!JKm$@TR7h z>+EBkr)sYEL-(zphy9MrKS$mQGC0Rc+HcD{@)y;`@%&~_1QKD@^|abbVX>C{OYW%ye_6=_f6Sa{%y53I){$GzP|CaB33YDp;!=qzytCAG}06A!lStiv?po6a%4FA@NdDQJYt=dv>p@+CneTu@h}cpWp1yzQ%si0y)pHwGM*o(| zthDY@+TEO)KV4|%VzmCunma?ZdxET+fdIbeU^Q-Bbw+0;Jk^GMQjm#718m@JUDDq(nFDo z)G*qA(osb`3`xfc%@_$=Ootl~$I{y3oC=YkHxqqj?q(C$Hl~8d%BXg;7u=v_^2KRD zwxidV>;`dI;L4ZbNq*bm=q!IN=CSE zYm0bThS-O9g8Qyqg~hbJOD&r`>4_S|ZO%x4UA~_4q-DZ09*9TT3p?So{`D=l^F@*AF!-F;*?HLpKD-GkSE z%y;OIMhZn61geJTxx5-;|iWP#rz?Dq7bcuM)^}SWJS>ReVD?QEY z=+2uD;i+D3IddTyZ>ZF4IxWb%{rmubCfvK^Ct(tzaG;`30J$^H;CPN)uHpBw%sf9*3&r+4tby>V8knaCy5N=n}2?7QQ=|Dw5-)S`F&8 zRfk6{yTa)qQrX5&uJ3qNy6o$XtfdVwhUG~WEq&x9PpwDa-`}P9{W{<4Yv6u4eXwhFY#Ku<;r}=d33`_BDV?PU6P;txaR{ z@iaMWf0RpRSEc(K@j^5+xijHPvnct<9?1`qU=9|fGx;uvpo0+hifPGz({o?@Nr;UG zTBG?<-0i`Yz_0lsS^MrsTI=smfPhxa(bN6dwEL^y38(qqUJWrgaz;^R)8i0JWIskq zJyTG@Lz)6VkM3Tal9OfJ5M28}_>F~P*38SJwYk;$7M@yR3dZ>oZ|{s{%^fh;8vTkW zI!BypJt^jq#&LHAG~Bd*Z9NgtD?aRR7iXacFv-vL!L9(jCDlit1eR|3X$}gN@e0C; zPyOC{76%+CwOZ66ircL@r0skQ{e#tN2#=fzop(D^(BxSn#87AOM$gHefF03 z;2Q+NM8d+*X-*iwra_!n`XwNyf=v13XN~=E zJCOd}y|OuAt?FXlpux^mNd<3%$%lr@0L-6|RI-)Oj7s$m|P_Cd@pM-~l~g)w+H^B7${){@E1k^g?I{#XBQ0dM8c! 
zg#%cwER(l%KD;YML)JJYKC802XBjLm8ceN0EwlT8O+D;~NXrJ4YUSum7RoHcBZY$H zcRjN0yNrL&yHypkE(;av@1{&xKUTXEVvRb09V0;cXWjS8yS+Ic)VqKFydL)y7`rKh zBI5h8qBv52?2b>q2eZg~?`VPd9L^8_sAuEIzAfK8mHJ)_2?$2NquiZZ_GPWyBve+U zS3%}>Rgtc=e-KsiIq8_}FY+usrb)6ItIWvHfdtx!5YFxGYzA+~nl65|nl^RPh42!R z^rjlJdC&FPnesvhW2bwQu;I7;%+mmr z+bw8%-MwOVk2~3wa{79FEMhappL%zD(kjufx%~_bPe=JTdxa#QSnR?R@XQp4h4s@+ zByw|RDSxh6|B6TlH|N82es6XbT;b4RY~t5z4`XKYfNr&azMFR)pap+Gvu{hY`Vx=t zteD?_&3%Z;o^wZ}7I-PgT3BLKW{4fBPEUU2?4x# z_Ex?b&C`=_!}PXR5CTG$C*u-xKqR4%wecf&BZmq>qVe_{l>b`&}g@vJJ|PK8wzy5y70n(K7qw7Ac|RtyIw7#Qw$th;jej0D11j! z?2sw#2OUM71EmiW(|h~SL*(B9&wuv;ca_=CYX|xOeLb>rWVzAdcu7SZuDLTZl(=eFq#aEOeW9d( zRNpO2aSj0NHa)xsi7!5-y~|2v-wQ^izpLv*@j@~t&X?(Onw7vVQ$2ax`Ik?&_vn?Y z<9jVmZd22bZ!&Hu7X-`At__AQY^+032_~Juj+xQ6^r;Q>)&N?y>dJ?r*NOvDm0eug z++~MI-HL|y8A}TeDaS2zqWoe$T%-1X`;q4R2I<%PU{Vi_M2z=fgs7&_g-Y#w&yoEh zqokQwwm-ew_u+ZHUG{XLSde!CSXnW^eJ6qkNXg!pXcI4Hh!4I30*1oT`3ta~t^ww|JY2@466bqgd?|qAZ=Won- zd=!8z_p9hg;0$5{1obg?X`P;Z0q2Nyj}w5scc1>t`f%AR>?Z%5AuB8@7!4`|bWnzOMENZuMRQT}a+M9lcT(yz_eS$XIWi)Mj~h7lZw@X)XG zl3$U`hp%&W`?-Nh6jsk3qT+F^eaoZUD#@L^h}Pv|%!hA$++CG#-R@rVW;|hi5B+;f z-4m)g?8cTeoz^qve0zRFOJ>;0kM~1BzHgJ?cBij1$YO$F6e7!i(W`bAgVAzH!LHbS zv-yP(UQPpjL8$i~^W0;u{Zo6ds!X4$sp?0T~!?j|w%nw!5(!cGeP15rCn~XTSmOQ9Z`` z12u8-8@wO^DaTV1P6{0rifJ59aT2?)=jrCO$-D#m@h68GQ2sE%*460-ztV*r{RLl! zcO#-{d~LfH>@x@7^2nQAsV`;!-dD)pZtgWLcc+<5sgBn? z%yTmM0IRi=9H-esNr#xpO?XkJSXG+6zo#=xdG8zh5%VAa-T-*g?W+4WPus9+qM|=- z4u1Ynr~Yl0on^Iy|B7G6iheZVc6CJp13tGDxmn(|;BtXId zoDbglU><9C9D`j4Nli&bkMP_roXMp_K zVv|yT?&6IVi_I)N0}G1AZ4 z0W@LJUM%^}oPnN$>MkNY=?5Dy1sWF4I@)n8#C9V>B8{M?V)6szS!{UU%MNgOj5>UT zCABH`J8k>Uh&f62`mGBd0WU_x<3v2*69F54hoyo0zpKwuK97Qpt!lmwyCb1X)|_&O zjn zD&cIjXJt|l+vsjZ@qu7DFKj6p08_s@ubA}^9{Ju4SGOQ=0p#Mo=k^P^Rq4Fqfsi@1 zX#$=EBr1T?YUYlzHqJ6ZrfMNw4yOVq*SmNef^Z8P{JR2t1)2M&ES4*J55R4ER>SF4 z^Y;MKaqlr?m<^n5Qss8cX`#Rrs(2NT(?g%PHcn0FzBth0Tc6Wg zB~gIhzsBRuWC=CO)WML}UNJI&pOAB6Nt?qUH&XMca#_)>M=9@D{`nGap}%u~O9(N) z>h#$H-Ri!4=}{XU@5hAhd!7hR8>v~p<1+(zLFeWjm}!IaLB5=CHUIf+)92ua;Uz?1 z#f&M+O9(;{Ss`PDqY;|#hru9|0SV8KL%W7N4ILYSetd+=-935!<0SOmy=7V< zeG>wrFyV*T^G8U4`@?%5fotZSCFk7$R;+|1J;vmPIJBX4{<>vEbl#^ePiz@;cX-|p zhTbq_5y$eVG{-x*B%tuTIv1|JUIpRD6&aWE)0^}#+H^Hkf%gd;~gE7|RMS1;v(I>W6+Az7F$q_RM~ zs@RMWrMAllQkjhvH@y@6RP;$3DX&PEFHC5;^@`g+cWw;+j@38^ZJ~NYG?Fe-X8Zbd zevy&|4z+7%QBT$cv&h#Ix$(yMugBZ1_X2NkqT|qrejnL$emj+a=^ea!H8p0yog>in zeq9wC&L$nOtF$0A@6Gv$W{0MjygXWo#+&DV-uZF&xsYROzT3*7JIg*_Q||JrcgcL? 
z#P9ABDSXik-kh}#uoeOmJGa{N^zKXLd%g_2VgW@7zh(1T}@}$ps5hb?ezfV^1 zF?A&WfrxKF-c zikb3{`+4qG1CaH)RLCt(T*4_1OzSSIBNfm%pu1)}6ZT_&p0XqLaO$K2qrq^errBgt z?Ml(ULAWl%eXtXIgi{Oyd>E_u&MCk|GyxJp*AMpdWw${Re;LpN5S3E`F|AmPG;k-- z$&eG5#>7L(HSvjIeWl?0m+w0y)_kbrX9vxEJ2N*gzkZd6)(cG1>m@OZZSxD+b()vo z`VrDhQZ}7`BwOUJV`#UTtd?XAO_v(__|mOU_HXr^R42pPQ>+;Jpb|w?l^DwsnKVb6 z1x#xYo(woQpG0OergHC|4t6NAyGtIZ&cx?4KZK;eQhkDG3r22zAEKSUD3|PUr9Acu z|Dlb%M;>r@p0>p5QQ8FK9$kibG4oAYP08UQ0LE{BiQDq>oPm|KzWPHR-V*c0*`#j+ zZnGDW&EWbwdhXrzO0aj<;R&v~TVxGSa7HxFz`L6wrtXL2d*#l_`)R&c7iR?UXYySA zlhfjKV|{^5_it(bBIE7pp4cl5;7kuvi}l+9p7jd%2oy&uBL@#NT480sg zSd?dfKlEKO5zxdWn$76)S#njgFe`#~e|}n20n{Z`HIw?u4g1lJz2E0#diW-T33Dg9 z%y;1p#+zL?4aXI$1CqO|W1=tLGt%1qQc~Narc|K2ovYQFU9F+BeQ^&T2!B`Z|H<2% z_A0L}-D3YG-y#&*fB|!(NO9jA_r`pQz<^tSFkrwoQT}_zG;2M3pXNRHCXupq_IUss zW6!2$&8ksjAcOBtHPe4_ZAwL_QZ+qlBi=k~b&*gbp^N_I{SMMge?P2HawCfej>SC& zk9}5CS32g`t7vx4p8$)`?#}uYJsyj<7qI*sRV{64ZK|qyr>@3QO{+?M(O;GD^Uz~| zdg2}vX{vM%Dn3!f_DJISqg|AXOLbC(7u?+Ulu+7H0#U4a+_7%b@HN{+!hz86vm_`0&`Wnc$X5is(q{izZ!$wk%YJU!)( z2^7gt`tS-^`yvle7&yXo#niTV2l1nSj*O=v3kiMhEA4rqtS9(AmOkH?3ktw;@Efy* z)DF8Ij4svgCL4253D%xxo>?K6qpjIXMJV_b(oYrldd;$VbeOF+(}xn+&=m8`iJ&Y8 zcoqoZ&W}sVyR5?VJE)$@z``{>loJ}*?`n2zQTi!~^PPCxvGSI=+J@;l!S%*})(yQm zht4Gwt4W>D_nV;+5V72bLsP$+;7nfze-tzU2x`IrUW!4J4xBbcI3_3Tb_BW?@6R*V5 zIIW#q8XYC~a$1F*|8}$9vtJ>9q&TJ)P|{#)ylLyaJ{hxo59SSLdH$53auP3ZUIP(q zYZ_IQTsNt`zBf{>w0^bpujkj}8f{kAqAA;Vc}l6t)tYV&SLf|i8aDf-|9F}^YWkrA zxtVOJ7HN2*KysuQA+{lLd`%$M;jm_QcwDRFUEK)N0kPB<6*jVDMw<+O^h~A{M|NG` zq*x7%1_-1N_v*pg8R`-!(behnTHcbpT+1C&D_|Bzsl_&0?cVJ1HhJ34%1ojY%R z_L!LIQL~q`0f-BE4FKo4dozUygf!!1%O0uqu7q?DWm6{T!%8rkq!w?ZR0mR=v^0WC>b-B}3ZF*!w(J+3nrm zeQ}2Zp8nk@TK|p?!BU-ox$DSp9w_!a)9*JDGpp>OAI=G+X1(>D4O$7|al>nMr2f|B z!<#O9MDoR4p3kJU<50=x3g^Y*-f!=Em?#c?W%;+(Y<6rTo;}u_qLTOA?&CcKRB9i< zpZ6Njj^%TUv{)m5UMVW6wR5T2If4Qdb_{>Qt z2dcW=oWAN$=#DHUh?v{Eq@OOAlo4ki038Ep)$Jw|>(pJkq6GJL`&e&KIPeVS!0m2t z>4LYBBD*px8*Fdex)@F~dL_IYg!4C_+a%X-IYLT@Yz{Mj%AklWm%y`s`ba__-4Rof z(;4yrj)Fjp4Y`$rfPE7%n*88_eylh!Ifsm#3=l`>*N1W`&G@Kac~=7@Knq1V&308P zq%qsPf;(%RHuu%TsE}|j4*l}3dg*fL&Y4T?U^^Loo~_s7L(9|@1PwwMl>6FWfVu(F z4bs76n{{)4E;}CuQplfPRQ~wlv!LAVAFHMcjxjTttX3=9)9ROeVHj)kyxx3!<#iu~ z2A&49Rhax92br|W@^{zn%~z@$r(3Hp8|PX?7VR&lPINfbDgV?i;lf3YT=yx!xE>m( z`$Z{{HbNOEH!UjFyU;JaelZhJ(Dzn`q` z0-GnTZpo#4uBQma+jM_*MO43?qq8wb(y4X07!0Yc)VXJF1?ETzyKs;;D@C{3HYn1! 
zXMm$VuaVy|Yzy984;5F*NuS;#okb*h(zo-H@}4;>J@%9lJ?=}0K%4@b?_YW&)avzp zJzm#;3u7Iu`{{<;y^WK#0dM`x;O>r_QEcxEDpz!%ucoib3#1UYzW3-0YU~dvm!wi= zj;gL9D|FtPT&OS$w810O{yz1`gHkuz=I4E0v&b5MwUs_7*}6^i%0t1oJ#!p*P-d{07eJs| z`}BUIbfb%XD2j@jv{SF#DS2&9fZAy?b9jQ{@Z(5eGFyBuV3kyyXelG^IR`*i`WQQz zOApNx+ED4{@_N4}@}rKmLuajQDSy$92n^UukWf!19@gGPT87i-tbtV1zS;6xryBKt zbs^3wpz$7?nRp~cpz5C~+l9D}0XPr(lsQ)cE3Xt_1{z*0ZL`19 zwzN8*l*Ot9H$#E)Y`(U-Pq{}#Fz9F79lT7Io=2(=52#^yvwy z^M%H*_AK;~J;HFs=Br86K7$T3ijSs$jk;WEMoz_9wl&-ZbXh{Lh;Qr3%3Qnj`=i`W zk>;=0534Cxg}YHa=oJX($G)kdFLY!A>U?2%?QzcTeo`)9q5tMPX3tzLx|(0;E%qn_ zjqJ`h)qRUY8;og=XD0=Tt(z~~}Gqp80bTAmnD~CI$0<7hz z9q=-cMxXb~3o0wJ;-#*0Q=U|R&aRv?TjA!^pe&3R7l4)Hg#bT5z`wK@y69$!4{*n) zK12xwzkqXUEgpTx+z*?<$xd$`!B9DNHq_hLzb5@@y1w3zNw_^POh>QQAO7&6Kj+P) z4!Y9`f-|NFHU|Wnix>D=_Px$*>g=$eq#&Ttgl*4(0~|Hn&2rkSat~3W+Zqx6f6P7d+NBe4Od3pZIE&j2TVx4PC8Pu!75=-&YM7 zHsTzG*Q+!|;LQ-!dt97{(_>SG!ty;gA$LSy>n!~o_EpwacKDjlExTlQx8n(Mg8NlH zZPd=!kg<$LCinQwHF~5O|@@Q_LBec97BIjs18|azHQ3}TKsk>@Ue>Y3|4AR*% z2YXgPO69uTyeE%dUIS4Jc@G^KPQTy0uIx~3kBe9PZY>W48mM1QtMGsnMLB3^9yWI?T&@e=FTpm%urMo%1RF2`m1hd+7!~M z{cCqc(qQ$PUOWJri+L66?R2o7-IZNz7RiIYo6KcXf|ch}n3k_4dn(tLTvnB+m43Vy z40WvDLBgVT2sBE4|M1w8+a2G@>%a;~#14}?PF(VCR~6myf?2F&fSW+&0LFg`aJVGYE?9RJDV)B zUAg{f3pF3&&EHEBvs6)zg6$UwR6uJi(tF{V7r+p_z0*2hCGWrk)dHJ` z8&P!c5YpRTb!SBb-!D}@heeR}FYBdDh0per2+RDfYHz|A1D^Fpg2mJA(_&Xa@I`2u z#hZy2hmFN-e?QMF+$+jtRyvbzwkSqLRVk}qZ_t-n?&tEwFhLu3UDE3r#i?&k&dh_( z>B!!^47kYYyDXCvwVpuP4Vm~^wRAQcZ6>&SwvU5?X(&uUC^T9AaIRK!Ia>O6&~rYJ zI$-wYc{QglH`7&-4B(67;XMJX;?)LK1)#Im0lI|CeOB5YKE{rUv?gDQ9q&@`tnxJM`~Tx-isQ;;z^B6JZYt%DG;!bndp1a&i` zAhchBfA>qpDsdOMR!g{ICIp&Zxi?X08>kn)eNmRIMK%@1y=$+#1tSiL15sjMQdqa( z1xQx8F6&7P2_+=AUOh{B>hF1@kzQJ$r z`h9U$lkB8ez{G{z34&OyXS2-?`SKPip=)03Q|Mi+_BMh+(<k3+nzXw|si#k$M3c ze^g*}{(_BmY2>F8*x8u{#P{vRV||-^wyiM(ouNsA_>Ok%+3AyuR$T4^E(04~pf-%zGT8lsVSI^bS_WPv>5D%;^HT7EmD$!G3nSUqJ|SMLB5x zoY6&(N`*wy3Ket#b?K$E0Z8Qqmj7fif3f%J(U_xxdiuO>Kp@77&o^&(Y`dnSw!J1m zgr4lRGOwpK%1=!Li4LQFr~;z`rCe@|2hwltF7ZHL!S6p!^O?456IDLl5)3jvT&vx& zTCKe4>afeAb^-J%q7EnfRwk%OOVq~_yey&&Br7$XsqG2}>Bx0mje8NNBSU{(f9x!| zedG=JlIuj_@5vMNgO=!a6KQw-aEVoFJ*J0}XURNkySG}I3?PKBeL^Nt<0!i;RJh)q zC(3Kx_d;Bw2__q5U#>Ps+irMG<}08pdDaE$fB}n+mZ2Wqg*pW-pSE0<>zse0J>9%O zdEXVKd9XDu2C@*3<3t1z{{c0vf9gGessuz5Ea>L6>aI`c;{5(R=0F{s^y~_$ z#KZO|nV09snE|5Oe<|`&a7g`#)X<^cDwNW>lDyr}O$ScR zm9##x$x;D1?s0oZL4X~Ipzy!m1(mVFA-nFk2Spy-p0~DQTa$yls=wKc{Dc*63O65JB<<*`km1v=Ar{~?WL0TWZTr-2be~Hbn1!2Q+b9(US6^{>h-ca7wtI76qqGZ;ThQ1U$|T2~u;s1KGF z$c#C#!21$1e}z+O*< zU_!uEtGhUi?M38!lzAE+fLqjO-L=|-AFU|Gy6FaFe~E07;nyq@3!$D&<^WSr)>7{} z&_V2|g|&0tDs2Z!_V>aBOxC*vU9<8_5@C#ZthzS=*UlN=+3n_CyX%Ux@S!{>PlDW< z)K?a<&K`v1I0y1%M<}8h;caDg0oiE1j1V$H)@Nf}`-&Mh%aUOu6zRW~dlU^6E(IJ2 zZSmK2e>Gj2)mriUexsJNli$?plmZUMJ6CiwIU3hlYFzK9$eErX)3`Z)Kz^RNlR2UgtylVi_n`(ZzaR->ZEYHLh&Sy8a%P zEpup5sAqAMCa&B{SzJG*fQW~V%1=mG`4e^LnkS&$n>KU*Oi!|!A&A!QNrH~*B`vHSnIt6Lpb+TNCv?3pp zT2>DrLO6mnV1ufY`4M$G@XG?WwwDj367`o<(f(dfe6vD;Kx-?%YQ4hQn@;Sf%PM&_ ze`zl-CWz?y33lly23;SZ@CruCSN+lSy?%SeC{Z$y3H&4rV+*p5_-J;(W;bhaORTuc z+*!2ahsV>?HB37~yya0H7N@rGj`nGFJXFv?)rDwSQeZMvrvs4?@Ij0%_> zrIsIk&gIKEtI`g8tv8Qatr*C+M{UPzf6Lk51!LNEAQ5JggWzE&GP{0!4Pa#*EG}xu)uzOLUCwH?AS#Z(3-arlpa#>{1@wt2IG^rPD z0z6(giwZ7Y_3Z7uH;>4um}ar)pvx;g0F^@V?`y;GsCIgB=Ia%|&9wqV&&Klee;Oh6 zJ9eEFNDXtjoD|9jLVM2Pa5DNuL#dQ=+)1c6bfwTKWAvmRAj%gt!s>;|Tprrl>ak3o z+0?%cw`%=dv-$)s@Lg#n$+#xa*hvKU?xVn5K#SR+wMae4F1glapYmbEb>JPpT%uSv zyl~s^1M6<3b00KUrNz#Ji+5-Of9=(_^rpY@0yL@~d{LR4QO)KF0RK1TirwTqNz_@s zJ;XQZX#vS?Kkrb3bcbV8vr8I&#s2oOj!RKNDeDUIX~=-5O}S&d2pP*b)5 
z)B4#&sX{urag!s^{`4@210-TN;|)px+<(G?p$CCyK`&?2lro=sf7#Ej)R9`Ha0>r-sXk)`yhN5S?3tus=KggZb=hhs%!)@grb4yeapd9!X^#F3p zJ&fH&b(7rZ+ZwPQzI-;&RMJ)XbSpjzk1R;ZD$>M@v*@5}kG!ywgNO+f?CRZRcR5^E zpUXx`RdaQISp(C#f9;04)nJZ(-uJUj__)PUw39QhpBhW{#M}$z6(@88W7n@0Fsc~# zY|vKO^itD>7Ei-<-BUtl)$FbLoKeHp+Ysv{sDW*wM3HN%`Xrcln{y6a);;v5JH%W4 zBVb0N@^snv1F{JbKn?uq`C0O|lfFM~y5jt0c(w#ecbrXmf7optP#j*x-0SL-AoXT) zx8%oMtO96kC%cek9qI%HdiUVchrW1qyWYCD90lO37MvW2B%jXfPdG5EDaw#%{OPz@ z%pt=FvPiQ%rC`x_fLmiet*y98+So=tc~;xWX1@S$0?-%d#)>#oewKE)0t=#cU*5bp zInr_EU0l_Qf2;)jWYDt7rBk<^!h(3+ zHntM%?$f=!$^BIdE}{pmeE4QC;rk%|gAtzC%64DM^=DB`6HE+PulVr1d=72=cHZE| zDf15Bd{{5{wSCkBxyt74o}BC5OkPk%2CsbM0snTmx!W^n+OW2*ZGj4~h4VPcUn%*U z+e=n+e{lV}+{2FsYA!$Om(XrNmcelPhwR<4{|8Gc$Fcw0Kk}aZ3+>7C`G@u7Cu*=6 zH~$@iM^o!l{X5xkX0))NCSGmfyn zf8hVL2)rf;Na6o`=_4ToEcw<@U);gUw1_&j19!l!WIUjdA@|`7e6QkU#I~MLG3PrC z0;28A789sAK_$Ad3cItwMH|xaya(BH!HU)t3ZLehmnH(=ii%g&yzg$oW&k2nQw04$ z$HCb~(BW!FLei%fw zR~>D^e^BfJ$vA$1L=w5bA%V737?SI8pM;!u!L=0f8F#8CX6y7eF{+42eA>DVaidUS^9Kn~V8< zIY#?H_U*YCsIM{lskB|ggI?^xd*MYd#&*T-EsgkGfAAK^zv5{Q6qrOq?ge5aM0R|w%YM@1J`Q@$ zXa<)`_;&}=tGNbqVSx3kL!4Jf>_I}dUg5h+{;;|B=}sEYM(U7(`iMC@9qvbf9$#k4 z$arx=NA9PU$^9U%t3u#Rm3#@;l;&L0@f#r4KSDVfvXnQ6jF<76e|A$)*JZrL0iWvf zA~i7FG!rln38Rw*>O2W-vFX*u_{UuM`}kK#LAV&=nR~ZHtic!Sv{Q0E5IiYRx9mk9 z8Kxx8%n<5s6J93s590^+h>V|F3(}lxMQnyrp*nSBuwg#VDb^JOPDL1ee4a!g&tsSP z-O=Xgw*JYk_%GQAf9Vzf5o>*=XpaAM#KI=zz7;OReRtB9^V2J0CH#;fM`b1IT+Cg3X*k4qqe_9+^)xly#`fD6NARJLh zo8rF@IJYrYh&>5Md-84jOsh_lhoC#x|9mF){Gt9-kk$23!EiM&Q#|s&4nVIrv)f?gMOg`yk&0oE8!z@T_oB~SY8lWxDiN7@{7iL~{%4N5IbFxD9HU+&FG z#2_xVNp;jDN$Z1YbJ62Wmfi3KBId#_fbGqB-}~qaf0UDAe$=WDbte#-@#UTEUiw?ibqn*06#J}WTrdkm4AVa~u zqF1(^%yTxUW%-t?7>lP?3Dj&-rc14qTywdL;%O^z!-CRsM)zjymfO!KyY~*aJhso9 zqBH*Gf2x>|3~8UuI3GLAZC{IIME`&1B3X;kXJowm)o;DT-vD)|JV|$GeY2Ny)(`u)2(6D=ZQ{M&E3MWCx3^d7T3kf`oY&Yw< ze=R42kc`mAafU<9xRe$^{JJ5}kC9_^y>$ z7qC|T_yRf^KPaEIXnKC)SjWgEWK*?7XBJfuCwsf(>Z5!!pS$d=(P4}Eo+v0DMIA19 zzq6eNrgpnZcE;zqJJvT_4;XM^NA@<+Bm{glx5!hK>)_X%X$$huDE+h~J+@0rYh(x3dU+a=I>K3KzU=H)Lv zo%z$es#vvJ`=-BE91-XiDFc-XRLX~B&Sy>c0c*mJ?{!DlPFA#sCS-KB!M;Yuf0>2R z&KWM>cbtwDf{Zgyd3cIbgL)97lT5R1hpTl3j*8jU-3%~R0*Ny%5w|Q1_e}04S*th6wy3jGc zB0z8#D$GBA?t0nLl$CR_)iIWmf3;>TNU7(OZGy(xnILgbjZa;&9%4$Fp~91m4Vh15 zy>`>w9-x-wK>4jRBL4mTmkX24_OAo-za{!461aYm)719S~#~H+4;bK0=m|Pzw)TKfUJLgV~pz`ba3UK zj5`T_6VzkJbp~^ajKi-#PW-3ux;S8qj|AbkCXn}d1{J<1#>QxaZ6SSeQ zY(b}e^Dphp0PU3~(19Ug*E+bZ$g#FHGzV!&wh{u@36$!I{jttef6Wc7(01IKBR~Og z_CBX|;0Hj+68qDI#c#WOecbOpgUnO1PeGe}=PqfJuZ`Z0ecv;JMf_8j0lzES|2y9m z?!(BiSJ=~#@d8^$_Bt5ZUS`?s%_$u{V9->&ZRZ8sr$BaH>tp!idxcg15#lBg8*0&*5Ic%W5Sw?ec}?K z4DQZw1QpPJ(#O~SjO!X%^T^&ULZAMObF%mO&M9I?;Ug3KIr`=Q+_$h%{unP7yGd z6sc{MO#k3ovEof-P_DgHx4IZv=)lIfe9i`T!B5m&a=6foEIo3gPZyeEgB#s+TX+yC zGwp3zvV|;*wJveK=$kjSbAkLvJ4B=%Xv-eke*wM`{QvRU|Ivpb{mBR#Vj^U`|E`B) zoBjIdpd2wX?6cwL{Me(}ca0h6!f4Y;EM**9oZLZ92tHp0Z7t}=?T_W=Q=z>(Y<{&p z$Pittul6F8YM|e)X~5ZoY)=D&w8dx9&QWiC3?^M6_7VeeJB@FaLa1f32jV54S#W+d ze>I9^WsuUA5_*Pm%3@#OD?AH~zZSutK;crj1wx_Rmh9H|lRJ;i6SR{O#0xZYN>~1^ zG2?49wF7n&dzq`a%t9ZqcglEy0-1RaQB<;T0UNXS3@T`5x8uG5{@r(85W6|f3)s@H z?VZbHZ6o%9?61h44nF(Xew~DI_HS*-e;=Dm_Fo)0wG}>+Vb_kA5&*L;dyRyC>kp6~ zD8oItfm=Q36pxo?D(-=*pe6AMPuz#+6;j_3B{MJXIpAh$(*=$iv3|3P=y;ZSW^oK3 z)7#M*T-Wc~r|B&o%Fwi8U>!k{Imqt?m`iKH6zrwSfvAJ#0~1y0F(g6K54hOsf2>A; zh(vc8YL2pSP>697;*AfR*T?-h<`$lH^hE;queAknw0{^cXs4fcAoFYje|kI4v)}#J zNBiTOqQ|l0)fyR|uizSnUrVWw*IO~ja@7wlMbf~(Ku6qcD?5^l^54us+{X}O_H12} z-j?e2P^AY0B`OBQMs?qM2SIq1f9(i@1mQ*W^&7;g8?k69D9%6$7E;qq!*ZQv3K%EQ zwxg|pX%1mGv18taIwgEE!X^EIpM!WS<`*^zAdD`iSlc&3r~TO1^u=Se{CQ^5*BOcz 
zGuRdM8+mTx!~7XH9UxmXJ7|e>MOr()Y-Af_M!HBUeptV7Y*{d*1RnXAdURdHu*^JnzbhESW6(Mkvv2L!2Aa~RSHTp3eHlSAK45FoK_`c$dN zu?btU(*2OodlOD@^dr{Cdqtp|h@ThJFsxppJNBb00~vsL<=4KAe`^8InLl%t#DDD% zA_@Q4mtWiYjbH!qUw`(_WUVFFlE*dYZ$21zy*H{SnNkNy5WmTQ1_c)c1KkED(!Ath zc9ENiNxgBKqroRBczst;45@bfnzwVw-AlQe|!hSf@1yek=&afdoX70ou+k@FrutiS#pK>|Yx&5ybTK zH(U4}M?bMK#LGdCM11?de1EjrcW?G%FS%>zN`b^!v9f?7e@hAGdS2w9c)~HZ(zhd)^pF?vBKbGqT?HwEA6d z$MFDvob>-+e?I2#bA8{Zh2WQ>)M&pocqKl`E4MPo(4f`vQE0KLdrU37gy}X>c>`Ms%!WIMD-MIpr2^sO_w5Ad( zUJ^j`?nyS3WgsJjHc{J`+`C(eo9b$xEU?}n?w4(3e_x6=0?6u(=A#v1CO{Wk3pt2j zS2$A3=EqI79=;OC?sceXA$IK`SJj3?+J${6a%Bao7!-*X)nK@lqxmeAUs}l2B#|_m zP99=}GK7X#i=!+Ds7q{JsarI9o?oKSja83bXnvbWA@m~JDN_e1NmalL%Qw4# zUhj?Qf210GgSdwDko}fYxfc9miGeuDT@gTd@v%IuupO)WW;4|@RbtOX#x)X&5IBS4 z5Ag%WIXv3Bzxjlt9Y;SCn@IL+;BUnG_G9aQ`|QWBMC{Z083W(({_XppeciwG+4x(T za#8y*j{Jr;y3UY#YbOg8*b>5x>NnuqM$#*@e+-%H4)B9Eyyq4Qx=0+_4cA~y3#35gRZ*pF8u%v>I^1hlh@yan;M25rnspuIs>NahXNw;y8~KmIu*{_ab@ zc7;)X+Kl+?zx!ko+a`Nr%;2xRHW3eR<>ZKYa}jvX$huS$-AG9SD~=b(Pb7hdAjcXT zbd@B)?-C`EgCq~uc$;&Z6T+;U4Uh8$f2)Fj9?P{h_`Ws^h9(5VyT#Rj^7g5(`jP>g zj6E=>2>9H6$X}zr(L{>E#r&O0BGaO%9kFr7kRTAn^GMVnMj>~o&>l1ssr~HgYw2bP zcLJB5B+v>*Gt|J$Iz}c*Xyn_dAry5KT5&vx8AyI32vaC}j{d3?SSKDi)p0ESf5c6` zV-2~=>^p`LU-|Lj|6z^3!XNvNLzMGr83P@Rbb|OdY|+;~|98gfp7U)3%BqnlXz!%p zPD+V6o@4zf+|tmF0%G&9Pk=UX;VZ13jE^MUF>Cdx4-XqYLzRtWW54V0y(9=Zw>JXo zEcMgaS9Yg=9kX`Ao#&IY+wO&|e-RDe%1gx2kui=%i2<6oeex)*v_9Pj{S6#^cRUtH zuk4chE(%qrdkP;<60!Goi0_9uL!=4Kpn#nvZgN$Nv-5v3es|yTJ6UlZOU5tO!~G|& zq;dWGe=tT>jKvc-?zhH$!HWau?stC%Tk-V~;Tz+e{%c2m`VF=Qd9)B}e@f6>`yU#! zO5!MGEl_&k?XdQAUTEHa$%^^MSxA`at`sL&~=K= zvXL7Qvj#z3O_DhYF>Y-u1m0Q8+K=fAN3A;Af}7l>PxC?S-#LgGWYeL2?6yndr-;ZG z{LYC_q@Bb(!CKKCoE{+>e-h80gyJSMUSvnRC-Gdr1<}7L_!!5e{m34R ztb@p%pgq5FMdWD7e&TmekF|8%>;BpQlR5CShK{y@tkvJx#;^ZDZ1%VY|HK|K_J87R zKtObmL`gS%mnkY=-5o)aCG6OpDg<{nxZUzhJw?^h`|ICk`dOGREM04^Cfyl!7=pM3lpYX?=5xrl)gT5h z!qgSvom5e4f3;G(8P>8=Je5t}y>!2=F@EyI&z8BzKXj~To1SAfD7ukAXRaow?7nCR ztE^Hpdvb=@T%&x=>p>F7Ik%B3x00|AbO3d7z|~JO?%3~NNF4Ekay+sRL>w_3^Y26W z~zt3sp_?_H5oIe}vXqBjl@AkE83xy#bO4DI|tX zbh$yk{zgfBd*HdyLVo6`E_mjgM;wTn_fYq`IDq-Q^YU;5 ziBA4R;Ftxxf=0O<_GmH5lWWAktGUm(&pmKR?_;0atI+Nz`&`{6@z09^Ly7wt%11G%uj=OBoGMr`(P ztQ}+achC2en?!u|8b<$aXIO9)1og$R|FPqIC5f{i`*x|LaQNwf2;tN|5tkt-PA>Z`g0oW(6O#5 zba+=It1JXvq~77pkUV9R${XlReNCh#E&@|hZ{6|GvV zr%s`rfSai|Jr7M*KHLV!1Wj>5{Ad4P_&5G&2Z?{)hQ$7l`+MN=Mx59;mz}834Be8Axk-vN2F}{VqB(SHy;XeqrB7=_uFX3xbkDFTRTEsZ{O=3y1e@pM6 zSf1_jmW5m@P!-{8>oS>FLd0dpmSQNo@R?VZ$mSLh&+s#8lg_7`AqI9PWes>ITsAoa z3UP`^rf-|h1c6QDoC`KtHOv2} z0F0;-LD&JYj7D<`Y~~6XRPik&)Usm3 zYZ5Q%v~+1s#sEk^iN$aj=N`8KR}W$*8ESLeGWxi1+Z8z4$P>?S?!O-^e}~Nl>PDjO zJ|_7(yjG<~WBu#9%suY?T$bQpTD}R0ud|u`J`~@GsZN zz#J6-`jMV?1(d@n+-)P3e-=OtVJ6SDmn8GA{DXY1mw8&(ZycWWu*d6>8xLX$x}})} z+ClH;U`-Q>TSok2M8{-9$fw9eE3nVY^YwTTh6Jeb#z9{1qkHrxL>vZtPB@4JPgzo! 
z$6Hbr4SyFnGTg>DkM7BRi1-jNH#mNrA0M`GWVjYw?I8NWK1xh7f0BwQSOWto=2v16o3O;^TlH$)p%lsc}GIr_&xzm zAe26EL8_Nh&o5C>wYpf-6RTyO`n`HIBqn;V=Bbg2HC} z_z2r^KH!?RkJul4e}mtg6Bl0IMnMa*Q9Z26(!>rzjeMe>2Aa2{3aB$nw6W!h=PcGh zW__$7Ck+$;e~B*fz&uGj@KDwI8bbMj=(hS$BM+=`(VNP-Lr2!7BUzWQ-rqGIxGoac zM*aqK>x=ivLyVtatj|xpmy9`_BOwO!C+FRto*%L2(eBWNJc~SR%jJPTkh;pL(;NS-do#OpdY#+z+Ex z*$sJPbP6hE9mJ!6O`yrTihe<(fA^Uro;=2&#~k12oBqTZ$#d{v;2w}fv~E5y9$d9| z`!?cxgh#+}irY)M7Sl$6Y6sGRby})?<_AhBaG9EEbY2+rtrdVhtraf1MM_|uI zAGt}O!U{fI_;yLZxxC>ImvCt@DM-+1)9z>iYpd%8vH{#S8`ToK83;ae#IK#GUgH6^ zzk=zPfB4Y>?-Km2g?s{Hf};;j#*;kWAdEXx~|fMy-aD*j-&aSuN^f6nhc{jTGxj=1)pT=6HRNq*yJ{JB{D zFksf=c6b8U3N2bUJZ_~l5#{GqgIEG;J6uoDz#GVcZM*S#4<0(5@8Fl#u*Zz*aSdNA zHCYDUyWdSxI@BWG>D_7M+CE}MQ=n{k?qFiGI%0+~PXdQsatAPz06#qXD}74l$=3df zf204bqv~jTe&#jSi!lZ^;=2E=(dP<*x-;)Kz;sJ%KG^wo*D4~PPkCNqH01RT{>|X~ zcDa9Kb6M#Ro?z{B)M8KRsTs4vtC~qd0Gbn9?xu%)Ve9#^g4)U72G@-mycBh&>`at6 zaKiu}`<+J~>M$}gkACiBY|mf6jo@U-JSXjD5qm$E@42|mxfu#)U}PIf&Jr0he~`^S zPw&7P$s6RD9ItJ|s`Cwx2@jWDc(2h8Ub=qzNH#JNa5-NGaN9j8O) z+|L^MGfpvoTsNl$k!O5JsB#wKPl%ObO`{h{!tb6u$|ya}+tY&!)zSgOf0WoC5DO>5 z+EtOaAMyXdw~zK`#Keqqo7it}TrakN*1LgGfGbgdV;MgfJmSND<7~h8HT?AbK8NI5 z?O8I`9}sD)yk(uU>?0tPkGLy*looN}5DLkBjIoFu#m#Ztt`0wv%Ja=bRG?zL%}e-` zSMXZCgt8t^KIb6*>+PYLe-HzY!mUTpC9*^ex5>$rt>@x_w3=>&6e3 zo?xT@va9315CxfkYOGS_j?zuRPxiuxLHM81Kz9%x z$ixkIlRK?k?2R_=V=nNn0Zg$9yloY773fHnZ;S5D%OI@dc^+~Pd!180ru&c%pIq1$?c;ReP~XMrC>UuO`^XBP!QXKx zjn$GV*N=+lf1F5xeI{v19S88B7_a>pejXxB@H>>9>jFQB zbr|*$f1i%_aXaR0$5@8-7hW0nuYG`JIDrTg={-^DP{%Dzz^wI#9DL4!=K^}PJnl~w zrQJlE;Cy~pLW4TjmRMeqrz7^58|!1ndWKB;scXV-MKa0Reb?9#myb3>uHz>bM*OYs zIy=s#5pRY$#O*^U<7c*kl)KGk7tveTXm_&Lf4_=;1L=6_BF{%BrKrL^Hp0Ormi_Y7CTe(wxgE$C!$szU#)Ds~P z*xZfG8-oYq3Bf_Z2SgiE|9rl;Qog299|PvS%3Q`OoKnFtC62 zvc%8G$sUrxPJY(zZ{5gM&y=Dp7pQx4f02)KeeUp-+$@v*+|?Byb`J7Z$VkKWlA4fX z5M9!4V7PHe%_MLa7$=qx7W70EF*=pBQVYNyhiY%Q?Z897Uy~O;;$a^#C+TFmvc;Qe z=QW4D@LKKYhveG`-&T?QfrIF0rRE`b=T+8p!xr66Is6x7-76KT3U~keBws*v*P{#m;_K;a(-7Ta`Rq18* zGC=Kt_%(8e8Kw7ZABj5+SSDZaf76B2jd(8&TMzi9>{Q>-**dUA$ww};i6yY0>q--sc;$c^e^EolZ!#5e zbs+UfzTeMsIUCnsncyY9d1hi`!@qKDlz)6KG;>nye#<%FPu2mv?T*MzqjQD2^jeE`Gz_~hb zinb7@DrjxNp4g+F+g4+(mg%I{vqh+3YdhAXatf$fEK<)r;z&@0^Aq?@F=H7Eo9<)r$sd8YzR%jwSQ@e~pLS5&RwfNEGD% zW!HZ9%HKMbuWdMS@E`v8^MA!h_&Z`8(&`%z@jbTE2ibvNk62Js=RUX(du#7z@G(n` ze-4omT)1aROHjf4 zJLOOM1Th;YiPF7?g*Qwz-VnZLfbZ%jtRtcp$2u92-WIaH?#r^q#O_&=18`H zoXTI=Nnjv)U=GN=c~Xg3`EUPWE!O5-WUGk39(0MHigAj{e`~GZa%-gGTLc?YK-Vnc z0`E)f*}WZCC8R#UI&zAOOj>Al^Rh1uSm8$daH2Yf#Aj|K4uVkwa(!U6(QhPYMCK9x zBfn2@G^CwIKLEZL*_VI#HC)H6i{OU~*yEy(G!eEQZ!3c}F-tDuX$!UYi3IL%eoL(k zUr25tM0&a0f3r84#HgP~vnez$%R3OuNC8i`w#G0e>iEWW*2okVLLa2A4^{A-{F^@^ z{q=*jMO>C(PXDU$`;NWvi;eiZpPDIeC~v=aRdp4*8>BGUiM)!iua~@?NMrkyXiC2I z4q1KxWJWQ?*p#c|nG3kN8Avq*>i{ZYYOrBgo-dune*shnNBWZImKV}}m2uS(EHi7T zBz5Q9j_hwYo$)4dFSKj)GyZ?}3cvgGpBN%y^WSv__@cm4?_vjkURSu}mhvU*nW*iO zXK5_SmArGqy*RixInX=FmCwsSlC}PrMUY3+RyovB7oeZKS-Z$;;4dvs!r0Hp(a+;_ z;D2E=e}r+Y{rx@^^1B*|502P!(yu@B?kdfZzQeE7KE`Pz#gAA_u`hyniO(n21@l;607QX@Wk zhhL{av;&Ff<`m>rBFo&)$dK*v6X$?vIp^KVe>e*z?&vDVYpUkG>u6v&-+-vy=B+@2 z;OoEl=cE^uR|b%l5!O~z*b7)yNI3t8kd=I-k(>Y)@7Ek40h*CfRUFX*L(30eAc@d zstlE~<9`mo3^3|F%S=JR+Ll#=u#*h8dl+z&O&XFc;wG`(ZdQrsbc5R33m|l^l=E*1 zrHli9(2vbro3tkP&g|iBmDF@n$OF|Cr zJL-Hp0SoGj^AZjt5cMhy4<2iqgDi>OsWVQf_;rTkc)uCnC*-|B-kalln!K;cdxOJ! 
zuz%>Fm|;K>D@FjWCnC;%0wBq3Mjh*1C7lgdWv)wlu;Re zLV74eJu29(!BWI4XQZgmehMcgoFWNShf?8;#aGoEs0(mCJo>3Zd*ON<%fBe9LYfl4 zuaVFI>9Y^lq5U{=ydl5$+31#13w;AX2_fO_8UW`x&4uiWu)EpL_4RTr+3@tbeJp)2;%`Z9= zILFZGbUPn8Dn;7jXj*MXVaew?H?}AUTf5;G507VG)*c6A9%r@guqQ_;*$wHD6 zI6rbF)ZLHANopQLd_2AZEhPRJ3@Qr5baSMFsr84Ny(hRM<{1Cp$A9&QVkJ{RbVrU7 zFRC(4dSP<|$x>LWaC%FC#5kaeX$LH1R@70_a$21ns&Rt|)P9-dEwsuxE`P>%UL|&LZN{K3TuX7jfO`OyKXP3_$+-n) z5ghkjbOFv*8o!5okxY{ljeiwN{-)U90sq?aJJB6}&#ev`k^?Q4t`?%P@k0au-4v)5 z8@;lVWAd8=@>|f?Cf`+<8`xB^LzA<-mH3@92;BzPL7Ol!+7lMncIl|}V#sZy8Vuiu zVBYt7xZiI4p7{M9m@?o{pq-mrj$;NExQu*`Y!$hl0VE}mF*|aN&VRmeheWP-vG?k@ z0?M+IUA7#^e$k)kJA4<{<$6(}U!7Wy-#_^?(4})c&g;cwh46-=?;xw1qwjcQTJO-- z9DcJV_l@>OJ+Uo-B%}kUO6K`O#|z;5!dT&H+r{sw8qoEEU<=A1jn_y7oLh9?Hsw$^ z55iP_rVDr$jhu^Zb$_C)Bwh?{IT`;&o5ws}gNLb*GSIza^=-1^f%{pZ2*;1{n>)}X z@~$$I{W!|X*q3)`Z!Vk`7+>u{1JW(_|MVTk7nIAm8vRR#C@+F09G_+QtSa3C$X!fT z_kupW=ohfupe=yf=h8dIg>E8xK%P^q?DB%7aesa%&A!KFVdyzt7o(`?rGFfDg#$AN7=?@}o>#)N7ZGxt zte}SmR$;BiF1U>$K zj`8of^ocybit7mfI(Eo!Ey%pXA`Rp>=~Ikj%;9kyCMHQXStaRP^dsjy5HEq3JdR&J zY&mkgihn}%r?1gk=AqxnoDxaD#=M{@!W?E4O^e(OW{q1JW4p?vU(psVu=>VIFcIS{ zNT0|BX|M2s{@kK}dDQsecQ8*7?=#R3ijU*{cpL@zlJq^^;&?b7SJB?scaq~r99O@` z863~P$A_U8yz%%0w*RmG$GVb8&oIVkCf;w+kAE7Ow*nFZuyvC1GcUpH;ZMCp8@LzV z_oI!$eVab)iwQLRg|z9W(=R;63+%XnVDVNvXs{3X%?w@8iNg!4xGrNnVO(K+NwAan zx8>+@-b3;M{UGO05cgxcgS}A~fMDyfL4CAZi@IM(wI8?~&&O#3o=?#5e5kF)^J#cK ztbf?#`6?V2zsExycgN#8+L!mo<5wJ4zQ;-XrsHt~?TZ}*Iey1+{d=5gHF3amad^#Q zokX28fUM)%6!RFO19*f~V|$F*FO(hP06 zac3YL<{;$+TYPFzYO<^^$=tAKdlqEA4C9gQ!|S}-z-KO%6|hC zC;*R%`Az07Y?6+(9_wgI>~%tXDYA~@*Z?d#k9{!d*Ckm`Lb4gPU(OCPMDchT6%*`dSY2R!o;MoTJOWFk-X_y25a6iNrIRfS?5Pudj{3MXM z;d?apS-T{&Nm|I%2yufk*qF#(Ts3!kB{H#8_?lw8WYs>hPEK@Y`3o zN7zaRV->z8p7Za=a6K9Cv1a|Ao1ksN+9&6r?so+8DR!xmN@Oy&;{SS{o9JWM9s{;v zw6o6ekN1tZ_3NkaGqfeLJb!58f76D4jX@s!3fTAx=M|CC@<$QGRfiDDpqkk4%!kw= zM*;iC^Z$CDb_nHvIgjii{>$r;bppJwXdB!!Z6{&=bm1>%PkA-}yq?=D+(qWUOIb>xhl_e)|49|B|16KYya1I{2bL;{^Vq zJ79dlrl1d)zkWZ#TG0@HTvPkcDE zIra}^EMhN#vG*yy$A8Ja4@;nz$af(}#c>g&jlX^?@w1U(hH9d^2bA07PsKJG6l2KfEBowz!A78^SBXI*BeSbUaVqqBb6BsMc7F)@- zBQRHsoCp65=y3S2<9P!)4;?d}hvy==)Nnixej3h$k_b6(3w;{kntslMwjbeDCO#a1 zN2I;JuhCFgUcay5`v3#{&p&?WbPwXT z-}7hV_5JoQ+o6tQdyVgaJ$lxi#&J#Ke2JV-d;y$~>wlxxwpH=WuJqTrfhre!R%weBw(&=7d}unH!h%6Ud>)^H;n;&drEVAN-yx2;;T?oQpnn zr9bE9<2|!~YezvIuRZ*gPhNzx5PQ&;C~7S=9EeB z8VGW}smYUjAldh3Z`6SLfSQB=GQaONlrjPY@aMh7 z`G1)RX#4MbfFLsPP5*h1Y(20&f4xV%{2z|f!af5G@6US%L1^br_>m5TQd+IJG~;ia zlA?~8N1Pr{hgbpP^CX^1n2X6b7X$}{JOC)Df&L|dNVL_?1*Ix9!FL@Gyesac!}rL0 z7w;7d|AN#QpZh=W!53-#Kl)uTJ^lT6L4QJv-)+hHIj1l<5Ak(eH*rBOpoHAPx{G?o`V^`*8vap|E>SWe)TxC* zFGHxmYeEi=&oPzBd*pk@>x1VIG{nb_pOgDRzoBv;AUBS`lm0-80OJKJp&70#GH`_d ze;7Ca=Dy$oq8%mDj`-d`pO5?g8Gk1Zc)ZDT3K_nKc1M4pSQqnwj5D;)xBtd|tj2cB zzWoT`Eba^K0s%R4UECKuOyrA5yCO-k?JzDeE^z!o+6UuxAioRhH-nIc<9Tl6`uN^o zyAJ|6o4 z<9h5zjr==47y1kBK-%Mb-gsZg*8nB^{T|05|J8iQ3(iAkeMxXuW=HH7lnBUpA}L{z zr;+>l-Y1fQq@Qr#NU6TbbAO}XEOMXYe8c&$2fz)WJ&4`IdANU!Ph6jj^Y6UC^ZzwY z@OuuK_h`pI`7eqA&+5MyApQOk(>>;sMwM6MfRVa84Aqtlbu6L`q0oe9jTmG`y1*qv z112UVU)_ye*pV(Hc|MZQ(n!%A+KuF4(I}WV$ctiUhUaz3{A9`ehJRmyye`4dBDIT@ z>zvfrZ?Jb5^JP#_CG!$LO8iE+Dx|$rl5~gcQp5Qj+6iHwAR{Sv;xfXnfs-HOIwJNg z>OcZ5SI8%Vj|4<9Taphn6Ykl869lz@s6A3pPl)Z5q=OqoJ;n=2l!M8KJ5CRmS_Wze zGc`P+;;yQpZ#^5V6#a3BIVa{&R@zf&&TWZ9B!xo z>3{ye{&%7Z^S_+;>FNLcd37o3;lCOC;I#cetbK9)U&oKz=TM*i+m(F3{(b_g$t-{V zw-rtQuk9oKZ~v$=L!@VBzdAEVdaYm_eDj8iiLDf=Rfj=4z<-HQU;+PazV&aJ*9*L= zvI7;&B^7z_+&89j($r<(N;9r-#X{~(WO}f3<>W$1ptw_6S)d)Zd+yrH(-n5cE7mx$ zJ7^kp&9q-b^J9}6R&&{TQGJ1S6-()-D(Sj8xPWKnvSh1)j=l0GcK~}P>=Hy=LnW4F 
zDCAvoWbgCOntxOE+PpazAEIf7q0hF>&YCFE6KBNqO(8YS(A*R40yO8MZd+qwmvcJ5j*bmoA z0UB3XIx1V~@Tzx#w!YNHb2oz|i0a&vWFDp~CvL5U^M7Kray#=;M^8Q1D{4k9ab?zr zNPO|k)phKmA>t!TYk2u3E{E!CUfV*FPY;I#hvr^r5S~+BqKtVhuLWQNp8Z! zD*Mz=eSh$(&e4MmQ%m_UWeD2QFg+MF8ruu8sYPVKfPI-baY-ECyaMp zP_s+@436fkUZe1!a-4)gDR=1e_xgI6PB*8?sh-JfPbEjC(@aI^{R?UwZ5^2~d;K=I z6aV6d{^ISg+ab#DtSeM%C__$`x=qiEXu1v5ntwf;Or~efmOtMoFA){r6_nI3kw5=* zD;HF;WuACzWvxlK=zW&oVZ7Tdm&^984lZ>P*Uv27Qjo2a-`5h+b$_3)ckM0wR5w;a z0nRksEcNqK=)4&NIeO*rv@bT5tnp>PYKiWSd z&3{Pwwf~Z>$6A;rkXWRu(=KJ0D4)~w$$H8T=KFRiU+7*a_JH9;(w|IAa1gKQ5~)9B z!AiyK49ThM362Q0PiHhcOAL@53r0SBRN0Ykuln46dsnl(av!g|+|a32Zh{~Gm=&x0 z&{avaRbu%bSWyS5PwF-N0py~AQVh#9 z2@wHiTgHExPlq?WE5PAO=vPd?_iMY>w$E_6cuk<&e2^d8t@nzjRPdw@tGZpi7MnvL zukJ?OgXx#OrOSHJc)N_=?YToAI$E3uMzZrN>Uq2$eQ*|@Wf1)&gyVg|eyZ(iS$~2C zd7W90lU6v#Y}%s0^JVl885QAa(3Ii!a6$z&cX~~yGvFX-YJL1D<$QY>(x#ghn(L>n zYA)^jd3IIX=W6AjBXusXwhtwVc}=8|YYBN(qCPqh9nLCOIQvk$p9zg}tia#OONSE6 zsd}V(eckW!7-Rs>tX$|*Lq7!7E`Q{e85?qj@SbC-Qcer-a0imoO{-(-8k6UCzLqwr zhlpFkOx9xgXfLAUQ`GD9`OIhf+EWhgp}rl4Se>MfL8xrNFK2xs$lKPvkJQ#)G){r= z`P6J(?{D?ySf4lb)u}HaL0c~zbuiDyntfguF$LOzma6PE;2)6eigZ_2U4QxZM)(F1 zpFA@1uyOBI9xd~*&Os&?UEKS%scznVmIK5XT1J%I?4p<%@8l*Hk5f49aa9nl?%hYd zZ-V>t7#)_6Q*@p+>wC84OURsx?{2;j!^*JF!{z|Z8QonZ7Gu0tg}5qSkQ`+B1EV^F zU{TBII@hcrmiAE+7~V%>FMku}>~+k!T1q1&%Fmz-V`gm_BoHC0RC#zb zeILG06oXwt#Kz{6s>+P_GaPTvVuB-QEI<28HZg3YAXNsqjGB*s-1OC=WW~g^+7xyu2M6B z_d@5g{h~5>&ugHf66(mx3gpQ1`kKb)hP4mVerf0ZX-iAfaTf<5#vTNDP zsKS{~or}4c`*qGhS1_UHv)j#%juK@y!rfE)Aei@kd;?%&Ut0P5#A@cNix}p8fOI4M zq*-0N((G-LiraQpaKS7u`7LnOiNx=NWe{ziubiK+>5~cNw|_}?>D#(+FEDpjvwKpP zHzsranqKG2c6)SCTEAi5pA9oX1acm>%hywB=e7RHJ*`|{lH|%BKeP^QLFw+5kbzvw z&)P)<{K+p2FO}rZ;ra9BnN1cnRWXV;xh8ZP({Ci*PN!*0-Ku;ewQCC)^C6NSd(8mY zv%Pz3@fu;q>VK{>X;-VXW={F>u$85&r z+GaL;1*O8iKf4Qh53F>>%gmxK7D3B$FZQ3stuRBn@PGPyr=@uaIEZy9+=97JZbRJg ziG5m|$APYq626>{?0NgnjrpT}*Roi2D=kQvbk(lGwq_ji6D!TukL$Hzgv+C4FOl6Y z4YABDda-&<%V@T{WWsc=Zu4HdGi!xgr*)eq^NUx|_pNlk+%}i+K6$p=lfzDBu}fyt z+0g8}`+o_#;_7ih#V2Vw@8_TE6RTiio?mu0HzmKZy0?4rK@=820`-+aM;9B7d9qKHC07yuhypr+I)xkw~O1sc@SB|`(}z27O@lCp}X;iL0r z_J0wD;K-?)=y6QI@4zhPB}_W&i-DR^01ei~V{P!fewex45(1u&&B(_X!)be9b4@Um@Y4(cLMZ1uLM;xS-M1F_W0{(m@y ziz_boo6w7|1?uvwu$8vhyyfS0jD6QrL^0HRI25nL(O)lXZ~96Bp@p^v^y8u2FW1iX z^Pzpk0~r^{=o|C znViqW6UsgwTm7+4KdX80XxjZ=qTwco;sq93kC+Q>uK`G+K*{=u+e^1_ngzF%a@2-kRy0lkWL0Z+1*)11z-r-oyNatP`=F8p zRiRSQJO@?99)4%lkU;zx#ebnqt@&a4xiN^p0bhfH;3GFXalHE4oC=9&Sq98*@PSrm z-lORaq`E&cYN8W)0qu;9D06x+1rO4-4%35~g3<|vai_a)b5~cqop#I6Ec#=7xc6)R zE}XJWbEJV)-oYr-$Fr(VKWly?OtQFfSh1%TWw-y#9;^LkX1`F4zkjvd)oR$45EHn^ z{rujY@26#hgk9P8(C>is%_@I!Vh_#2@>&UN?DcQZ_moV?REnReyvyeK1B^Ak@HDL5 zJq>6FTwIYZCXc|ouM`NnpJZjZ%^nsrDWoJvIr^pqR@!_=g8`!HZa;_-jZ(iETFQLC*=SF`P;{4+XU|4X?ltv zt`?`CHe4}}?P2$RBN*srlWS~F9zhd7&oRpHtlMK}i(NfwR=WjKeAvn`LAzKia%)G+3$3AY zY}_X8zGJ&Ge@;{Cu)pjh)KuxKx49fZg~^;J{(BAGS4VwM5^Zuyw(C!II`5@_dVR+u` zA__LE+S#fe*ymT2PUHHpn$@A2((i4Mf9&XTT0OK4JAcfhu3bPta;KO}{VE5{DP7Rvg=vQS9VleNHt;j|~ILdlEnE*UpRUVI6(`M`ZStLUjopL_6d5Tec_=O9I z*f$DdAe-$`Q4Yd-_1s=o%h{~n8Oz=KT{_k#eShkAhcOaY3bB3nA{`>+y9d-$N<9&GyBXsgk2m9hlXQYkcsbymBxXEH?h@xmNGH zcz*&u{W-K??$%>cPtN>MFp8YX$IJ7W#9P%#>H4O1D81iQ!kNpi=QmgSIuyF+6_hXe zSHZR0LWAO6)-ll;SY%(aoEqci)0#$7zJx*wT67^EovY2zW z$XY1IM@e7kr}xTSi91!&571&)u18P)zxg_kCD*kryZ(wot*Sspz&k0bL4qJG;eUM( zmLR+*z^~WJqn0|lQE}_NYx3~rNrHQ?HRl+-GSTnH3(wrm9w<715Q!ax5O@fH*z6Ps z>_!ib3OHJ#!E$pUPx!1+vbl;dB@MiRJte9862gys%5ez>k$JEI#xG*Px9M{Hc`@~- zAaW4bBnfKz*_A_Eo%FeE1a1o{L4SagYauAK;eVl;B`U8fD(!?~pAKJU=u|3a3y-vq zklaHrBeiAv4n#JdxvCJBA6#qj0#CgO$cfCYQR)q|d1x^9>`U<;8{xa9iJtYkIBF^q 
zDHLy2AY*-4^+_A|EUs%8>c$}A2ahFV?O)HE`VWi`z1IQ~l;fxRcz>7bXMY*RTIl_Z z+`&=X+ zjrU6Yk%$>ygiBQ3vmsf==YN*hMlWB%x4NC=ZNp-$IxbAWx~5VS4W6kp3vKfsyr0!Q z=2$(##hh3&Gja~`0kua~Xz9D_L5xlW4}I9@#vR@eL{lU*9Eg#JNu0I^YXutQd+J<> zNz-bn`FccybtJ-P^NbZcQOjU|&rdy%ojkxYe9MaSG56S=LzY+|m4E!>*S2`5jlR6c zmjx;nk+}r}1hg#j&7cunK9mg2$6sUs8-f31KA`C7v*lTR31U4St#x^K{3A)=8Wz98 zzyT^(>hZi!b_}!;euqGp)*67f@zx6zdrLMFdY5*6TsJS%Xun636_R1ER<)~-C^l3) zuy5i$IPEIU%Q&mNA%8Cz$~H`)kEuN6(nP8f8yc}eSs{k2=%YOva)vRn{JkEH4-kxm z>6Q~#{~0yuYj5~3c_&SGXg0W)`YZuR1T+`Gp8VY+;TbW6R9=CY!FRa%QW<9w1B87I zSt#05hR<;q%z=yhYV#uC$_+NbN>RK&T8 zcSwkP(DR3@S_9GPeF~Y|vCSzQpEL)rUP1LXxJd_45-sm7#&O2BSZzvTQ7PP3)fTD0fxH{JWf>gkLWhVsW*3&r$8oE*o&6hUxpBS9W2S0k$zV_h_vr>n4-Y5^e@3> z0LkUg7n&ziM7F}^^OF#*@p0=5FN62AP(5|1hdz253V%0%ML!Wzx(1pfdpnWuVWVs5 zEmRjx_HK;*2?n0l;5{8M;Oo)cDTn{{pL8a$2qrE1JD~;6)>2i2XY}>#eZz9IJO@0b@YI^qml5cc8Sg7z#Kc7~R3a>#>>&mBJ^Gh50IyQv0CVws6o`;JqBM#5 z$2~s9PaDquaikUTM8BZwZR7Su(PP7Y=2A84g6acc$rxPL4BvQlTP7%x=b_~x$KdOo z9yXZZ1X-P(>#HzNSj0}Sq4ehBTfrO?L(dhgYkzU2q9QZTPdmz`;1*k@-AC_)eF$OkRrdoH+1s$H7=&`!%io>W?+#%W&Qs5 zF@O5q%D2wvCm*!&{b1AA_EN+aC)bej>*C7I6Id%VytR zyYpVY0keWrrZ2+?0`)y{nr+N7?@P7s?|&UT1+o_CFYVD!2Yq&-%B0T>DV$OCu@DQ8 zCNvHU8!QF+66v{T{3xCiAe?)92fS7I&45z$t9RI5jLLSt?I+B)v$J0OgLru6QQ5ca zfkWW&qbLf_K6UtXeSCh>XJ_y@ODlkJ;EJkH&f|is;6lL>&Cy=-ua*uD2&9ja%zv3i zS_Kb1cp5v!l3!IcQA-}Scm6YU=_%#9l<)b`|>ddQ*Pvec&_u- zB5Mq(pC4_{-56u{`=?2JOf+!qj8Ph#?fP~8&U++5VT)6F=X+C~;`Ort34fQ@_Timc4^~><@8pmjoO@vTDhy1Dvkps6 zuXHaZ)>x!wMrP9bZ0fDEq<>vh_7r1y9t$Iv{br057wS$?gYVATQkLfJ8n#l-jSy{+5ZH1vzVDYl5~v6)hmmQEq=jRNmnE1B z-wh}adH96t$&+Zng=^y*oSE@phB^7Y=bty>xiURz##GNWQAu5J< zY%ja?*Ib*Qfi6l1Lx071fdk;%H--XyD7n+O;RqH(}l) zv`ERChR(%@2@@EU!e08;P?q}}wEIE;kf`{YM?-f4EL0q$ja)_eJm&Hjom3cd5}fuP zxYLe%5cH}u370Dwb=n+&fv|gRYU49V%IC{+sv58ucFfjy!+(a)QZZqkyZx?aWwh~f zCUuy@DSZc|&0dQ*|BM#)&!XBf6p=Bn+3R*G$?D?g&7xR1L*3J30q2J=JjtvV#aqj} z(I=Se=bx9^7LakC0vRys+6(+n!BS}~WkAr=v7YA3vQwUsJP29?wC7In0u17(sDd+T z?busTdB^PP+J8mSt1Yp(MYR>%n@j4W$@LbVQ-_bPlK2}q&7UpPW}xVR2CulCo7jZ( zYciHH)-yQS`A;+Q@q0pgpAtrHt0PX2pR%yx#?LDYTC%sQN1Fh_0S%xM{%-ppN({(0 zM&34{d$lpz;Oz(@l!Xv;sr`gkcce0de$8riwn^W!_kT_p5ex54l67HyZpk-eCW*D% z7X&3D_9!X-AvN?HYUASbd@eTiY=snAj(7c_bN+#e89~Cg>9g>42tymY=e!At*@zScmGbe|N*RQPAbMq-t#DzBxQ)0W|2fj}@_lTLR zEYI;GJAbnSG>OHwSayKpm#umB%{=y#PGOQtb_9ujq}O#X70mJ7<)UqD$9LY!>*wN( zQE?ZGXM|5m=o;oBssNf;EtZ)bg;IbosU)22&TuFx<`|{q6XMch0EA#{Ft8X)oc0C(k2MIJb zG3f^GkzxY22;%(^aFHnQuG$qA3`&zwCcFTi`pt~)AJ;2ae0Xnn4GtbcdfYri$(|1| zQ-61)5;)1KlL&x)o(;+G6+;>P* zG7K?%$ia89@WA@rowB3g0|%ps?@?~^V}F;IVOsjR^JaY(liS$ss*kZfr7G#NPitqtqsO%zxeZ zrFiZ)@=_pMbuWM1n3dsU`a@%p#d!_MV7$ta0Mh1fXwXrVT|6lfl>P#v@A{J0GfwdV z-|b|UX^|h&|Oc*_Ux^shSArcU;a+I;=*qY56jZvLzQGE*6m z4@&6O`ZKmQCZOPFp!*h&?Nv6?3w6b-6dtSw!j7_*o%wgAK)sS&k$iJ7PJg`E0${eL zK1;xkeGSwaH?Gr8YpKC^>`6}D2$Hfs7TAR&D;U&5!hezJ%9f~(ZY|f1Dj^^ZfNeII*=_I0uDBQJsst34L+c4L_n;Osq@izkfv(M}*)GUqax z_BMxBPleFaVAa4Nub6>iLlX7I)&u>gn-5ZvU@Z`yjxfIUG&~NuCUh)p2?#*_%K|6%>o9QuX){6nCcahQ1h;_ z#NJQpgjEk=e4y2daZSP}v+J&ON>oB~IWFW>AxeQ7T4J4yw}3*YMw+V>$@v)hlyDO1 zfE@7bTYKo>vVFJGF@N?q9M~5hhz;GT!BMiY1FY7iW!F?HXz$)+)qohlv{8)>3JjbjuoHq0EZ>CCbqws z{f9WqS|erCgKnMm#T_+`^;j@jUGm!8K2V3ISD=&aN_HIH%-ijwwYJeJm6dz z|GQK#P1v8qhAYXCW-Oj^%OS4Ehu|SQKbKtya@aXCf6^-)ArIkwG-V7$q_L2P1n-h} zDY?68VPVPLynp4#wJ!-7$AC^W%g&!zQ)EZ=OG3Cs9#5XR?;AK2lnK010Sr>o8#Ce$ zwVvPg-^!!OI0-h>sk|a^^vom`?wj}+aX(l_$Q1r4XNq3fusIdM&60&=suFcNm zm^-fW?tiN8+QRzv{U#rNKK4TbH`t(5JUlHE2oo%T`PIGWFZeQ~D_{o#XL|%+bsF8I z5{BvsPDBc#XXouQkOIr03CR%DYsTpXDPPRXm~UOWKOc7sP1&$mK2G{}`rwAi6#?^N zM4T<|Bx~L+pMCbqXnX&;)5=sS@Siy)f+vM1B7ZsF0+=CK@U@zS{(grA0dQX)J%<)W 
zusvuvM(&GF`FTN}0Lg0A#JvO|Ws6)1&b63+3qWDD9%R#Tx)2PaEED%l4QYpU$P6Bj zao&rdC`==qe%%`{bM@#*xc4m^DasSld;@cl-{>u^P1ZQGhqXP(BB+o_>M95bamMcn zcYj$OTJr~mc?7J8ztjifmrpTj>y)oL1;ie?V57jhK4~L`1j!R(caHrO*qP0qx!c;R!ga~$3g85O zD3bBGgMM;;?i^QNx~8OPW5Bjmdm?h9QV$~hpxDQV$b}MDE#i~1Hz2OSJ>|XOihpQL z!KmX~+VH!Oy-%Kc-yfIL=}75Tg@@14wIEm&$1bvm=e9iXK{KP=ODd6cmiirq8neRt2Us}51(w^G{Pl4raaY?M} zc^k;tA*`ecxfihK`@6GaKM?Q&uYYZNV6fd~NVas7PuBcR+h|BiK?Wjj!znAV5ip+8 zgEjLi4;5>#^Dh0Gbc|3ykt;Kq*47Z6i%J8<0zlqX6shGYzU~m-)Tm1(CtW}PuxV2x z4T2mII9jbR{m(c{Kb%CqWWxeGs`qTe;x!>33};YrzJ&u1x`92PRLFYVgn!ZBCP<6( z*Zh?(yX`(g2;;Kqyus(Oi_!z6{HwT~vzyWP1`y}qp8x(zX;r_C zUhSuKukFH9tUMC7Iw|lCvk-6Zu9{cC1xwgXsjMT9i6g3A=hmev$OYp5rymMA-9Ms? z$tkMd&_NNqm|;nb$}KvLyi-+phJMtK+*Xzaj%m2BxR7fG^q&y^dVjb_tT^m7Fp~rm z)h|ux#h{e+`8>aMD^Urtw@4yFTzmeB600HAtdbs4Em<78wIppQlT>w9y$)guP$2&* zpZ3QM8S+70qJiYLUNP2sd7l-;^D>`3zI`^t$P}Wlh@#Tx=J>}qYlr!{DZ57+$xNas ztdaus`T|jNA-}rUI)6W$Nch!5v7Lm--wOhrqTp1xur;3W&~faUdOz&KWtS6Gk9_OI z9C;Q)MYM;E1h@z&P~9lM47l|1>fsRosbJm%`H{|ngD98aykD3Kv~*$9@`6>pPH(!a zNQ)fpe6i*%^?H$83-*QtHFknF4A za5wxWlI9M$vq@#lvpkw#mfG-K`;K84^&5k?=?!m0hPFb*L3ZM#tHHvbJxJ$M@&*ad zrL!gaTpHSoPw)rj4+u?45Kw51-(YX`cbV&-m538$vQS}7j;pB$7~?v#CmrknN1I{L zz*3&8L;Dc}PJge$AlYB92n7he_FV|iIU(=WX)Wwpk0&J7GhxozqMP4x=dC4NDbU4i z*=q<6lD=CDWUSV??v2kmfW{7T#_Jz(I#*G}f7@_|WS*6vDl^|x?-Xl4D;<6JGL5Et z2KN~L>qhs9CxqAPh+9SFV~vV13`!R@vnTPFuXO z1h8A^3n`=YBsLAhM-U=-j+rnvys`mQ`atIs z^XJJ}et(?0jHrqq#iT{+8cZa;&a~bXx9}`jD-E@{sB@t(VG9lb4-HkoGB2x*^>Odq zCcd^8j=X|*?!s8x$sHtk$fk{V!hHO4mYL8d!zq~A9vNisi=TaL()2=ET>piHz~LI& zJz>XiG?Mky^5A^~baQDmU-!~fxQx+K*`+Hov45VdB2%P4mlHmUbO=c&4Kpn#mfSj- z!xQBt0d`(b=3{cT8yCRAsp8fM`hw2g0l8i!CaB5A7E|k7_cD%IX?}UY=Rw!NhsZNn zpdxda{xU;O3I@Y?xFbqh67B2Pzd!p&B)qP0%VDWAxk@eX6@i_CljFcNSEg9jb-sxH z2!9$(qnh~9!o3G(sOFww9630%>!#N*OU-l9h)*?NR2c>$CZQeuk~zRv)N__t#CFI0 zN28dy^}c49!uFg~Q9Eg=JrK4AYhq}V(S&mw1z1?<2J7!<06Yo(kbV@;yqQsWJz4a% zA*KLJ;BVhiQ!D(ZO2ejpw)Fn8#0X zhgnjI*Hc|dmPfz75FcuvJ4p6OswUr~KN=JW!WbLzr`r}{H5xOI;>%Cb_^aJ>6@QHO z9;&PnvnRk9PAi&vmon_mR@Lwezgw`Xfds)L_%4m4IBcgR`(k&*I@sS7NYJ(GY={?p z?&D8)EE_39->F{a4TiUQvmb^IgQaS<2`P^4hY9)Xe2$~lHX#EpDw01uu>dquUl5_b z-(sJ=ap{N4yPBp0Y``=nJ8T;sG=F}M;)0j;TlIy@pb=fY+{BySBlH^Z*O*IKWOvC|#z?DBMTTr&xQ6PlJg>_tH)yJ74Rs6n3L3%YS^t_z(GQ z9+R*0Xn9vB&-=bNU}B%a1%L1w5q-z18(o;2G|o@I249f7B!3_? 
zY;UQQM3sUX@8%_%I!@j2E0>mF-2udhzF(g=J%Q-J$v$D!NH9)hfqnjiDfO#O6I?`} zg?BEU9Z2C))$Nb$<=u_)W}fgs~WM_EyKT>}#ylsD3aBfpSYeu)TYm&pH7P#OcH> zMA!Q6v3LnonE(~!?r^fbv{twIq(^<&pPsI%irB!>3cT-sAfE;eA=_V|uZxKi{d+EP z5zC-~<)h`3{4gRfwZvZ0+=z=iquatHZj;aquZ zlM>X)Q-o3Q(SN2il&ye^wL;xUfE>k~`LI#idtpDPnb%bE05w3$zl@j{G-}td^1Z}_ zbVGbi)AMC!%YkhbM9(Mthx!0uw`<(;Vcxm9GEY9?IFtfEi(Z(ap`t;8YaRx(bl^Ot z!;wC(KssQH^zy}W2(qpN;&iW)=sTc+*jz{85}oAV$7O?paBjc1|cIuSVP6#NOUcD#Su#&&M-s4OnxKGXRK1b_u_x1 zQi|SsV~hX~vBE-8lYrsZI_DiEpJYdg&fxOcf;3o||FSQnvtGr>Oc3zw>B7Li&E*%{ zH|dBhf}zI~z0-f^p3h#C4_pP?YrxVLn8qRXoQQPOoSQJ|Tzl{>!=^+z8o*&EFAJq{rQ24_Ws3O$YkDMm?%) zsnod7e|}p(c+U9N(IjYUR})58?SnMN@q6$vqjg&ijvs%|Ae701n0EevSp)am=GWRY z=68c^OFJphM2_fRR}y@YQb{+mV83IQZmD%ONuCySgD|lfdwd!8Aj*Ru0{t|JWV3mw zzb+b*KrxY-F51*a`j^pIT#{k@IB=0`r>mr#tA`e?~0AMIwJJulww0w`qfn%UVgPLq1*_*N-%S#|LPz2)gRKvj9J&cL*~iGiZ)L{w<-_ z6-BktPKWup!Lx#^J6A5(y5<>`%9+)K2)53jMG>+Vydu{QSN4@(uoAy}q`D@1ux|eL z7^0zr!w9UQmgqDqSmohYXOg#3KA^1;{DR8@i(Y>d+^BD4D@~bjy-K7&#izR8x;*N; z#MUspc)gmL&_x1!fS&XB$bC9mLC3g}?j3$ym#~Z=7{C^gDQQ76KejFD>GoS#bWiQr z@8RzWT>#D8xcV+GAIulkkPCHO88H6~tikglcu;%c5c_}=RnMaK>Zg?!V-6F|X2+oJ zc3giglp_=ns4N^5JtRh?f(iQj0jnp}y}v_Vaor%Z_j#C(+L1jQ-=Vh+ua$4}^^M+xKmCvJv2-R3GRNq3|Z`fy|X95SI9 zAA~vo&bx-!2=n7ipO+X1-A=UGdQ=1b7?Xbv<@cC5n+H7Pau8;(R&)-%1Z&>>OdF}h z_xF!qL*;dwk_3lzqVl z6-II%)D$02E)cax5piD@Vf6_H3{k>24cgEjEjo-DI_h)t=!YU)6?RKBuD#dsP^^FF zL=X)CA@F;7g*yX1-E^AZa}3>>6=K-HWVGZk`?Cvesm0ll&d`V$T^@ZH&l{4opY-LA zdSCVv#1`Z+y*JFrw|*?B=sZ$DvLE*qb0`Z9RhR4I zlq?-5;wYy0ysvW?WcspRm7y6a+>(DvoX=1A-Vdu{r-d81X_6&vMB55g0=CGuOlix{ z3<1B2Old%TJ?X(*UNl+(jwQj1;619f{Q6!W_^9R$b=x2pIKMb++V5F#dmGuYrchb7 z9ipD*s8gRFNpj}6{+t<6uwc&_cpAHW?`$iA6$-~WCS5;4A@BIX^5BWY&hvjS#PA)R zbF1pMVivkUppe+*%nrz`XX2Gz@w_-;0xzpmm)wO7Zmk8sKbC!weZ7fv`1q3#&C_Or1@(L`@a6|KeGs@zy`_J$v&egi!5b_9wsemE%1K3R;|Kuhk5YOiL>DG|1oVDca2Fr}s= zU{W}Y*xDH$XgEM=GKhbE(?}EgqgME4V~*(QZEw8d!lAIWQUg;&tQYij5_!9ElUQvh z5D!T0Zi><%{aUz2T$H$vY+Lwj=WC_DLZ(-S$q+99jHIUC&uL>3MJp}R8egi_;F(Fq zOZ|AeG$p5Brc9yQw>k66@~LyPG$^(V#~X$7i*`lK&w}B+FL!^WN-Vqlf)kVfRFlK$ zk|JS~FEHf@&&r6{j7`viNd~0r)Zf0$(~rhy#cMx+P4~=;$LD$GtS-OTuU`}bA}Pp< zeIJq7$;;AzW;1_w6|8Sol5Tcmu|qbnU-NNzAJN{@7bY?03p3K4LwArRLdqwxc_=Sf zqoCIy^R4ODPdfYll+d==YLC$Jl*i97-Hz0h)sH?-mI<5WClf6>562 z3#Bi!&_4taPp?5Y1Gtej=mj5dWm^IZrMJ8YE_rcpY_NXJ@p77i)8;m$VY{-Ca33A~Bls_*(=wGMk ze)b7Dko0Hgbl5q|^Ug(58F0gvJti%ox$a`X7U{C2^(dBMbpne1l@Cim$9j@mdcjdA zwu$APklKHU!t4-Vs`YW4gz7Y;Lx(1DF& z$WX!lP>sgv2SV3V1fM6w{b`b+;Q2|_-I0-KMg(>78Gc++~-f;EC7A4zR`cVaxm>b(|4tvw?mh4hvU{H$zq5v z$W+~YyfFgnnEz`=3cf1VCU%N0fc?i1OKJ6=~6oAW4j93`a z%Rh9u`XcDEor%4EQ(J0bMy^hTy#j3N2~2-_#Y!GRCigz&L7p2z3aJaL>aX*?JO0yV z6-50UaVJ8oR#m4`lcZNGj6V#m0xZI&hl{4LlPMIyn5lxVHJA$LT34q|SI*nRV^%_Sw+ZX!tEzu3 z#jQp>V8oAg540tcmdj;6rgq0Ej2&B%Vb?Nk>c+0c7plQM+?6>mBv&~B*p>K-VRr8- zoG((NDw^scxHF`#ybJ*E3o;~da1y6iM!p52qu`-?B&Ei3`+l{3QykFKdnpbdKKZr4 zbss)89DOJu9uPPPE_tZsO|o|(hdO_1>h)ekgoGu>?jrkgNF3yBh@({(JX0ImDo4qe zDQR_uXS!s=*{29U^WZuBj!K-W`^(*?t*n*LU@-&za<%Fo<`A~F#NjoQgT~dLi?`QK}eRZLr zh2Y=j9)8wS$zXGiEWbQ;!Cf{cSi!~-EIG|*)D+cua%~JVfi?O)xZ(SX@%jVrCnG&* zOdN;^93Zmz%3q_EU*mcx7m$u&XF%*Je4@rx4a$nZ4aKr1@_Ukr6 zGyQXG9Z~ltd;u6oU7rxCH*JBu>I~X$z|y?m=0R%`NI8A503`KJ$QOT%_#Qs*kA6>` zE_<71K%ucle_AXbE?0bvYhr2vV$N2nf@mxvq@kB-9a?o7sNl5r;V5Vf=%EK9zM@gk zXb5kVumlF6!&|6~Uj%a1rf@!;iKZXTS`P=TBA!*D(Ti=G1=v)8lF_*721DNcdrg7eV? 
zci7q_#sKP+iV5MAe{X?lQI8Z0h30XVr}I87*b37X95H4i78C`R*X%uty&K_DP@DPQ zPtW=!xkkp>2_0O=v4*38Lbq!^fV&UIg+X7{o0gsgC$E1uQ!Aj|30QBnW{tOo;jqxp z#|fU+Sy;b&sE*S2^dMluC8f+8GTk?r2m^!H_8cs;etQ%XwVZ3?sYj{={bh7C016d+ zy^)w+UclMv;x$0~DDY;SmT}oI)^;K9x+>z>eG^kocJFE}QlxE8$1m9YL|@2N-r&j& zroA@qIe~u*a+rbL7PbRS*mu%&%7&J6Jogc(lk){)(P1qwQSURMW*>Bd0{27izbo*% zz;b_jLe1V6Xt1f0E)I{DVcUxb7TWUr5#9ih{1ApIFMm3jHT8vMe_onIX&?r1Yq&qV7mzG z(LL=_C;gk)_@DQ~d^#Vx5L;-Xl%Jbz!o0qP(o=)rY&9nHs#M*AKL$*W7>i09EDql# zz*&FriL@pJ>tTBfdky9Fc;|N`2T}R*`bO>hG5Q<<%2_hke`9lZm&|uyodbvJwi~(Pdpi@(a z{^V#pynobr)k*q*$Slk@XGkNxgH6?ceExswcqV-l@PXw>*4m;)N7C^W*rWGeAX)6> z&(~TwTH=uqZYAmG@)3p$eoIvYLOY4ad$+GjuCqWbe);O*m<#P3xHHWkU$_?`yrDl& zh@5h-AI{{O14$m>?07(Nxksy0lRQhL@Wb{MDe%(5B(-~lvg4a-cF(`_g9Fr`64ZZJ zf(^=U+)(qAVGK3DR2X5BZ2Vwo+Nx?ZQJ74GMJlOT2mp(aNOP!B#1L!^XzF!Fm2`6R zZJ`XrQXX#406+d?k7ewX4s|CvW68?sXA7p)AZN8hXtG$4Ncj@*u3kG%(t|;-l4`;3 zmxz2dv4Zh-U!^?XxeN7qe9Cw8f@^=<9f299L)k3+>!=P3fmf-vp$eDm_uY|e2v0td z`GV*96VBNX7*uF1yd3t~_@6JdfMA&7andK*2S?wS$#2ly-7^aP{@6xoQ608>B-pFv zM6l&Zu!3lT_$=tsb1^PK>}Gzgd~y=^mBwE7mP7zd3(%;2-fJk$RX^bZ-<5wZypKmj zU~i3i$?-R?B_tFU7J|&aG+2s*JpOX1Y2?!gpvFR^FlP4Kt_ncNjhuLEmz46l zxWMUSzL$%M26V!&5OL-p~iIg z5coaE3<9i{{CryRRZxHU3Um8&nHfG2_!=#RV65Z;U@&H_K(hA-VK8C)To3An+2~`1 zP0g^q$W7W{!=t#OoI(Gqj7j}7+?QVq<$=|n30dS-awmKI_N6-rtIA(63x~*5!k}7g zhTkkak1ze0YWnlOygU{pnKL@la2e>SFIu@t7w$?O)BebJ&QX8gv3`xmo|zcnpNIkj zVw^utQ^CW~2$6ArW{qr_C7eG&ES>!_!kM1G6+%j7=eysS&3~Nwz;2{HldRR0`_e3P z@qubGg!ec7hpTO%GTZ!3#V1$h^VOY2DzZE4fvTLlE)17WNyOaWTxb2q`LbRf#Vc&6 zHsh3Bm^w)z*}Q+JjE0e9yF9le2cMJr%|-wdaBv?_WJln12dz3+PmVb$04LW$rysWt z07jwdjek9ErSu>Ux**fu7@L)7I9=lUtBc7CpB@?uG&d=8Xare*D zh&!u%m?#det`Q*l3N#JJ_LT>f-kU#dmWt>XrV}0br*41I+S|r-9%j25N8TB<;zV%< z;z=StK1C7L2MA{K)UJM~OSLUX)fx{l3~!P6JeeCBDxvWrgfTZGK4lQI*%@ag7?&6z z$@PardN(VfAw=WAIN#{8ms=tTL4NvY^n+sjTQ{LSai^BFkPq0Y9?=IXK7r$f8sIbGR}+phrW2n_ zYKf2jtePK_Hg4z~!V0A&&VZEVbv)(Y!}`%cts8$tp}F6AU#_7o(ibTJ6=VAY)a-^c zf>aZ^+=gSYg}~V5!o>#?W8LV;J6KUvTL;(kHZ0Qpb02Dt<}pH4;KQ^B^b!m9efUib zKoJDFH9FLLebxv(-zDwRH%c;&bPYvDt{X?Fhv&(5lYC^zE$Vp>is0(Y#vr!O0*S{1 zaGZbTz$zS=r{5_TV9F;}`(%F#%OcJh_Bif_G3&SMVYxj*%4qs%bRVbTSmB46+fd5- z+zh2FnLwfIz-vi3rNdO>2kQ}d(*?Y?8&1eFX54;C7A}_tL+*~`U?R)%l08Mu{w!fR48nG*C)&y6(xpyio%l~EkSss4$LR8q|qKE{i=C0RO9r1k>lKy)`nT@ zra(?VN*1{EoFNyNCi_<$3jv0gJ`%C}XtyM8^vr$}k3@Lek?ftHrT|W}IOL{Z-9~>s z(s)#laY;eO8$$9rlce)c4~yVJCf@y8%ts!z4_iQs4Gh&-RQn#{CqSuS$bt2~GB@7P z?wsy>#~cGtg)Fe&PCmY|v|oz>a7N)ilg^1UBT#W@YKewu+5pT@W6FuZR1Qns6Y^-=68`0aJMS@=rc_8SePr&T?^ z7pzHYc7}L6GQHCrzRw6a+%SK8Et2U;VZejOvpRd0S`Uknk0%%uuK)guW+uY0fb9KZy=Yf z7}i2EjuuMMspzC79I73N9k4)5ZVSh@+Rp&gj-lgTll(NqU~ zvld^6qjkabfO8$q11PKyR^Z{5bRfvEkp+8wHfNgj?_}NXa0Y)1+6XeYPhw}C6?*!2 zfv=zmRuJw+vk(>;f44XR(BKDqygd2gyIwUFt)XXd;uKWgue`t4*u9}&6a~eDhNc+< z%=yZk03WrtXSpUZ@fJH3UzZq&Y#MsZV1NfgifDkEefqm%pVMBme^y$B)2gjbF-^ zV!gXXg{#E14sbk;%*Gri>-;$J@1;H=mc5nZs6i3~s`7dP4{^OCm_b%7+_Sg?d>*jm0Rf?5}1PW3CSX$eW#TN=)ywcrO&OG zG=+bczINDc8DAg)g2LSBVEBwd6hOZ_ji<_ooG$!}lquk&8^RHK5bB1s?_^uekmsdj z8{Sa7bbPOLq5RtF>(nATpJ`dBU&>ttNGu_u)Hz*gqyq0Iy>)>!trX(SqRcK2%~O9C zlF|2^_i@oWVEdjWkeUb+#4){NdGGH|aG!sENFSQxWEQ06l$NsEFMMbZR1_w{k^Arb z62~Wi^xf;Qz=Qmb!w(evqSle*jvbkNNBN=4e7Zm-r=}nhEtC$`uki%B1ciJnvn%;wQqp^)r)|^#dRt|1EzR zyenu3qkR>zhVCHZ86dzj!EfJ;sO(f-^rFp1ln6Jkkcuw=l`s`Bo?u-6evGU}XLb&s z-7z$D!HlOPd24sSWslqQ^+b-nzO9*K)=2d(Ifj+)a&7x5?ZQ}+a_YkfH|LmrWPOTu zdhiT)eW4zSt))EtvP&dv@cA8aV8wq!h{MFD!@HHVn+x_wgi~FJaqo-aTc(GNU_p_q z))m~`OfoY-x;&yg;W(zoUnpQ8-1lv_^~c5`c=>~`ls3&{?SAponHWNI%rjswvT4BdU>{FAZ$|1%W~^{GR2a;jKJjKE+L-s1mNkD|-+*Pi zVwqhT^``*n=*a+6_xY5^_#v{!93TB`RB(0NzE<$|rC!~khFI1vXNW8T*d_unQ5VFq 
zxnc)OFzyT@d}`q2s8TO~hZ*T#XTNL3dv7T)ZIAdM&Z4UZ7%DJD$cv@z7+>LrR-F9@g?5<-zi49#xYj(k~s(2x1iVs|nBEnc3WYJ4@Ib>WCG zvcizf?hr6tK1h1ajsDJ(ICI4tdgXQ0cIQ5t~|@Z3X#P!LdEFP((DnoAswrj zQSn5#%|-hYqMK)X*)zR3Yq3YGgdRVr?y?D<3-Gw6McVSoK|GX4$t{gAMruzdcG75O z`#JsO_n@snWFz%eW)$-R9-iO;@NwHPG>yKBV#OFmzH7_pUAce8JBMf1IuYU5;W3SN zfKU}+cUX>%JmR#9$$s!=3(riE%=kM2;cZy8y>HB@)m1oxA`$J_lvtY(P>tWXyBQkd5Tvt`g>Z<+g9^=RsB3=#5-jLR_2OZ#%}7*Kd9-0aRf90% zm--TcYjEPH&A+8pYJxn~TZ>R?g>JB)u@svNqi^G`x_z}GwDcO3g~{S2zWoe*CtfoF zMj6N!O<=|zedI<2Ykq<=w%1=;NU=HW$_gLs@}Z#_5dBgtg5?0;@w@+~!p9}|jH0Ob zu>600b+wzOb$20xA$zr$3_i>!-XUD*@8=R*3uuMP;SfZb0W5a}B`oR;XF#N)tS-Uk z2_8wkMkDOEE4-oR$pRWLG;6{kQq<(%w~Q(7#z1QaOfPljQim7q2-MYX$R;^iAJEs> zm~XUR(A=aR&;5I`AfcRp@;$geVVFjWq!oXhSJOiika!`#15_jNowA&IK+m~-2-LbmvDcS zdj{3g=N-$U1?nz=pKQ~2`$FZEq{-ciUSB;m#b*Z67c72&oo)3xG8FshJW_<2GDcC} z^27Crpo_M*ZE!hVDQYk<$;R`bjTaLfpKixch9EvpM#wU8OazPW3oNs&rKZ5Nfg9Yj z{SXS=SmG59Cw)LQvb6TIc@(dnUe&JJBba~uJ@U1)w1o7{XRdKtoj~49+H<(q`n%3(;6o!A+Rw#-g z#HkuK;MFAL9%aNk3!{DR{>gi>WE3|n-N13wKxo+W=_(S-^@1UI0#uq6^cj!WG^x-T z-mCf4ia$st!TV=U@enA=K{5hlg0%)CFeEb2%PON*9!1fg9}7h8CBnm>?uF6!Md7|* zJ^_x{`O{^2|I9!S__9K35k!AU?cSSFF1K;+Z1>O+6LA1Hn!Dpn>(~>(H!q-LN)0~n zw|-2XWA!5Oq$WD-SQAuYZafg}7lXc= z)fJ+ktI;1Cak2sSr&dPO+!9eWN1O)B17bgZwiW9hih^~ zo~8+bZ+@W&BIU3P&%K7#OI%prQuf9dcPdx^-VBCzChC1QLJZ6)-Gz;^&xRn$75M|I zrmnfEK)LW%EwQIrjCgKd%VL*qU8UBHzTQBb~u$xoPjLMhWTmf)B_ zAX}|~9Xrz<_e->JJ_Uc`gxYNYD0E+lLkN$Ah>!#6Rcu_`T(9o z3JzCws_qKSx89+Vpt;dEdRQ(oeE;CUTgCZn_X8V3OzrCqik&wQInaFE>%Dz@`r|6{S+Lx8 zV|;u*?)48Laq1o1bf~EE`Ih(+@>&`$V~<0@thKrc5a9cMe)$CLHY(Jp4a!N<-oiS| zt!_XiD5McVV*Y^l;xUa(ano=q zYAq;U*Hkkt5Ubxt=5bhzkU!(9ZSU60NR40=$zQ{8al<_7XZ!GcEHf?y9VkS0Um0$0 zXCx&T(I#8a(eex#Afo&M0IaU`)c`ixk-vii9*iUu79M}5!Th!8Zk4$i;z$2J_^&4? z>j#ERAM~d1*sUTu5(c5;`T?IvbUnv&&E74_!+-gPQmDJMjtiCWUV!ol@3P>0&h~0d zx7QPe*9{>hqlKPzbbf_5qw5-&&ykz5GC1*o)VB8U`ep#TIW_U`PxRC;@uR#$k6ua@ zdS>9lc$|NWhXS`jAN2i%821A_buZ~LC>XZIiv)RCin}*#9U+AY6ML6*i-;EAZ*rSj zdK+D=FRG5e+=r5Uyk|JTtuoWMyy=7p)Yw}Rq`H{&kb@dha|T%#1F8L_P7zWqL)uNK zkBmnt@OFzykicUgJ|{HEv=xKW{h1R<>m0Sr=0JaPN6@IL971{I0sa9$*IQeX6i+Yn z2xl1$etA5rl+ixd;9MYGcjSdi69=;*S%q0vzD!CeC^WNd|Bm2UQxemB=NIHc08ZzogkZ;$j z5g31L(Q@-T!tmyyVpn5?=_Fd&#zVDci-ZF_ktv`4>p{i-*`$VrulZ(X6QWVS*`prv z=ft!chVKhNl3Qplwq(}ry%j4)4otXmF}1CbeUgWg&cLgS44K7ukwJr{Ru1_Qh`+?VP%uOyC0{$*^nwHjR5-K5nRbe}6;)@r37PyiY<( z0Fw?RRxPR2`>ob2=FZIFB|z4{7BL9+4N&LOmI^M2_#J%&L3GW2Vvve!ap+1uOJ{#3 z>+z-RZ-4Nis%>^Pj9;Er(t7vP9TPXZjYVUt2+-qqb9{Vc30s=m3$P?cw^c{9L>z3) zz+g2FpQCJVcGPV31L`B-9^D1>$06gy({VLzN-S$4Ky#KojeqnzC|g*+t@D&0`?T@z zK<8bId~7Tl4S3J0FQML36=}dr8rXkG>OPE`%738g3i4a%8^9v~w?Y%SkoegbOPhhDsUzlI|jh+uj;68=wqT1-@oyq?kVtbjFRTv zsGb^@z+YC00;kVyG3jDx z4DAOaGLLz&7@W7l;*X{NxMP3nPx`%=eZ6`7+o6Gx>9qk&FFXSh7#3h5!+azc#LV7j zj&kM!qRMZv?p}5i&$ENIed^eS^h^Dc`;7j5^%qE*0=R?Dq{l)(P?#-dPt*@yZdE#h zKnpJHAEH-nI1#LYyZ8KCAPA0rU-SsXE`SmMFMB6=ppQ8y=IxDfY@>g+t*pO-RgFi! 
zDqPnF8^BH>U%yZ zi4Mje|AXpyG{Jhl3D&;dI4&%l^JyG(ZR^7u=J-!0Mk!=f`9jYPpck&l#ODB zJp^fmu%!iPxF9=*{)2j#MoBe1YU*~!_1A^0U^&`!tw=tKszJ{HyHCmzY`zCevmg9K zb_?BJc|SsUsBY5%n!r&CF0KVI4%b2UlRD$aSGahH!d)x^IGlgH&1N@|zM3Fr`UzxM z`^kx{aICMpAwR`*o0-Q`g}&8QJ+ww(ZgKY46Z@1XfNiGN@^)HP0#SXGN&E+pcOZ|b z;OeB5nEFxSoNX)f0k=-sUGDtrtVQ$3J*{AJfduaPxKKp8MC+d(3xPfol`~ zeKFWA29yqz89RTL+0}M1YwNEZC)wY^roU|2cd7W1AbL;%(~{C&=LmG8kqg$OAvDvy z^1@uYo8dFZTTrpYDD^CIp~BPm%98lP2`3YQ z>wm-&+Bbiw4RQm~d0xj&g(V&Q-oJ;?X+i&3DN=MX`*ZCf!U6$BySOv&tj~UxkoShn+;S;>#ah1y zAZ^x{fwa@N#$g``@~xl)t)B}MemSxP_tts+$UMgs(4ALzgJ#YW5vUJSW83=v;BEcO zl&ydDG(z@I Rg$CzjF5fmsOmsZob1!uPjQp6AB!P9}3^AlSzEKmq`(ayW?=rx4 ztuZQk*yi1a5McpPSYQmF?{DZp92pcBQ3g>mHTUj6Nm;Ps-g_TNBkA{CKmc|I#SWl2 zz_rge>>Q12HDv>}>@%XaYJDgzK@D!4f$@5MwFf8BgS3DDGew_!#3-r&_ z??(xm&&4V>AI%4VKRZy4)A~S0C#Bs`fJ01JX<-n6#00~ve0DEktMMtPCzvWe0Um#x z^Qge>IOr7p0R(jdkACk@!&wJtAAbH@`fNmIu(J^zDFq*9m}7*u$&*F=@zsO+Hl>qe z+bP@lBS+twFRhclwkvi+l$M~RplkdjXr&z^FF=KYTb>|cgBrXPA;Xikvh-Mf+2^64 zGRQ+%D@l2fq`j=X(5_e0KIp2-e?foq1PX8sxfxjI?mX zY4L;@>HyxEhTBs;wRh<5OFJxIkeK}p$nE(11{pyAg-0g-2I!@40y=+#%|3szOu_he zwpn={e)iGu_AI}bM{EJ8!#u*2y!Q(_sF2+Go>ko2Y z;&HRY4VXS0>57YD-$U8&XwZNDF4;7L8|uz#a_Gkp{1>JeWJwj({D3og!JQc}xOmaz zEzsvr?^L`_v{?E9fu)8>5knJ&PM+gcBM@~;=4t(%mVNSWP>CUmk3MRTb~PBy$ zw%3-od#Vus_>)-`(7-xwgL6C5Ic%cJ*pq78kCz>V|DNq$v%Ioy|*RHUM#BV z_<99ok1=SrDH`TaNZ$MT9s|FcR&lH^N9$v_(5>AYFkPAA0_>k1f}-=268uQcR_8D0 zlDk2A!{WBy>X^yo_^Bdi{c1Mbl3n=pdQpKBC{ME=-=~y?P}jFQ>0XCRNEsZg|JCD* z?H|*gN-u1QEE#4+rVf9$^*b&fd%8lya8j4020$Jd1~dOEw;hLl516bMOl{z^17J(< z1u7G1!h(fhh5KjA>H9(shJ}5r^hJ?>SBV=0#}C(5BuxcT)VmdPQGiV;C77RE1@EeN z-E+m~V{&*bKM@`1yRKVp$vnP^!00Sy&-@p{c!xsg?XwUj<9>e|K6%LlmL8u)%067+ zVqX(Hw430PcAF6zO$98^^IS!QwId@{w$Lz@33fSO+o?HE(yOjr5o3Y12ZCen3mrb) z8QM5>I^f29yOsoq^035i315I*mb z*D{*h_IUW7^$_&K0+ECKanTjA$pO@Y&b~?A>q2pCM^&$MH{9p@+C-y>1y8^C;0SF@FOFJwRI5cH1Z|<8b>Dwq)3`~{8rdMfmfXL+hl9DM z*%i?>A$RJ~YjRzT9KiYSNyQz zV6zVyaUXvZ>HMU9vd3Z9WudVoU?h-W7ug7MV>K4!4~>vO%vT!aMdAKnQiP9|qHFB0|%0weOl#m#@N#cPOg{;|GKUKl^lhx~tEG{$>^ z;9hroZgTp)g4!KyuZIZGY;o>h?EV;O^Y>wTxbO<%@_24j=Sa(v$)O2K=hcwGV*eK~ z8IDmA4slsa07Gn`DukZ@uPTJ%o!b}0Nz<3`Gx?K%zRZfH}790i?!^Gm;2k>`J#b>FE9Xvr(l9kRN-Qyn1l9VebuFV4;9mLjm(Zyrc~2RkV0rmE@Vo@d)n6Y$pO9@Zo>h z-Vaw&-P-|9KnhTFFi%r#fU({50<2=Y2;fnT;VW?k!Wwi#ikwCu`Chh>8Qe|Fcgow3 z_V$>KZU+vk?Mugd_{=;~X}EI_tag)blQVwwy#~}!C8P+?DnTWwo zmM21~aDIhed9VE%JgDFK+XvM-3pIaa@JO(5bt=BV7y|ot>CmMDSu+4ezq^`YO6Udg zal6MB`9xlQdgiI}GvOzc;S7H(K&NpFCQw%M+RfyxHw2O4yQk%SlU&b^w$K7_nF+8# zIl{a#4w#T>U_b4E5J9K=n+9%PQ3eq>&=b!3Jk~pLd@Ddc3?cNvfD-}m?w5bZ0G0&) z`0xMz&;L=4OP_B4@jt&~RT2O5KmTiAy8kHBE&X5BIBe+OU;W>}tuGnszY2_(ocHbi z?_Y&|>+J?F4F0&3_YJK3|DQkZmVa><{QqwG`>#*-+bLgn$1kygqn^331ZN-+%PsZ^3sS{_Y1RKuPCpV-h}F_(~02<8Qae-|M>(F!P4V zP7Qw#UYZobKYzRZy`Gx@f~i}Dr4o*n@Lyy6@0U{kUZofb*xTCZH6wqF`tN@u_xI0# z{$8JQsb3k<3A`@qmeZH{()~ar1eq^p(NlbZ0`-2YWDSbeL-_?KaPtYbo z6R9wDppS1Go~Y+Hd+6G7_XQ{<=}_Z(5+UeMo@-ejfpGc!3cZ{UcF7{mS1Gcc25>@squJT)7R`b z!N-0G6?EJY-Nt|GTyxr2As3^T65C!PxybJMj<$?I>YHkMKg&}5Y{NT z-0z9yerR9Icgob?Z_F?0xjiT+*b)m8`l8ge4Rn*n`76-_SU~&uOU(wr7Y~fdy26~4 zZ}ysGLSFRAQnKehU0ZpZFi}C~>RNT;r~75^R8QtE%tczeCh#*@)aM_$z5`4l2LG~IES3b}3eOZ{#g$$?UIGG4IrS$g^5tw?LI zX~?jy1+`~xw!C$to_74L+2x2dQrin?(O_W{UZF$_g}y)6p!ZJnvFp8f-ByK@uEXxv zKBb9!T+QMHrNNuFn04qtB7I#phq$e$Ec@RN2$O&A{PSg+Rt4f(F}3^6zAW~>rh_(D zXEEyW9+^iaV&!{{*IKDkb)GZCHk1I`EC=!=)@c*mcV~|)^(|X3j?Z0|5RfInIkW4s z0BzZC?XLNgocC&|kT`X_^6vKnrM>yLE`^g_lH?h( z#s`1(0AhH&{%rju6AK^!ndvFIcNq2_17X?n+kI~CxY79Jk$S)!+4jCJ z4?hU`a-UXTf}bV4m;6xXcYY*#7;5X|;k%T+(i1x(evYse~cAgPL^)Bv1)bbu79{T 
z>5t2w1&VK)x`!fAq$|>H&0`iB=fS@X?aqQUeo}}GQqh+qJHAHW0Nn71WccIqy7SaTrEU?V(KFkdvJrS;3{B)ro1yol!szf0 z?TRsykmbEGsF+rHwVz|aJ*DXS6p!1?60^npgwb0JybNv~Ha>@smbQj^K8W6;rZU+y z!$HRN;B0U>+l#;om9QmWm7}fkn6!V+_L$oB{doo3wuKqLB2JD`Za@EyYU%h<58W+l ztEAQ24>b>;ZvvzFoMW7Mz2Tq+vF0Xz5$p6Gcj?I78*4REc6TnklNEVg zl#xG9S9r&ZTQYH*>9*^dK!79^Pb4D03W%aHnK=oJ5pRh@wvNsG1qXKp$D)6zil6v( zjBL_UlGU92?vKU8j!?g=e2alggpWS`kw{h*^`nJ5=XXxz*?Q^Zj9BKQzi0D_44Ol6 zM-4~7p9E`|I8dp-Q#Gh#u-{28KHU_L_j_6O zYKdUD{-Z&%XcT)hUTPa|%+|Y!s(h0Typ|dL_24ZiV_)c3d^>p5THc9veg}B!B(-;! zHwWz!jKZrwO8mjOM;vh|lojrO<}LdjQuI7u+lfxHJp|Ryi%~*R@Kf4qe7<)S|Dew* zKdbO0Rp7&fEwJO1i3`O&(9R`K)|pm6>BNx>?S)J9TfuzIWQsO>Vw!ouGU(Zk7vJ9r zda92Rj@H-HHNyqXJFOS21J76zZ=Hq^C$HE=!XAHa)>`vjVP`2kZ?>g>+^v){bLR@6 zX}x@wsxvvq`%$!S3NcF@&~cjYn<@Ld+;TTNgl<;@Q;xlk?B7Ysv1!E>?E{$CMcd(VHs|tdj&<$z^O-pmQQL%ntG7>QF!8aIL$|K|?TR z5hp=AsPX}w*#(jrzt)FK@5ABiQAK=f`K__y7sQ zbwab!7_|mt`Z<6gU|cb`>(NC%UNZ999Q&Q5TmymZoH_m~Xt}0;>L>Z5D!_SQo>8lP zF?k|&>t&rV5~rNXOv=srC8ugwUJl%q&LJ@ae3GA!9ufYtBGYYmzdf#J`KPTm~ z??7`Mzn|=v7V$`bd_6IoB<8BZQAL)w;;73!gDrYMWcKF_2u;K04un@#q>&x&fi7%Y zMjTARTy_KoZXfmxSnvQa{_f1!O}Ele%D!9*aGWW}yTeqs(KPX_Y*E>Z@eeedhD-hq zrXCQfT8W5_WMxz4%DnAoux$B;Q!yS4D|)_ z1As#OmQ_%U&dAK_g*zp?tqd<5-;bs`@WAHR+Pyf^ek`#HMzS3YQC|##-8cgl9Y)B{ zo4fGj4!6aB=4CyPkK|LLb~yQ)s{l`x?bofCZ)muxxBlbDfIm8v88UoE*9+# zd!EVU`9bgiLXR83^r z5~9|`;X~63`HQ7uph;Pfw<#Bl{D|8Q)0wp88z4qH1-TttnK@MrmQyDHPmdz5jUFE%3f(Z_Y z{t6J?)u$%l`%`%CL#s&dOY2_Qd2>1=Jteb$pK{!~@Zg?T7hCLmWCK{wjfhVuw|C24 zRJ`$kxnnn6-+FAjI$M7Ze)QyznwKt)|MV@e7T68`RrYsq?e7mNUWS&8DOSEU8Ga6H zzRMw};^Htpbe}rVkpwL8jQhh$imd*j7)vnIE9be+lf}CcrSox~_x|r$!VSR70W!mX zdwXB=7Gupbl!qm3H2R%y=1=PPXS$v=)qI#@!GBE;$UZ6#%A%`GH&Mpbu1PbP)$%(4 zX^B*mO%OtQv-2suNPsBLP@kOko~kP|ug`1#QlkZcU6v>0V^i-O9ayAk40rU?nj=Rd zqXKq{7?1p6U41SOD*n`}Ca2epoN)Ai50C7&r<{(vwr4`W_~Y-nHo^m3Q5AVjAGnlX zzSeNn=kM^Q)x|yQn``R|T>$x4lm$ z`Kqo$*`Ask@$h3AxN+{VPT5PzoM7~tTAEDBUY}q4DfsXRRUOWqO0&(d@4u^m{v=Bx zL79ln-M1_~085H)cX|v@JRPSp%=F8#}4FeEyEzhr=C{gY9R!AA8ut zhMy2IrJxMPCE*;ho^@}-7{yiiP3`L~Wbim>Cqn3A_hoQ9zPglE@QmAYb=K?gjyI^_ zULqWyzL{{khu_E8LvHan?dK$aM%{*!md6d3c3b}YS5hA^hjzD&dIK&$-I>EmUIiT6KcjeD;>m8}zV?l?rkpOzIriF}siGQuhT&O0-f%@|;$Vn~0h|<_0)WuRIFknYc}iKi^|B~I?V)kf(YQSTXa&}0EW@^ zJg|DeN=*U|o%_dIUtc59mHnP=uhOwU3E?r_`OCdjM9@WF`b%DC^2G`W^kUY9m8;@x z?LA}%psYlKe^e5}N>~IDbQZgmBMx7|F=bVNlSm6`&WOE#23;YMqO@yk!+8$GFMCnk z=3RKYuDwbLPcBhT{1I;R*-FNtKwh^9eN&0d4S8aKIS9rYvBP=c0EgsK;S@`!)8C7@z^1d#V!d!5u&E z6V!ftJ7UY}kjnxmft~fqfw2Q_;5p z%dk^@J08;aT1ph&uCu3ZIK4{cDbVlYzlq&TuS6vza2_h-Om0NwZY?9 zR@O_d*arR&R^L_`BU->P6Wo2Cg}kPJB;jMMWHwzBaerciE1ba(F_wUT5#c9h z#WTg`dq0A>mtN1FOpc=WIlBv3*28fz@g9eUL#_)q+JE`keP$G#LZ(GrZd9cRi<{cE zb`d=s@;dR7p zP)^2+s!r-#5?!V%Y5>{fsB4Ru`96&J{8}9-GUr{EJMz5_ba~Kb6a9mqp}dKx(hNZqgNei=`Pa} z=)T?IlZyqGTXrY)_eK&_KW1L^Z$6w0&B)bXODWd=u^lChZRZ8ga{A!dkdrgVTf4(- zjLT=_!_)eel-lk0Qpeft{N|84KkHjD5BeMFC86as`}mbJ#QbZA0WY{pT(Qf4DqzOO zhJ-bsSGFoxG)v9i`Y#;ceyXwgWkGO<$_N{k_*VI=a*WZ+>U7GbIlZXGaHdLxn&}rz zVX2^#cG2h0`xLooF@3(lbu})I+5)0_eF_)J_ybjMjCbn#CHXq}Du|W8mS$Z7^=KpKev)@~5 zKfcWEP7+&IVD2%01NWtpTe0(a5a0WsP$fDEnd6(3gj7C`4v@aoNZRoF@zwf#OqnX_MYNcR=+BYb2ZJ4ppB$ z_55a67>4RNC_kDk*s;bRzRQeh#nu{D3klGZ#I=LziDJ@Gt3T7Xx@v zx(kO%>^oxkRXU!8slw0W2B^dxh*j)0PWHmlO@Kj5^*Qkv>~igX?B*dy1N|<%RR*i) zI37l4!v@q~L!4oDT7!(GDVjrG>0E)2@g+*e7_8oYLMD+pRoq8^ai8Ekb{8l>6}+fE zXGzvlGIoRStE325IV@=^_m}CjY7nP-F~+TlnzsXhl@k5;bBM~VGWTguC79{V2G);Hh; z6>;^Hulb;fd-TR#dRuPY7}#6L;){jp8d;bzm=FCBFq8Iwj9z@&jXGXB_tNO}qbXl{ zVT{k!;fvW!Ehg%nV>e`x6p40#ZG7S-wBGnURn%M-?qSb=Q)EP(PIT8TPex5kks@@4 zL4IFA^FUC9=sH{dGN2Jf5`Hair3-KaSk^b6*`(G#9xW#U&vkFwv2r`>3^zWeEMWHz 
zmwZq*u-WK;LbSp9^U*PW)l>?3K2YHy;yuWnoc3xlUG@=TTh{MqMOvi-0IAM0t}oLQ)U=UbCxEnRrhTzEWL6&V;%NGp%L zRFgOoD7lO7&djOLeRO;$6mfBzdP=3Bu5m%L4@D-a*}ljG7%!ySUBe$5DAdybAg})%%$I;l1c1z#tV2mY3-r)za}j8wanh{LuS? zgdLj2#jrp(yMKsi)mH9`C1Fk)&`Lav@8T_gmg~w?u5W4+Z{LfCb?Zv-?}Kcb2dnL& zFpT!tc6Xl$XM5^uJO+`K`}xKXV&HJ3s=~RA$TjTS@>~fA)3yGfp2qE**fi7Ld9&SV z#UqFOtD=mE1WEIc8i7V0@_Pu5JUruMR(>%i&OG;hvf}6A>Aspm;wFE3r6c{axKPP| zT|8ZT9X7B>$C^Q6Q?iGt#(A&v)cIjflM$5#gk`AtPkXG{Y|qXw>NnPZ&OEisft{)8N>A-k2{-t;zJ1UG9~_={t0a{Z zbvR9W>5m(1ne`GdAF^d?HOrZZ^8R7jdAh5T$c%?D_Y>#XFa1;0C>@#8V z`7(L@5Zmj)^co6wxQue}^G;(k>K2uryyI}|bXn-n0LV+veE*PU4+`z4@w3x^?<<8w zPcO6>t?y>5o=WdMFO>^eF>^A{*9(-ZS;IXop^@&;>u5S&|F66g?tHQj7AM(FMU%hS z=_CDU&)oX;MsDlkN@c%)n4Af$iW@g{w$Cs<{+aMqfW*3~*WLH1zRMyY6qtW596L~U zOyJ${S(e5)X#7XPp6`1_o**@UPe28piGFY)NuQA<5LF1i*fRLu?Qk7C z2TD1@!2YeFwu5~BdZBaD+K7BBh*#_Ljb zM%qx=15z(+aC=nD0ck6W30DGf<4}|QEOr20iJRzT}e!XukH)TE2?F8uR=(FaNhRL4IbyOZR=`@ zv_LSeUt1g`Cc_EubT>GMcRJ{iq-qY16U#$-&=ISl-dtZ0@bEAWiGSq!_e1z{E=&`bB>)pn(} z$f2{5ezomQxc7%1t=4xftpt4f^dUvnJBiovEhuN49GB961Pct+9(!sO3UtNwD%~ob zW_CjE%XSh5+Y@teq&}7Q8Sp~Xs2r0m^c@UPdTNe2^uZCOm0$^Xl7oC}65X~5Sh!n& zp*u2%;xx{oI<%Degkn#61eDuB&(P-by_}3)CHxjcWv{EBknP%EZl@yY z-eGkchAi2|6q&<7aMN3yI?Ls8daqRN%M2-A*8z0v{FMgh=?yAu_SvbrSzfn^xVOre zwvI%4`r&L^yS`d4_u>^9fo5J$`1Il)i4hE+B8IYm;l9rx?2Ftea=P1_@)dA{Z} z)G2&_roWYyPgn|H>1@l3ZB14tJ*FF4%Y*#Ab_UUD-Y+YVC~}ex3W+LGcTY@Zz{T2L zTAu7XKG3jI8sLUK>Y}nyk9qp$*VZsA3F`YOKR40CHm??xPsG`Nywy!oOmDE>-N1j> zquK8dEil0hM!PnU=mY=x+4=JO)^8V>Ehbri{63v{7hSly+4;}ip@wj=Tm?5%z4$4Y zXUgY=+K70()jDr`iaOqWD@QL~0rTKJ?6dm$Xx`4~1NM4={^cHOr$Iw1+IJbiElS!|fZN+|x{l@t)nU4sy$X z_65dYWIg6sQo$1brKb^ZOVB9K!E+|=I}h@uSxrH;@VV=BYlt_${I-L*0E?Xb+E<^} zr&-7LxY%%G00x~KCpuF%h1T5&?~uP#<(;_vZ~ z8vrIeXJR&%V4`a2aw4@x*bF-IT9@B{ko}0(M{a`A%lnDp19l7_No-xXspvMAD#vNZQg;#bB6tp-`o3t^c9hZ zzp0$OwUUnv_B48U9I+S}h@n1TzZ`M36hXR(HFHW?G;n}#GoAktgjzUkdZyZD~ z)<=}i3D4s0JYF?FklkczXGAxRzxPscN)j(3kv4M;aa|NV1DkEU`U z?vDg{iQ+_c;4nN%m7^|LEWh_Ac@zumHTU>tX(2Vs-G}{HNmFG5V4$TJUe))R!^-2y zLs?nqYQhL`f}PZK9)bjaJ>S@WvL0}{rcJ*51B+1f^&HE0*6-*zf9=ivH?dDB@>eq* zSy~iobinhMA~%@Gx1go|xf8;D{_ylqCqsR`8OS3%XVBCs0thZTh;D76e{Kxpr88LvuH-z5T#^!6H%= zJ-U!#ps|?S~XwBz?;x#tRjz4`|O&r^hMs1K$o zdU)*Et#z)HCln)J#pmbJWiywVyS-o1fEldVpG2YE&lelJf81gO?KFghe7U*`JZhLZ zgSta+=P*tI*E!ICTJQQlrGvZzJlACjuUK`R|ImVbn5MNV)4Pwe9eA~opnfH{ZN6kz zi@cQlddL$OgV@Lp9qh`K8%(Kh)OH8;5jW2lmXkLPh?`Rm9?<7y+TZ>2cZD&Vi_etP1FzxA(?ux8+nm zVl3r=C9%f-yhY#HLvw*{XkH_ajgE&empNCbKLoF{Q{PpXlHEJ*|5td%`?fPqM#M#yCXe~uT#?Zs z^k2UNn@_}ly$)-9Spm{24enAvymizmg0I&w=K1olz0Y4mhDdfU7ZO(du{GIli`!mx z1hn2zs!@OK=e4=$HX3g6oB(;n)-Y~LkK?|`_p``3?e1D z3~!^dvmLq?ihW)?!C@d%N^nsDpL}Q9a`Jq_Qb-%S0@ro{rRZ#&ytnASte$vUIbjuI}MjKMJ-0{V97PA~7Ht;A15;4&q+{QH9~w^ z+*vgf^>}RCr_A<11Dgfr$tyeR(R*!E=LdOzX+Ailq6=D@yX*5=p9%b`)h+Ir^SJiooC&@DTCG@kM)BAy!WQ~HO{PShI68o;32mlqtr0y>l*rE0OeDIR85k6fR z)C+z7Z98PM>Z*6ek7+eR42J|L?oj`vVzrJ45d!q5Os~46AD9N8AD5E+)>3;)nMEjn z0GT}ej_oe9MA=e+S>Xnmsx3!jd7n9Z<(VKBdO%c$&q$`5-z$oBk7S?b&T3}3fnp?i zNz%i@EA4hXy}#7jz>*M2A7OKIm6Ag=xMt0H%Iumjg-f3xyx+ce8|<57ZgZ(K=*-}2 zPt+lb%+wbGgARc6r}J=E~Y zFn7-w0bHyZcIjMHfy>VAXsrKBwaop;z2iYI^Zrx#cyCVo`4{@i1650i6+a<6qK@&pQu zcKTtbam=zK0uhyKDnlro(Y|^mnRie+jIqSv{4|L4sEARmVdoE`vzG$2cA=SlgBb(Z z19SIFG&K9%U^_3>kdhe&q_^yUylu93u_l+7=zfpQS9XqYl+nSaMcAvH>${c;_hOM=l)hb#KU&KQaA3}eJ8Jd zIr!i?of_M(pFMvrhaWC4l^6RuA!O{n_BIDUo--3+g)y?st7i@Q=Xi zLinv@8`wbl+D`Mk)9vvw>@~#2@Q@}f>}>)ys;^V7J7;fahmTKT-9c9AsKW2xAFKJD9ND!EKTwDQrpf5(O@#BwfhqV> zVdM)9LtWw*=cQuDTRuqni*;hcbP{j!!DdjT`vVZ1q(b0LQuoaHHtpks265{j8C=H$ zoIaA~a^LeBe|82!es2M!$Yvwn-~Rq0siTVd3)-#lP9M~HIqr0S*Fvx5S1z!}YwymI 
z%(_F1kv!J)a2u}C8}sqKU=9X7@1f)8`0GMF!68jP7wK+g5X6HgDdMc)en19fVVIHuG117SEMiey=;@TN-oz!l2_swKmUzFS{4I%gm0_h#ko;SMibRUS^F@ z)6b&-qEfF^0wi!5CpahmD%Uv$zw>Vz|bCSKSV5Oe}MGu^zF}ze;!t z8KPBAb3990-)NVYMZ}gn)}EMC#qc=ujH1N}n*|>LTaKsp^Hq2;{hZ_ZavOIXcB^*z zHC8)RPq-W41$G(3PbB;_t>O^|c8v%BL+7_`6aSma? zN=I?PaYOZV@^c?L@y*)dBFLf}vYc9hE|}Mw?ZAtNkPn0cw@Kv6JJ;PVEE@RX;PD@e z4dp>2KCN$J26F$j-yg>lzrO+bNq_rm2%X);fN#uBw9t~_?Pe(K}279tLf8+f5=g`|(QlzHI0=opt=iWIeedx5fu11DA3+VA&f zzj;@G`CR$^LB3Dx(P@7Z1>C3i_x?D%4tpG!_ULNCop=S7TBD#*hb!&Ae;cci_%ntB zsEfp9@=b~GHiVSVbpbSTjhM~01??(THU&s`sdqU(B6xLgZoRt$pPqS)amqKBF zCBlPgmtm(A#N8YPoA!HvJ?ERmF!oCiWfZBEUg1(-P=LEYJ=;Vtnd1DhNII88V>W$% zewXFGeb`;Vvc5fFmt^8@y*-}Xw>ad%R|t)$RW?9(9-zi=#1+|4_^)*>oA8>{>Xn^ z9!(Jo7huSj@z)!VAcq{(@9UJhVdk~;`uq92GyddXX2gZ(fgCW6-Jj%P2oUh%1Bv3q z6$wVQyNqAsVaZe6ELmvLh;Mp+k;HKOwGO!gZTtJqubKQQi?6rD;rj_0M9cPn(_efh z6y8u=+8;Kzk4v#{^-nBvy&kd`{64@Q^sq0+^K7}onppxVi=U!z>46pUja9dvg_Gzq z*`8tr&CA)i?_;?eie??~W8ZQ>q9IoP9%xAt1rK~dN7r!h`AguJM{yL zJ4;K|GKIeAYo`zItQXSSF&IF9Gc=H`r>KVst|WmJ`|h&R^hbgbot2K2S%3!#r4%SX zuLn&J<;_`InZEZA_zy8@HfKi%CHV*`^HP`J%@QXqj?7_p8Dn(u@Em*NV7!GZu-QgO z%D|k-o-Vl&i-Heny!P^buL}kX|Iw|!${WjR)D2op?jEN@9CrsH>EOA4DU@7L<#!0# z%Z{9!{VxB2lfo0;eNZ=fU*y$K#5wOQmkQT3bmBW-QX2>@6;K7t-pMmf^e?+z^JC2~ zO2;nZ>mj{{@a)6uKAOy0JdE=v&~bbh&z)Hb&57aVPj`^I+<9Zg9!^Ucly}%ZI;^zV z->EA;pxNxt_g^J^BP8s9s66@2qlrQYSQGvI;EJrGlFE??0@e7?HGZlNE?t0Ec1HtC zqcn)?qkhC=0yrd*Z(0P)1|>Y+UhnQup;>eOqI>NU+96(dg=)5NBpRNBexm@6(y5XP zkd+0y+&0Iiy)$$`FYK$ zM#66Fk^%7K8Z#|n$KA?Tf;E#ng1r!sGkuv`hOHXWWNV>Bm+JMU(4gAOo-IopL{&pt z-#(n}29!VNl}HMgTn4QAF!#c~Om>bK-`@|8#0OeShS5SBwm(kbh0gdp)%RVSGyVl{ z@Gh0NJ7n>g2ISwYFEtZT$vn;ji!*>czt z465!AjL0+fz3Fn5zH@GPwCE#Jvwv8W8xx#*_3&bsn@5Fh{25LoR0R(i+cubW9?s`Y zRT5BI%7;sZ13lrI*`&rshzcHa4eR?zYgh{q6d zCt>g*w_+t51tW`3FGGOA+xaC{_s{zlv7)w2bj4d|qw!+US_`%!;jR5rO=zbqx!$+*B?28gOn*;$pxcegnW?p@%OFYxo@Bd8k?A;3tQkBOgZpN?n30zQ{Y=Jx_~OHfKNmJ^RLmhd%Z# z*#v5T!hQOr8DAD+r{^b6x!7A&nYML}7u6YrEoN}DTj*z|u9+w0Um1oOA`l&CXi{<0 z#!)?%-cSY6H!7F9_Of#wGMN&6VOzhc^9<>XM=zpt{#u(CekE>adWd%h;)_69X0To_ z;$@W@w+PAkl_(-@Nm1_HgdX8VtdhJW;dW7ukr3VK?%(wX)2v3=6QPOjUJdjfSU z83;HNfhmJ1uyB#_%is{OSov(RSU_)Vj09FdWs9}+p6B)fe zvNfn{neQ0chFeeq8>sM-)i+Nk8yQ*>tYGdNCJxyjNz0GH<5J}yb{G}_OnT}+++mV| zOGHBjchctgakZgFcab{60s>btF5NYMs%##Fo8Z(%YAJky_NfgIvv z853(F7w-$%kI$>2L*F6AY4+qU{tqtlVU!LR<6^oJGSpz#i}w@yUAw_x&s2MQ5=Q0# zB6;8fM6131>s1r1G(F{orn2Ash~MQ*rR*P+_pX*l+oXSJta>hL&qjYXOM#YB*PudM;r zwWD=cnw#-M4gYyC7~&^ zcPvSKTfMQLR`^lY4xkp+eoT{x*hb1^EhqP}eXrc3Fu1pTeSVT-bQ0)|Eal@Se&D^UC8F67BzRkXL)v2u z_){XHfJGvT6=iQ}$89+02j{Y5(8ssCUZ9hqcj?bS?ampyIH&qds%9}+nWicANsfFG zsqp7m#Rl#z`6md>d|&8)P8)LF6-z!OTFZ&?wp+Bp=A|mgD22FT_d#=x+p4 zQud%U6Q}PN8W>e`?{33hKWY%C?$2o8ul^NGSU*P~Gy~AR6x4VsxeDCCpHbvR!MAp& zV{{YVq_;}jI3T_26&@u17yQ1X97CDd!?6`eubHJl8g_7;I___Oc(L-y`+&{HIrwvn zePGOC@HNv?exiO9P0zP@fVwz~DtfhI^4c}nyzAsu9B->oZKV*gY|iQP<$3Ix#b|bp z1E!OH+c8Ex9}dG|(T;OkG>662T4>ak+s^u&Uyu2?q73VE^;6qusqnKeTHWyphBcmQ zTujGKf$LED{`yXT+j!Pdo;;zwZvf*MK>uEGwhOvRrT!eFw&Y`>a1;PT<|VcsD6F90#VeB9`kl~F@V zp9mOt<6zC>-ulk>l9Fu=AUpB!arR;*`}+Q3-8`*(8hibJu?4@nD>szrti8`_w<%OE{&o-e0sh+jH9;6R%zT~!lN6g-*yZU>&O&>%>9*tV)kJyq< zkQO>yT+j4&WM0JS8(l08YxOC*N#wJOiT0=gOR%F{xFcZ{yzz4Wc?S>Kz*Gda)i5Ea zj`~<3zMk%JHm+IHou20MgX*Cl(_ye{`KZSmWOF>M=n8Q; z-ilOze_fQvO<-O9q9o2;GKZq&tI{nnf3IM@qD^L=ny=EI-XH5&5P?mll+~#9{XgTPBcT&k+@8)Hiw4MGq z(&4#zr_Q)+O=wGrU%p>QS79sm_?&spBQD^7<7UF=_=)}G{=;Al`97EH3$2OUW_agc zgio7n+AwD*iFjq1w$n3D@zjB`;QQY$JMP1rqUpk3r5YvuK%(KWIN>{rK@*@HzNmX} zMj@W0+GPH1Zpxvuy^*?8)!jO%;r9sYC4qj;gW89lqGk0?JDlzlf$=>Yt->WlIk7f> 
zPST)$b`7A#d!Xsyux1Jf)BI!mJv|L54VAM||FIx6%XCxy!cSn__`u#5(NT(tO5$o}`m%ZV0jv{)%_IszhaB7mX_21g+l@xv4?8 z<01p;kH#J730YY0%)1ArR+pDwyf>NhDe3l3jd$dmich~Gn zCVQ>>iz8brOydT)R;>O)DD25CmyeRmSXc*ia%9+3jTGh6?xT?V+WSFTuJvhu7sS=m z`n)B)EO5+=NeNNn(|lsHU!c1u`IKwa>Pv#PmFMvD6(~Oy-h97Zv~mbD_&cg6|K^(E z3uw?Zix`-FgWc{2J&aD}V)tp+;8j%5&3qZiaim?L4hI;yq-jwnvHNZ83Ykc)@KI&m zM)ZLm_fs4B`a#q}5_8j9-M*54I}~4w`*P?7w7c8AT`ZhW!wM(6E7uzG;x1*M#tCkv z_@uKqidu%}Bg79|1ZnJK>%5)TWS1-W_1%xnB%1L(jG<$BMNNJp4(Sevc7hXLSV;1W z4y3|U=@_F5CVRDRH;!Ww1w|KWEhD&;C9l`lq-l~pi{pZ1?he^+7g%q9x{;d@w+=(j z=KxL{Qd3amVaLiggWghIbAJ)2pI}{Q0AQ=+c%Mw>h;goiqME}z@67Fc+*QOm+ga{T z|H;pB2Rla`$&`(H1oC8ksNG6G&jO%(5Czm|4hKP-2STtM8S6EUY)5S8^A;f7_tZqf zz9msR3YSdm*^ViaEVW60#6^2kK46n{zjs@V@HM~7B@PdMSAk38f~!(4UDA<02445TfcSAaC~dj_TK@B~ z`)s$9IBP9PyHqt(hvS>-tz6xZK)thQ)kD;CKX{{_4WItZk-Qy$9MAWhYBc{uA41`T zOs=LsfBl5ZUal~^P;+lhx%2ahH%{B_7H(fNEEPz%n$Q1uc3!8_csq; z9Q%~AN|gusTpjN>9Hq*91FW$>C}WZf>wX?n^^PBttvw|UilaDBh-+GTG zK_~dJ)9ykAb-Vq4V+8qh`733w{LcojH339+Q?UC z_+>6`bc(}|W63lEezf+CkGr71-An@Yfa=$<-GP@-9+K5KK#YVaX`mqk{f+1pK^}fz zl%fj9=D{P-?&@7##iJ@;3f(^CDJ+)uV1R(UCD8g)oGI~tQONk2j?^W$KWmbI{`_Do z>)~<;@d#{-{^2>oeNB<0&5>+-fi{#C7*r04oALj)9#qhHaknZ;C$v(7f%b$59)6|Lvlh?%s|Jo@8=wp#lhSr%N#A zrgF!3b64%i%{4B5=7SI_w057W?=uuQm&gc@#*piO09Y?uzV`dm$sJJ~3{mfyU)NU6 zX|9yw=mt~Eg9!@5H=Y;f4(fcb`m@5%w`cxi!=KPXz%UQj3lJRNm#cZsLF6=p`OpNT zL?0cM;UNTQlq{I!6xgRjCdJ{7VF1ZRrCDy#ueT?#Hf|T9e0a@OQDOsK8LHVp#qh%6HPk!DDGWq79k!nF~fyp=b39FSI zuKo{9NRCz;n=Z`c&EbTh=5BD;G_q;7&WywEEhRXbb&Kk~80lxZm;G|P)hTji%^`rr zsDB+-dD~VvHH!>8?=}NAr!!By2l`K_YFwp%>F%m!W0C$|Q-7aMRB@-o!aixJPVZ8Kq5OF*vYWX}gcBg>9ucD9(9hT!>-YEm z_d%(uHlI&j%I?GC4p&KM9EoWEygZ*@&3v7&93AA~6n2+Kz;?gSgy6qooCC=wn&T*c zs1z}&Lg?%A+_wYeP>+|59q?$rJCHqyi6Uad_4Q?W|_S9Wjc-G6D|jDJp({_MQ5oy*6W>R*qX!9*=_b;I^Z3wv3-dBo)_(e0l>Y z=7^Gwdd#WXy`SJbnLd#0?UP`O>D`kE4w1JSt_49eI~L4fsRWpS-MQRfXG6vMp18XA zTlK^Urvm-)DlXpfctZ$7*-O%-B#UO?L0p+TH}9wI^1jQy`1&12!dFdyB>Ch#cGB+D z52V71+Hq7GEx>#TlF=UXK^Ek(&1?qc$7|_9e|$oGuO1L{-~`aNxv|rj^d2khetzA$ zJUi_RGCL9YEEQ+c-Lq24%0DCENqziDD3O9eZ3FC9pH2|xpY}|tyubwJr=q|UvZjSc zOQhb9{36X4OXsiueY~%K0UI30?BE{uoJ5&36a$iFjc|(!rKg|L<{aT-!EIQ3ui-YI zXw0$$VR-LxrBBJ5pA&;+T?Vbv207asCryhlfFo%Z6kaeSywm(377(kUK7mQ?w0De} zmm(FOfiqi9hu%HxL{`!+796ab{w_~Kg-4+Cc+JJ`py&)dfWbR|Qtb;C-)@sNMerV_ zVzm z(RTxQ*S*BA8Kmy%w%v*h$SKbjiCuQtH0wIR=DdG#w7F(C#pEw)_Y}_nge&6Xs}JBm zY*2X^O{ULUFmRL$|0onSx`PbqLoulC97bOBB7|uHyJAd#-^vcVSiU+Q?a|I3oa`8^ zh8uRX7JKu6_JZQQW0xF0sj+d#{oXuf55x~8V=c350sfj-Z*g+!wL8b$3Q~z;W^T^k zP5lJy)q1-1%feMRefAha8D^`~8p4p$h4Wr`3cp1B7k=JsaionAy^p;zN%aTZOTOgY z;s#5Pr+YdBW0WV&!<%o$Ir*5Qbi4MRSb?Bb!V*j7Wp|0( z@t1q5wrn+WgkNR&xcz%~QNWIC%!NSW9M?zDo8yN^xzREm-nPaR~fRxhLL#=dF|m+BPiljdds$2v>v!&kbJN zWLq^x4j9ZAE`>ES9#i;KE~+e092v+!DN7x2Q;?JW%;YDM4@AMM-{FCCU~y6ST7q9| zxjtU~nE82Z-0XYy8aR%QG&MR=!}8b|DfYoPTL#Q1K|A5;(A(B8q*ldgcLUxAIfQiL zZW%6rntgU1tMkABrnh>o6+$M8~jMW5;Y)^TgoK_G%C3>EkUCn_KPMP^?dx+R!0Ef8F9e;|ZL0k@zX{ zKC^s7agtBJmlZ9M_SQHpj?*}N?MLOfkz6=`Wg3oiak>wRBu?sgr;IY^S!3n=jU0BP zqttMq4Wg@b+q{kM<5i)sm)v$u{PwW+6Ck`;tMVHV#dV;Jkv3C&MDKXdD0B+{Vh%Q< z)L6H0^Z<2pZ|*^s9h|Kt`9b;$0VtRi#(em`KHj4sY!o%{k%I#}5Al4MTuX8+S~$Od zY1w@v=S$GPD`-H9e|^q-%+=bt^_>7QK+eCg4D|XyKN@cNO4vLK-<)CZx#G+bT;|uz zPU%%5Qs!0higaMr%MKT9>U2{R>BvlG^RVk!6hgcB5by1JwzBW%Ix$+Ik>b-|q;&Qe ze7v+42D+Na_cuv`Vo4F=;pN~;=yH!AKpS0aktS+BZUj8 zLtO|hkuI9br&1w9!!IKCvyuHS-dLWVV$=fB?!QaDlO)q6f-jOjPVJwNw5mycZvmJK zf2vgiy6^R9VUQ8&VY?{M@qB5-l;Zug9Pd8FnT?7<0rx_UtFq)K8U1}whPJ3=_3tm= zBu5uF+f3^Pgo$((e%p;VTBFhy#d~V7z|kI_)~=r{3PZ@kOJw8v7vt8x96rPklR9|y zDo6kFJasp1cpgF+Rif!6jQr<2vxCd#f75HapEyY*&)tij5KeMrz?y%rgU6n%=DnrD 
zDWyE`gdQ9AyWTD(4xE(|wX(*}3G3KLT{`>M<8q20r!TGw+bKKHdE3wPX#g|`p^Z_2 zt?uFRJ1*+mvXBR?ZmQb>OdfP3*01^P#oq-#SmRie0L5{Ncjs@X%$=a&zu88Tf7)EI z9Ua}^zH6Azok)pW!vG}g_17WZ6}YnP_horlQS=6iy22-UcXXq2@*OYM~$3Ll@F&t=w<#S+j5Pl?~yBeiNEpP?HSA!H?r;7Je|S6VQ{->r*oLQ zBjs7oR|<~!hohdX`KoD--+YE-f9;l9>3ew|IP9p1n+;`1f|yWzJy_B8T9eh4M{Y2~ z9KmzD%DCgZ{{_3Uv(tGTDe-WIKh{E@(2~Z(6$w}d{_+riSKXSqGiROFK^0IStJm2Y zUTQm98_*OXOx<|&X929buMLn9hE2F%vWTbl&Y;5~=WUwC#r7sgIXRV$e|gYP!`a^5 zACXk65;HrqnY{r?kXi7j>nF~SD!to0B25okw~u}9aG5O;)A>^|o$_$kUwvMqNMRz;#I(V4gYWS=z0An@e{5ao>I6#riTx-tq7rYb zZFe$&tY4dmMSOmq7DMq*Zk+lM^x-6I*JjsJuuz-*FRw%YYX)Y zQVDB%4L-xvtvqr~Awgv7R0?hpYjsB_1c(i=UO;}(#jO|A*9e;m@X`Ll?wf}zYB*q8 z*K*X6StrJMr$5#6^Hjlhi$d7doV-rSJ3ngnvT=)Df0uTih$-SHO>Yp^vs%QzpBsO8 zHOwpc8K!%nS+Gx6sgFQ+b{IoOSK-7g> z%kbkIw>@!`{j;0gxY8j2Pn$_| zz}bzP=TY&39YBfQjKUbEgOcDpY{Cv3EVb3*j%S-?_C(LL9|IJx2@>3|0VUKVBieT+ zNqL7F$~iS&YBC~ks2*wTDD&NvVz-};B_l^JP%d|ocputI7OUuUuc!JTchT@><1>J( ze~hZ@kg>U7)XJCV$Gs#sENptx&k8q7r!((L@93f87x;F)5%Jx9XFDQlJoivIrF3W| z&V@*5MjS{``>)zL{AdTT=n$jyzBx*A6$DIHqn!P&+=QLK%UuC{K?*E_QKP8kG0xh< zz*C-^fo(n!*D+(c&oc|>HVtIdlrLxPe=1|?SpsDpV{&uJKzxJU!qd20Y{L+rnBY4B z$kSn)y9?l!uY(&-WNg6{X81V2Nww>$_`b|nMod5lGQe~6LtVW1;g(A2F}OzbJDJoy zwc!+LcmDO;5|W6%Vt*(rZaiK#5MLWhu`}j(2$@GyKAM6Lk5v-J1)USAIHDJTe*`^_ z2nc*ZlWcB;av-#*9+oM|1UbABEjAnGx|Jz_)hlBec%;t5J^N5p^O+u>0|o%c47RKQ zV9;BpQ^_q8=`phS&pl@rI(viO>V(&rx(n`-PpyfjhqM0bVtdu^_s^<=JLH1+mrv)8 z=3GF=(ZN4)dyrY1ikN+tQ(pW7e|0?W;d^HY#Ukx>rEn31BPO+EP`?gN2?eb<5@z%^ zHmXt06?^p*G3u!}Lm@&ZzaRCxY$@v-kWGlU{8UD`p5!5VgPxWhJRrVBaa{9oenM|| z{Os*Myxi=cPetFj$S6FruB=-z{3uo|H4Hr)Q*p=iml>1jvTxe_yqG+-|fc zmxd)rIfM^ug>1TYgkQfT3gA zh0F<;J~}*T1qKwi&y(L?e+9<=594NP`FNY2LJ_rODVU`6cJk)q5B4r2c7ynS_swXr zVi06P9KxVA5o8?)X+pDXe9Q;?u#GJ12gmo$eJbJ+ql@m4{yO{K*LHN+Q6Se>&wJx` z%hH<;}UdwPmaN0 zP8WGTzdl|RnIqSoe=XK`#Gm^o2ns*24i7Qhdq~}LYOYVKl`|FSR651E9mI3Tc6xtm+Iqi{$d|axb z_^BbMV~JCJd>aJ*S=oI8i#!x787Vc$ZCfrvM5R80^W{1Se+#g_v;Yo2uMei@`m--b zvB!PIm6on_@oX+sFWtr%30`DL6OY?j{;6rGIbr86EGAHkX^~rFS9Q9Lv@XaLsP(Ao z&h<)MOSBy;(e+HaTfikmn~Z4p%#vt{0H==EgKY&Xr4?D%=NO|LLOg_F*ssf$yzwv2 zCh+1EJ>9;oe{=(O;u+26osr4*P-veKX8AE|n~r%)(f+i7BS}1=_hst(kTWEu+V|h< zIJV(W9KT=bc==lhhkPMVSfCw=rhs_o>N;6#V^5ZNcqr%B_^{&zVfOM7vo56d;QC+= zyOu-#r262|ad1P_O?X^3EdaGZa)YC*Zs(La_V+3@x=V$B=KTOkRWaF?9hR!TkvkH ze?z{1wnb#aNT-u&osL0ndGVX~()wKW%G-;sNX;zrS<(H4G#FnLSe$#UAC;zQzfRKm zDEz3Y99_dMC{e863t)Pjxul(RqeCZO|;uA;k75?aMCL`g=l-mxU@Z`dzAt0T<|te_Tz*jkr6DN^Jlwi)BUvKZ0~MUysxMY3c?q z-O|g~&7U4}zAK@XR?@z4!Ft`1S%{8UbX?%Ir%w!sI%hTWUuJTJHRw<{4 z*XLb22P&Ppw@Cl zomd3cTs%b>I-AD9W-0(NYyTkoEL%pZSlkX5Wr)tvru;tH@*fAstW>p z>3i1BkHI=AUjMS(HRO;?JbMv~=`iF5rpS8uPhg=dAyLOund%4*eTEqm3}O4lchNF> zkgzQ>A_WV1k9V>^2-gD}{VkrCni|R}GlY0&S^$!hlYJMYtRp!Yf2cL_K|VyStj;k! 
z{W10ZKm*5}bBU)OzhRqFy= z`F{5l7i#CMEoq<6Y|IVsq#*T!T25^{G36ShNLO6*CAn~;s>$!VE+^^!{eFadyi4pX z=4#2`%|2o<$<&`I)B0O5>3LAKeFx7@E%-=V|{;fR=9BT{p7J*pM zSk5VVMs4)Vx~ow(oY$S-+yVXmQHKA=tVns8Pa`0|vmw_Iacr}6JNzl`9GIzc>7!I+(XpG?yGm5R4Lm~=zZw<*XTDz z1ehb9ZpWAw8)XmHiSYV-zIvE^sK=@Kgy9UY^tZ73sHpk($8MtE`^EmUm%yR`THis{ zy6Bdqg{*4rf2*b;INQDqzec^%H;XOp>!{V0DV?soKUt(7sH+i*kG!%=T5go_VrrY0 zn>rpuJI-V!!e8F+bo@Bwh`jmrdjFjpi_0uBK5r<@uReB$eTI?kB;0&tilSqJOL*a4 z=x=wrj$43c`m03UM=8%RZy~cabHBr&u?(Ao-j{|7f8~M&fJAzA`&KB@NfCh1*R$x6 zug6!GrDasu1?%s!Zmeja|FFrc9oH;<%@tbKcB&}yLqTZ8P?znSb;;S=w3(Z3X>ONj zp0u-!{I45!G@>hB0uzB&nqrL_SjjO`^*yNsu55<>NoS`F&VP9j0u_IoCvmmf<#EOf zm!rl#e}jHkR!Z5A&PCEee$(?GpCv{I#6e$W9#<;Jz$$9clH>_GCtZbA1e^h(Z{=nsL92g$etN_PHHpxa)SD`xCsA+ud>j z#Nv)g>FDHFINJ{2bkCBm$8o1}Zct&u6)|B^v5UyVwr`KPB z`nrqCJr49$41Vd4Mun}uI^DFfuBM9^e|N)7jagQ}dSv~~N(^Q%#Muu{3RrM1e-iSz zuc*(s?;Gm++ZT4UFj8v6THBaPZ)n$S?bN0=p05!^{wD%3Ls$Cfi+@XAyPzJt9FFd? zbo$mFT_H^`Ae>$($CwmUP7(X7Q_p~mp7V?8JDzqI>^#qQe3?OP5m2JfEyPilf6a9E zL;vTO*vq1`uPRhGyF|KQQ-js-fCS?NBc&px`8=O}3-)f3np{zr%jO~+w4-h~n}bZ8 zn`Y(G6M2wrd)xsBQsgYYmrTB=x$gipH6806x8Bu&W!&fW2#`a$6B^yTKsenZu(~Qy zm?ioTO?%H6S|Jp6r9&%qL6zM3yiB?c=be z<*boec7AV>u?=uV0hSZ!Y((~KLCQi{Fa*yFIgL&O8Ts`%;}lp`5C%k5HCre!~{6q5JlpcQ zb3sS+>{Yq^^ea9R=;Ry^>d07pWD*e!zhr09t?N_O9vN ziCpT7?%~A{&dK>ovm9mZe{NpO?BHBtdmcPIF%UtK!zult32ggCrTNMl5X8B_d6hlq%p%e@?~qwp-ofz6rR+Q}O~G>E$==+{;cx)7y9D>4taFFz?!+ec!I_ z?g4zjZ*RTtL=oJ#0<>OcI7|HmqUNt>B3efkym$lp;m=QyaGUKDe-f`-sdvxMgk(T3 z5Co1*kT~rB(e`FbigW9_-mN%%WCxC*@9gk_dL$4a2?62^1V|wIPQcas84_e={d3N> zmv`7R*1xJMlO&Ne#y7s^Z9H{~SXkq}g&O*ZR&nW0*lc$j^Tq6@<1>aii(9E1#wvij)zvs5 zNS`hG=aCH3sq!v%?vJ`PO7Pi8i#>U9UY5asArZtWz`IyDO~}Z{)Ey zk;gNoS&TaV`(hQw8+)KB>-+ezryo`51$c@pYx?@{o2tr|f3x2=mHE^SP8c8A?dA2( za6dqA@dQX_+%2svbo!cFd5A0IZi>sKTA4!kluQ~p#01xwM0=MUl{h3 zLg5?-Xkcjhbi76z&odpM|X%*m)vtVL3y;p2p=P)*^=3!;N7}&cTJ@34^kcnW^$us>b+&LGl+I|! 
zsBalQ5o@ZPqfJ4_bj!Oo23%b=d@1h+A{LtWxK;z4+a5QZD2vhbQAHuR_9|ZEtFo`_ zG}KCd)E<-o!um}0rZ3o;yB)=c;V-%uTY4X!X6+hWf9v*fJH}9Y{KRT#{Zjb#SWCW( zO3N5JV<#}!4YnGah3zBJ+k2x~=z!+k3@eAQ(nEc{(LL4%>!&@_!>GHCyKYc!9E(Ar8zo3?{FK)<%V7i%-U;L?wouZpHsAYFQo0)@qAWk)HP%_4)w{Ri}}s#f-*PG zq^)MPe_B~Yz0*|DwvSNiyq<@7bx}q6uMs5-fiGCJ?p=D-W~nS)fG>>!a5m<$8yFF` zgvrLdOe@<<)b0dTH5%K=dbD{&5gI~=YH3#vw9c#Bb)H)YC{%3IXG`YM4r{JIGn2*P zJfBVd7%vB?U|MpmVcyu7%{J>vgGh46@%Ct*e>!M25eTq{59uk{gW!30)tp?clbAprWebP2vx-R+1c`Y5oHR<>Q{JjLf0dtud4vhew1c#Db8KVN zfAuOd%P4kkjY2n2Dv?!eqV}Z}i>0b!s}9Q8i{1XAu3hzAeofb?t97fak+ZwrzShm{ z{6PIIzxu27eNy+@O$prNX<4rW>A@{?VJy$Z#PHgU)I&5v{lDBPE5tS{)qh8_pf0~}hey!WJukUWT><(~0?JBMNVINiL7v1ki z-;^t;jhX>s9LZIemfOjq)8ITD_r2XI7(3I$d0%jUCvtQA{YFz#6}o{KhQjrVdhYTYO5Kto&fY9G(nV+po&@TxbUe?t{Y z%Y7KU$Ln|=>g}E!F*R1#El+zlr<0o2FBZ!<^`1_(`fjMf#Ky^MziBOFWvk#n)?XJE zo$k4@*fr_|zh1BW#4fJeDh_*xZ6YD5x7i|ZJ*$AT=oJgbx@juVAy^_qTf4WQM{Z6{ z2MTiI&>k)>9Spio&rwow01SoEf9NH;2c7CWxsBW1L+y%C_-pW%q^e`96Lcn8wQgCj zZ|~OHTzf5qHpn!doBg$G#jDgoz^!g#D40f&5*Y9K?!3L57wRv3&bY~D!uCqRWJ7np$+s1QP{Owl)Z5w?38z}p;HQ;uWDgp`nwQ!&F20%f1S0Z;RWAa zwHaRvuK-Wlx>D}W59vL9G@C~IVs#&V(3i$8Uh5UTTD>07T^P2pu#^zaAIQz&J}%sg z+9RpzGS+Du_}u_jl;$uhnuYe6O@a-7^ZDwhoi44V3rf%o?TW?pYIWn+a?c~!1;>)E z7e+lDW&Ugs*4oM1P%S%ScNX;*vQdT86yF~pP~$9>t5 z6n>E}-1on({()p-*)K^eH4=XQ{H1>iQ>Tyew}wZ*ysk87w@P&}yzQ%tpZ~7#IiW{K zdis)K02O@gfK7>naZwj93v+JAy|eV3>&v4yh+Ylwx4L!gl!1IhfA5yIwG~~UtFy7> z1P_wpYN?=Ul8%PuP0 zQ*Zu8X{@)so)#~b2zG!^-vQ#nWJ~XPV@&OnF_P5N=2$L{e-9(af_r%L8Mhsl^0qyj zV)Ql{sx#j%)JmPT&=5oE9 zNx}fWa~M4jt3qhj)58Hgu3HY1a=AI3s#CKXzKa!41)+jIk!rV|&?XnHD?Zqd{&Vg0 zfdRhy#Z{=he>JtAb}v#~QFO3e(XK9S%Sr|1?sT|JdgFI=-$kvy-vnHrW{+9m!ev8~E=XMx19dWGGsyWiuIbh=k|!xaT> zvf5)Ktn0J#^3W?RrTf&qd#8rethb93G>)3GAb!+`JMRqGCTtl7nll z+cW#+7QHcFQsIbvHpk!p8&=UtaZEKDRK-7)?Emurv0uu`d*bW9=D6c_b=`h1gJpHO zp3G8&VanU7WQKmJ`rMX^clCoU<3(B=G%tq(p$2WMY4!P9C(*QhR)84J zH_4=-e~nFhw~poj^7|v~hUa!`q`R<=yt82yrM=PK?%Q&Iry9eV)jI(~NfIkwFZzYk z#haw=cKUSJ96bHm-7zng)9gLx-L`z)!-^#TIbB-I7pD0#lA{eH#;u};Ek?(MK6F3@ zy^05ufA_1t(e>u-AT0;Fni$f|3rfj)y_{CCe->?=P!3vbMouNz#(-!2a<8#Ib*_<% zxtVepsts@bLh+{D>zr1-g8gQ!zHrS1&{rGa-@a^jR-IKmZCmy_2<$%8h`TGzQ|jqM zuiadk$^Js);da*bTc|bc1SjXq4+OXRL-24ZN ze?d*^&z$Gwc8PC3Y2{&1aUabd?s6O@E&O}ZXLWw?A6LCqo?ZgP#+J2Vcr~9&U0J%L zK)H|~He1pL5oK$$waq5_uZKan?L4-P(V&eAx;ySI^+y4*)>5f43TrrCEW6l)U^HI! zwc}zjY7rW@^>=h#*5O8%_kLj-0wU}vf9H$;UiHE!G~)Q7-j&hf4&=gV_3Oj_xVvs> z;W50g#|yi77?W8puK?;6ZC}AQz5!A7yjowA#i&E|aDFxuMSGlOWW<{d>@VUJci)!R z1-IAn!f}qnnoVUBde+kKs z^0D8m7}Dc9T$yxyinY$V+j-Af^quWIsK^Pq^s0Do^_-{IskYmlVr7Y>y{`L`vgNFK z9imWYe{2JTD6jw!Gn>fKJn4C=owhX@tM1T_k53yBDk*hf+d1forhZVpJa>`VmHK^J z!cl0%PS|}*mm*cl25BEg&{1j+e@7i~?d49zn#*MmGBgL8ddJ+8bB)ip!1Cl_`A?l) zU0u;J!WpVag>-P$(?^d* zFuzEv#dC%B$|RjlTBS+_RPCr|G@HHBlYdBuPt=z|0dlgnL2X|`FRtn(uY_!BJ zndQ~U+}CYL)K41-oOZ**=GAY*;st$B7|0m^8eX`qJz&u{m05AiO#X0dpPS{yV|H&B zn&shRIBqYdrD0Jnw$?eV- zmumZM>Cix(BwbDAS*Xv_(_`q|SD4&QYW4oJdc#DsB=yU~WqSZ>fGBlYPw)h%rQ-Dc zfQ+v_?)1kUcN}atZL_;7xs`?o#oYuO+g^ARIP|Yc((TPX4b1^)@YjWd&yJN=l2o00 zgR)hh`{l}cv_qkPe>X{zofnpNGBLZoejb|DTAOuME#L3$cI7@P7bk_$B-!2Hi`Qkn z>!G4?uVNbL>CKkx_c!ER!|hDkk!wl#hn%02a`mzpF?Pirbh>lBePkWX#(UD)+=fe~ zh0F$TUTAG@CuMqAGs}3lrFMim>8Me^fT*x~ZQgUZm*y>Ar-6v>Zf@xw3&MG*Abg zf18i?!MLp!4&AmN&+pF*jnt~gnM6rN*U4@y%EQPWc>4Bq4l13C|5{C7Z3fda5?bT! z!l06F#uY~Ie@=5Y@7w+3ZbLKA9&9J-&@ElN_IrUaYN6g5%*(q{M{6^qY(JFrhQLo7 z5&z+A-Z=%WYRhU3mkhR%qn+JT#(foc4m&dp>eH~KN#1JK*Zs-}hf?)mOq;#V!8?y? 
zj#1ULL2tW@8xGpMRfJe3D2}>&1Coh1dB<0y5m$z~fBP`>!=|{PKk)9HtEe5u#qIIx zuZFb?0I==c4-^zc+@&lTB_(K=P|n9dc*6{?v>A2EjaMryG$!5RvYV*7Y&IqcgISw- zbXYsfyDej0bK4^gK~2(q#3A;qK&*pfbQ-^{#YINlvD&E3{Gb^z9q6WVSSe1eS&ZBG z*bJMxe_cBcDub6Xz9FA@TPiT@jD`l)cR25PB;Bt?I3>jyhfVJyb!l4HaG35%)xbgm z3|)-zA<*3;RI2ATnjJi=v^otsSKS^=VCfvq>E^lZs!MBp4-jt9G*He;cQ|?NqpkNw zDPj~M%@NDK3qNNLcxj-7F>UEjcV3eQ-PyX88aU}R zWBk!77-oSgxhW-NC*)wG>U*?2+_KV?d-g_Lxxa{qbQUaFmjbGE_!$=8B&7`fH$av%X~ ze|^`to&)MEtlFbZN9du+=49s;kwrV~o6McdcC5s`T5{h`21yW~AS^ddnt1>Zc`S^8 zhYk_7>zQ>V!jIDwPuoR&V*a@@8oh0wq4CwVW@A*rfrF$4^McLLrGFe$cMAG(=Drmo zznolNJ)>|r*{X-ulZ%M?c&BeyxQMP_f7R=`FP7r2a|}WzmFdKnS)BCwWh5i0Thx}`nKx`uc?)`txT4! zo)Mad^jai~nrO8VytSgmzh`O0Yv#&n>PDT{uDg2MT5qzy+~>fKLhU_hJW>Q3e})aU zRv1Te|K9Z$_USw-8cMNrMLu0?H+tyOde8c>qc&C^)a3HIe~6x=U8(k3b*mMO5jLyw zqaJSNjp6cL!{%&3heKbLdo{Coy9LQ~XxWWncY>#Fr>(S3oti&5o!+hP_*~WFS2(~o z@a>j1?KfS<$!2^EVMglaV^ew5e=gB!|2A>rQEQGiXy6s6p0T*BIb(gc()uBJP=gMg z6%MhiBdKf!ePto}rTV^iYFDCUR_RzPtZyXxBj@>bvA(aBvZS@D{rG%{nxS5@2Hhy$ z?e|($dOc5edsW6#_Hcg{{lw@j0!&X9Q)}8+TyMq1yXqc-muVphvg_n=-L(KH;XrE4W>)#NUWV&gEluO3Kd zr+#mFnyC;#d;KBw7+QoHBE`vYej2<@Y#RqR;Oow{qLwGcgt^=je^%7EnN*FWJ7q*! zUcKkbbhK`DOm*637*?%V#i+ELl}Fuz)tfAy(?L?*cBnnHe&uc0_w%zoZa$sCu($$^ zeCuyw(^QpP9TmQc+THdfrP{BQ!wKTgv>l%d*yHMHY%YeyIwpplvNUi5-xzE$ zB`7HqJu;>fWgPg@ZU(VveCaDPcG z-|&@TI4!@VsD0ZRw@GKaTcEEQqC#HA%r*3BWW7yIJ~c4YZr}TpiakE8I^J;DY;+E4TYbCr66E4WB)5uA^jvaq^>&XD)Gw*r zC`Pq*u`MBkE{#@fn&BGpecW_sv+L#TU0Q|Wpo;L~ysa9+{9Ltm^D)Y2_m16cJsR!f zIw;}ZRHdGHf2gZ;Dk*-~E_Kw%iEmo99G|C{zs~wHp7Nnndp|u#-P04@>P@8-tPk~5 zJ8GcdR4;X1?AzmXuAQkMjjJCFR>jTG$8KrejHiV}^V+zhVW8~wlGn*S1G2zkenJE6 z@EkQtvvsdgY}(6}*>Z8EEIFX^-YYyf1Lf*J$p%|vf7mTw0$!()hH;C(%;Jwt!$pPX_L-HbAW!)OZBK&5(9+MzR_;jCUC ze?R-hle~Cn;t{+`J7gK-jkda3=GpWw&T-NmRfV|vXw_auKQb#XgUyIVVTt&LIKfjG1 z<2Rg!m05zKjHK|ccWLy`N^9v4r&i%Mf10&x^X;hd4BOj_b}BAUXx%SI(*~tvR_}I3 zN0%}}Q{7d)eS_fu*Q|Q=7nbPXuGN`Vcl?s`c;7n+p4M-HZxpW}JD?6(W*j+DghMz` zR>kYijjm}0^%{b+$!3B!i8ARg)gG_AUBfG{tESy<*E56Ucs$WB?-JoQ`_uW{f1M0g z@#%Oncl*g6edlS=Xmq{a0)JhU9SXqelZ~Mp1_Df>HxFhe#$u;>12iQ!<`vK2~) zFU)$c0aK)j-7@;|1t#$9z4J;T>Y<%Nh(p=IaChbRMs?ctnIb<5lev=yi!=(5XpYNk zRh`=KqV2wYItI10{60jZkvg(we>(L>0Do$r)qB;gC^b+ryzloFV?UY97Xj0a`tBB? 
zFEE)@x~CaFvcY^j9`A051W%DWP`0DdViBC-i=$AZZXdVn?TY>R?Sd`-;BLEUjvlli z2$$i~c|PmqG(jF|fa}>6QM30UnGWpZQ?Is0veY&-$(H!PJIzX-*TUt9f6+$zQ}L72 z+}+Iv(?oYYvO*fbOQ+oLO-jotrd3kO-UiS9$gbKw?{&Kv=AZM<%|CQ$I zL#f!0=O?9TTTUK}jamg-09?CO7}n!cVO!hxj?-$b z-kL4fdM_N#20Q(Y7Hz|}rFg3--SEBd;N@(Q#I)w4OW8&;uHHkPpnpr(a1mOr)x~Gr zzuq2myAd3RMk&~tgSODIHM-q*Z?oDBpEX)yGxf1*Z=HtvjQaaUf9KI`Z#~7vK46aC z#NO^9kcCdY91NC*@w`D*eH!CRqjJNFvQ$0{7PV!6x!Wg2Yc^k0hKQXF4oEO=jpxx> zq5VSBp=l&g?}wYm=+)H^y?4vk!G#Tar}1g@xM=O6qr8jbSn1j~zkW={&2ULs?Q8Y; z?!Na73lNX9_Q7>se>=W<$Q{;Z7aM)cG#yh((romDLM-pRB0*wp-HGX}YV zD>pPs9KVV?G-bE`>7>p|{o-ia+D3=7L9IB$7V8-w>{D&;jTw}BgTmvr8Y_$lPwq`p zSLi8RTdl)xG%g>VCAThJ^x~U*d`sQaBz}1HlDj_^H`>bAe@?fB&8*#CZiZH}J_Cdb z(v5dsb&nQSDBGfpQIeH_)0P;im%`JRkwef62O4=6^qz> z{onul|FS+_fA;g+|MtJ8$MyPu{ontO`(^jPE$6rS|G7RMZd3~E|7$V7;#l{8WIrV# z$|i}s2=Ft^j zl;^X9d_epL{bjz`tw!R#kv5CNtp6|O1{i;3e~k|yf2a3qj3$m4pbf<9EqiW^)ZfR*jA~t3qC>^+&*pE6dt6N2TF;XT;XOESkI9Lc65y+f<8xubDWWKsjQ~cgn_IuNy!|y&=TffwQ2i5*I*!HE+ zFkfcn^SQ@5|K@cY>lKvyYFOcQ74z-zbN0sC)OG}4%ka~l5?%XBNkRyVhOTQ)t^W0H ze~rwF;_hrEJ@)W!k@SdWsgrp6%Tb0J8aeN1ZRxF+B3=9Z4y!i~ES&}m4kxCav>=4I z>!B3!v!S7Q<%!}t+-LFlOvxFx3Jgb;TO`L)N0XG^_!7KWA6xUHzf2TboKat~Ki6hg zhml$R`S(l9|DE3lLw=$Q`OdEG@2|nTf7V^D^<{T-cFaG&mft__1MjK5ovoRM9nqne zM%mx#)(0J3wci+bC{-u*{9SpeKi31ANMk+fvUI|!m+S*tIn5teED_#n%{L^>juN`GckJ8E6Hd2;OA+Rlms$2oT+&5$PWryYkfe=7T$ zth)oQN4|nA6wN5?<=2$~BLa zRO4rE20x=Jvg3(=Z_G&6WRpe6?~E!Y)_0B+rHbn4AoWmcbo4H=K_dQ0#a1L2p_@rs zjRhiaBWAp8At&P3Uo;&bhzRu5vQ{L0-E5U*Hdx_^B zqxIa8L+*>FD~>-lpCwar_0aY#6z-frde^uh>!?f4-jvO0$30FINAqP*e|pF}`JhPp zo6lO{9@(Rcw%hMo9sS)>5AW#2KAjk`Da)G><36AJ74>A-x0O!+CCjd5Z=V>RLMc;i z?tva_t{rzH6SzhaSW>7+F1l*Yx~opglqn6j|I{>ItEikKCUJ2|38G822lsq6eZb&T zk5)O2dOG2gpoEP?q^=$Af4|a_hN(Q1^p>RhMB=e-;Y0lF;>1C$;`6bVU*S}j+4p&T z$YK@uD&H61=OUiPCpJiVyb!Tr#C_iIGhWbdEFkv%`tVeR_7 zCsi4HZ{#S^)_nO1omodSC-D{Jl2qw$WxeYy#{ti6l_Wz=D!GJ&e>Ny~7H3V_JcBd( zz5_MZW}RoWkVby$GAFn6S3XIf>)s3X{oDGTCmw4B{t}>qv3XWB zCeYQFjw2DGQYi%{f3CXXT+j%Nsmu|3uFlGJjLA|fTle3Xx(#31S@2}-=j*nU-RmqS zga{AjPI(#9G{q{mnRKGaJXAvOV=oEwBJ&e@s*f{}UyNZBKJLtOFFu zEqTjlDjy5MUYaVq<<>VSq~fFPP#>5w=YkjX__s5;wO{}{Wu*+Zg>@~vHF>n#@k~na zkQ14+!Eu$^&Q$46FydGM`N56!!3aeiZd&c$sJ7(*8F%HxQzeFllGE)mYrxsN;%;m7 zj*lB{gj79Bf5NiVde8vq52d6FLlMJp({nzs?#d$m#s8RG7OTj>P{QL}anlIv5f3_dpt2RBY)Tg9!?TV_jaI6ar zRk9tmT&HR3%i~};9i}GN>)?(xa4L_cc}i15+BSpe6_#u+SyG!tIn}4qy@tS^WL%$o z{*j2stc|V7S(nA)Zw`Q^b+*sNnsPNITRR6UNytMSLxLc-q6;azr*;<~6vzQ9tr9aa zzuLq^f2HYYg^Fn(u-G^tAAIHey84pBoukQ;Yas8=d``NG`(jBWO>T>LrC~aO6#@Cx z3r&gaoJC9alSAd}+hz@m^MU;#;(9rI&G-2|{ruec#40mCyYqdr3G*0-ik0qZpidlA zk%JXF&GH&08Qn=bve6owvNP^H#}6zQOg*lle-)`S!)c8b{B7|z$1 zJ?=!uYfnm-I_{j(&XMP??-_A2(kF>L<~-$mLO?yes&Z+JTdp#e^poytfYNv#nhGA& zZcB$mBBADa*s^#Ye`2T~f1XE&_YvzekEh-@&k1Ahg?vQZUw0x!@{gJp!^?C`i)=$V zf6bk#7I9=b7PYjapSt$vd^ffyiC8R6l4Gh_y6ktP2F!cv2=76j<(SerB{P=hZ&fm7 z&WdWDAO4)P`90l=b3&|-nv%tLv3HW=!`d8YrVP~6!EPu@28!bhx-mwd5!aWJ+u$ZR z_JY*pL>{~?rI#iUYRQF>-5jLhl+J+3e=1;vSCUSVR*+mufRy_qPEciaw$C6!pI{pe-?jN zz!@r)*zr~A0QR#MB01@)QAs_Ca);`xSPT@+p_4277&cQ+c;-}zsCoE3pLPC^^R%XL zUDMC`OP-pYtDiGCWIy;NUMDM2ZLjpSvFzDcu$Xr(7WB(hU&@sI&yr6*W6E1C=@YAo zg^HB&8Qh1%pmBh)P%_xZR;Z^He>u|61J`VTflNcO>RRSCUd%q66+2d zOfvGgl07HW!^iSvm&xNwrnvXybDaIv{F=ymp1xdj7lm8w^rO_Qk^H)GDebA`C(@DI z^=`geXjY)Z(36Kv6arYA;`gapO`uCh5VvCAyrNzf$DW_Pp4T_u81b`ae>q3<+(WF3 zEx9FlcQk>smE6 zF@8tovC^l$2oVgfHN8~pe^C3;w-(o}7Tb6Siu+8iE#ife$L}m2eq;BqYyQdSDxa9x z`JJnYvjvT}{C;IQN$}>s@+W`z#?HOp=?}*p-;s{a&{zCejt9oMrc+)=3tbS3oW4Wh<0P*~!>v48a7iN8gxCUrYLZQ{6F7y#} z&FezepSTW@PnAtvzxrF;-^4j5?vJ=vBScxBtOGA}Ss!R#*ls+-#CNI|>a@zFG~lo* z8HVHNQVK|Ee3XN7Oa^m+$A#8ZPrpn7e+S*FR#f8RNFf8vk?NWoKmmERWO^ 
zTVg#<5q-HvJ#KQGMYg`too*CbDze_}cOcCE$Su3WQ7o}He|G)l(_R|Skk^V_xrNG+ zGA5erwJ5pNy<^;u>3hhn6?qq?DSnS0gRQIJex<8=-ramwzKWs;Ne-|Z7jvSR`}4+Z zUEC-4_MBGwoId6P@3%|VxA?W51^GnuEkM3+42ix)s)>D=M`mlTH=q|^`W}) z;z)DkNxD`P6mz_ztb5MPNNbMb@ImFM>c~=u098P$zrKw!l1dj-k=orjQe_!Y&2jB9 z8qIW5F;|oFWVY+kRDyV;zqF&ZkuIgDKhWYJ?rh84JO82|mVeeKKV`P#E4`?jh`NnD zh<=pFgZxh(B(6WK5=X_gi#oTWZAE_~i~Up9fLPb>&%Xb}JMpI`nMx=FM(&TI{x)%f zjy9^vX53mU&he=z#8Db^Qq+wVuV(QKWhs_=UC$mWvx2AXj`R^QhKT9qq+-q3mP{_h zehdtnYqzLuu7BOS-gknh=%3lcVwrPX*+$Y%QXR=jrS9#Wmb)){8EDhz`R=E-B<^LO zQRHd)p7=*C6K3mK9*LOrR}DF(hah6L*f&|N!*x#rce!$uD`LF0wUqkQ0!gp8CGV`( z*W+sBD^W7_qNwXhS`SZH(;T(tntL?ev{vvoK6#XB=zp7fU)o%eR?xb=jZ-pQtzuCZ z8oRWjJ$K#ls8lr+J2JMd1NWgaR`3;i$2NT|-YZyWQE(_8%xiDs&!urX>8a3tMJf?x z^`W9hoUuwuxaXW7rBCuCan4si>r*`A@~3wD*IN9iM$dX6-#iykH2t6a?El`AL;fD~ zmjM&PjDPK}A?gsx4HS%3LhS31BOG*M zgYv$jw%i(8on$0GS>y7x5l_RmC27Io{d6ijfA6ZTb$0`4j2ID8yYmsVN&pGxK|Z<-WMCpZ)#KJ(J_pe4Byl zX`$k@j=g`A(@FkVvtNmCxam!Cy6w)39Mk*$+Z@r%$UaZ;U1Q{&f1X~}&pkT-`SVa4 z=Fj8%wNFmQ`J!g=IC~CRzVk1iv9(j9WQ}C#RdtTkkkn6D%WCAwoFl+093GML?z+yb zgMZkfbf@9w*j=d7#ic@B?$n4mocWG#O0VE%Y>4q9Pe#7#oZ3q%Nku=JynxtBULa@Z zx4pC2JJ~wgpmzAHKB6|AP*YQD+Dm#MX7TbU+}Ypc>-?xL_PtJ@&?5Jm7jIWj0=%nB zzBQ0icVAEWxM^Q;ym0T>V61< zWAq+aU%{|&LD@y|Qi&2_{-!PvdHeKRNAkOO5>!xJy@W*Evf!u*21$O zY|`FcNtaZj4rjWvDG${Ex3a81|Bdf|*7&CnkmdHDIKXQ@YfI#9Ke+-mZFc|E>>7UZ zfO&!touWbM|JnSZ#l30C ztyjn?nDs)Uo-ZBgJ;qjK?>n6LqBpE6+8ym=&G0Z{fM&*e66cMqdC9uCz3u>|KP+;( zUcT=+-;S!|bep1Hn2Pu}K6&Od66=B9inuPW=@ZMdoQJ*y`|O+Z{5cms{eP0rwg0(C zn{fNj*M4dMddc~{<^AB}{oS{eu3F(@jmw9)hL7F66Fni4NDe$f%;39?{Y&hpSg9uT z8`R*8tG(77q|OlBJ-r=rqBywXsZ?e^Ux#3UK{MStQq9E*my{qn4Y%YxUTNNWCud|7 zho}8%Wvi?HQ4uw7*pr>7c7K{V1NsKerUCTdDMWD^C6i9rlf-jLHA9uhJ4jxk|30}= zER%9j6{YryJ^38KHVL-c)XwWk!QxSeQnw)~a6qzi#(bz4x^bW5?Ao)MjNXo%pBeeR zfp5#~LigqRi0n`sr|$?KQ&#< z0JsNRa(}U(04j#e5Pvl4p0HvSI(ub2E-}lKb+)Ivg}xPCFS~S9EYQ$BDoG-j>!H7c zet;V7e6rgF>24t%I6f$6D58N@&B^uG$yC6R`rett6(%<8&7HLRFdvF63!$JsUI=YP)cy21JBt&`_wYyWrO zAnzk8T6=OHmCU{&iQLuk_~nFyK*pv=lUZj&B{~eur_tZejIEoR(~-2|j5^ce(k93I zsVr*FU@$tq91=pmDC#n4BgLD9@F!YoNT3BIpuXQJROvcNu59*@f{@8VWz61O{k%(J zXDWuU`E#|O;(xgk5;!zAWy1|f8bw?b&+8x0Q1m2Om-IVXqqfUgmiK7$e#fC+NVS&A z2nPm%chGw9y06kmGQHAlIEl+xv-(QMD=8s>aLL7__iZSX*bW8}4y)oB5xa_Ejs2{z z#~Wu8jbg#~kw5EigmAso=Ty7CIBWAh9rv2gz>2+*&5U%0xSb5cudb3#<=;6Nc_8NwY^^Sa$ApHU7SDU;n zuZse83{pWpBy=hVo zE9wZbU$|D*o0zd=eGoBw`JOG}x}{@OTi4r%Fb;8~sn@)uq^glCy04Z@lMfjRW((BK zm?6qipeiF&=U&dl+^!8uGPjS8%X4IMzxobz{VB|rTID5;f za)|_1K=d!OJm}9kAfDl`c@w?yvUv7+JtF!e+Fxga=H-36ME2$f*%r^**6OdhF0n?v zK7H;#U)Qa3h|k}(cq?B;&xD>#LHx<#nScGsUd#TJ9OSrFo;9DB{vou~qxiglH~74H z-p1#Ze%56EoTzm@VgR27CEr3SJAEQ~U+*ewT`H5gWao^yFY5479Z-XjkZwi4VJPBF zH_MY-{j5*?bj18EkDc^gey%_J@9(vc*JPq!M!#)dTPe@fcV(M|6Q`_30?w;6$bX*g zj=Vs*7oxX>RE7BpH_Q&AF|Esv-c}JWVh%EVOtqoZt`6PfUQx6Qd+)n1)v?rN9s(Za zKkUoj_aN&F{CsBC^ENxjMaBB_SU4ny*o-6(S9RGO#Y4}aY2+cuqhFFoRg>d#fr*-9bjr5v0W^eO+`a|G9p zh5paF)KP@MQr5vF>vPt#o0ml#`u-2z%DR9Ko={8Jn~t8BE~Ded7IIg|NLHLAbPuvImNM zn}w>~p14<&JlV|l&bmp?Aim!z6qs?-t1=7d3;(*8I&#v5V%6)vx4;qJ&GviC`gsdd zlQx#tl^qU}cv$rctzwgW<$rr#^~0Zao)Qc4e*Et}Pal$+3a!q3?R@U5EB*A=IG#U!1oj@(zB+(PM?YX7kylGZ8@ju5g%|*}YoK(Z|eTJ3vE#2iyTVPpP)xghe z!P}?}JSR5JRymYWqSTy&NxzUxIjp;5ENjM=rd7;tdQhU{;i3p9z;9mDbF+L^5<|`n z=V*(4OkVTPeVq60e}C6~;(Q%*j{oGP+4&^$R?ZLh>O#cYMSos#pN*a+9oP1xwVI9F z*IpVAywYi`ziI>e3nS|K&l)m~j*&*j+rN1f)zXSRvFF~dn}|OKy^fo-h-r={_MyP< zfMc{8V0L_#J&F_hV*Z;26Mp;iQ$#{Jv-E!TzlMUw_Yp&;5^_Jk9#(?Y*R8%rW^4Y2t9yMbbgwM<$VoLoEq9jd{Z%vKJlyuG_wQ z_t%=C?$7h&@4C;|Ox7#UdcpbJFnca}&0vf8Db^UXCx1U*W1MMXX1Iw5vwe1cztxHY zDN(Uyz0;EA$Wm4CAte7Epro1TAIJn(YPhnMI^&bl>dL8aXbR>@>ghUvS@d{7bjRZn 
zGvF-Wv$r)YH?Ehy3!&LVdone*j#NsAPp#BJF)-_QM5V((3TYFyMw%}bv@L!3H8`fo z3XCD2gMai#--00c%+EQ`v$~09xp6Hyo``b-hxliv@~3Bz_wAUeh+Hv?C%<|Ju07r80}H~DC;@khfEW0^tkxyh zjsE#jP7X%#Gw)YbUDtU1Xey<2QN4xr?s< z1gmq=pWyZG2%H}0pnwFUY1rMuL(1_f89v6Y_Q6YVo!`3UTj%`FDcpQ6A?j{_Oa7xA zpMQQqVSU=MR_j@b17xab$jYoo(rm}hSkyM>HTVBvv$kf~_G$D6JMGFSJKLa&YC3(> z4d})^k42+NDwRZDc+T=d@GN3J{yn#T&pSUDiQhR=-Y5H>34Bl59tX{+W-TPm+I6}` z-(!|P`IXB$nl;kyDg6a`A#wxGuQyHYS%0U0w%?`ByT!T1*DCE&*BN0hx*WQnJZ4rb zCaNd$nEXD1f6nrlpWKDsx`>6_9E)ZDoquQRlh4RrXM!?bRBT%@2Kqv-nexas6d>I3 z#O`UeRj5VfU|QjjRn76VjEiQTcl8RS5dyyAJka~k?q_pZVJ^2+@wXHCfxImi9DmOi z=M#}f%#pMF0G!2N^^XMhj2Zb){~*f;vO1jD|A{G_OaF2W%qBVG?~>yuwM9ppNJ6q7eaPG5TJb;%nRs?)@kaMxh{ z>q=w|4+*$yk&}~C75B>KxD5_@j@l`JsE(*(#dAeHiv2B~YmQSI{xdHJf0gyi{^fZR z`ToZA5m&PKlHHe|pYL-&1=i1|?vng)8XcUWLRn82=stk$&UM;H+3b6xV}EnS^($$? zf`V^Fh^VQ|u#BpE+QHEzFvQZ4trZq_NRCL zIwG_|OcrK{WC(qud$Z7t9Dgu{dRx}|1LKg= zr!emGQvmJ}Vgs@5$&yo^VCB?XA~Cxt-qEN8$70AVsO^>9G??LF#rhX@0-ElrVstxRwlC4lO`-NBHH|B9NzYkWPS506YFz?CYWQmv4 zx+sklxo4i)FZ67EWq(~SO4r(e`sH4X--Vfz^vUgI34Ux=pRalUUQOVkNNwk+(`R@n z(SObA2+iFicN!_zQMHA*yt)warc>iOc{%(9HLFMb4Ce;MB|C$E<5<^cO*yb{ z_y+3fnabcm5{eZ|7BAm|&cc7gq6)SVUjUwm_#AnkfSo{1gnuQ`I@l5_?PVuo`8GxX9mmO1yAsup;=;9k>#q{nIm!q zx#daQt_;rqJqyt=d3H+$S=&k#NV;Gk{TSm5B`Ck-p^Q2eG<^o@mXZz&v2ra8$g35p zzp}kZMOu8MY=2!WLF}f6t}X&pWr>~^cD_kxlstxUxKKC!Xqi&ax-_gEtKNB)7CJK? zX;MP5leiU0OXe=hM9Oim>|^j{h>d;uDx$V}YB_%BPmd!zpQr`1T(R%uF^)a*C%+sy z(WDH9Jef^r)1=m8E-@}4GXSmtT**^)jI~~>HEky{x_>@%5jY*h5rT4a+nqwnLM6w1 zU-_aYqo#M0U9C_&C)IAce#>wF-j#1=rqxG%&gYk_5!*NKQ?8gNWcbjm*2{W$*}49E ze)by&#=6frH)3y-v&dG7xzyGiF(+67GHPEAikCU>c2l>Jur{ZZ@bzOE}BY=5Mtmj_Bwk$OjJ9vtAMe1+BkgB{}#{{>Rq*1h`|35<}7dyqTcP{ ztNZEWf1b++lS{9dUeTX*B=*&xnm*6nB+=i`u|N8#Umcc@XND#-CGuu_wN5Q{-3`)I zM}Me&En%!P)OOc6g;q31o>aa*@N!k(sWItAnmykg!FHKw!*QR$BPyuji<+hLvQCqy zG+O#r!El{~;L7s;WM3*b z7tG3Bzc27&&LXKPh7>dmIXDhB(Lj17C4b8D*Fq=1;tY1w8ZFXeV@NE4m^Nb;u(w=< zSx4qUMmUtbM&KTa-pyhk^1T#>k{9JXkB|`ssr)$@@7$4plQ|g$<}B}#=h*2Fp5zy% z;yWLDGhfMa_*&;@Zj{YQv*-1PudpF5d~2XoOTiG1!CU1sRl&FUV6FPHeIQTFYk&9O zxWm_M$K1E0DB-zZd0MuV-uq9RIG9ef=aIZj%=I}Jv-u$PQc5kfIo86Gd1tOSl6Oe6 z>TXCHQ79$Oo<3rtr$$#w2e`Q%E%-CzuI2-Qw%45f5BNKCbENmCFM1-+WY|NR7C+vO z^-dbrn?cIPHndlQE|%kh-x2*v@PCFj*nPwpoOGxN^%7Uq>CP&vnVeH+0rY;65&@G9V`7P+$-WLIh}M<<_}ehK0eg%r$VscpEqf}6L2 ztEu!}4&=y!q6{%7mTJD9TuZ&UcCz9YXOTr~>J87p(LZ(MQruJO$bX8YW_*$Gryi%j zuZ@EL)bj@m_Sc$VW>1cj;aq>salU7}Cod+ul2${ z$kq#P`vF{0kry%-%zyVmc254a?w^{I%}d|@MH=50a>82X@T1{EZ70sZE_1h0d-JK| z{{B5;zYU0!rsQXHA26=@d|lMJe{iKaUL^0M{k^X;{NWd$^3Qwu2Zxx|4F7JwiqCz# ziuvHS{g^$Ob#9kuH#ricboht3ETo(dtZn^yeS45T|J!?Yaep)gyBudw@_-w2kN;}F zHna^VL}URK25(f*8jQtBi7pHDPoefw+Xm2=*`*n z^h1N$0^5w-c43zUQTF#e$<~;BkD^O=+w!HYR8wkuGyCjEz%egr1y>BgwNiG?JIm3* zw~D+sDi*IwCx3!0On+50-)~^c=o|f$SMd+5WmwVk>oR`w%X}_avLek=H&F+llKxaH zC0t>BraFyXwasNrI;H8sCdBOM-ud;owi+>z4i%VZV$bVdK%tQn(1iZ+T zo=!GH$oIMw#p>8_fv@+ku9|R<|FNc~t%X?%ZuELj>y0yJjZwHJy47iRneuGpbCBKZ z_8K(7sNOYR*G|g`ORn-%1MaV+EW~xh;yS=1VjvA4-EB<>CE48mPp%t$xFF2y=@I>- zr+DDLmw*4!mzDRg1T%EdSsi$t&z9r9T9zI|_WW5#e9pb7iw>eL(gN{0pWxCmenGz9K775O8c6hHKJyrXpZ`3A zEN4fOP2WBGH9lNlY=^K_v3Fs0N2gGZ8UXFLr*Tp6{iX<(rtFhy*O_&TLd%Hu1|LvTdI`MeJzdS~BfAh3#J z#w>VB0$2Rq&*F^#U>395aU-7LPwlvADc|+i)$414!<4~XTT(0ON6hWPq=~()H-Gr$ zdA*4AfGsQU8-P$SHO%AX7kCJ<*Ly};erhzFf(soKaBa43#Bx@nOU|l#%WFl0|9ar7 zOG*uZw*;0$^oJhkp(T#|t)IXhe){y^KA_+S<^O#!o7w#EZ_J6fMzQX)9-^E$W!GhV z>B2!6pvj=6k=_Gi3vbnDX6LzZTYrM8)CbU3w$uJWav2&d=pDUl3{3o6_=4)l+O(!N zRI})qogIhSTUt?9h_fQ=YZY74%ja{$@aJ=rkNo-EfATBwxd*am^4ZA)nJ6L2xa=MhRSP2!BWqXnV|@ zOb*}lObpeFdWv|RuXR*ICJ%y0ucY8990g1=Jl|aay4oaHmZ$hXfa|Yk_aw{D@I%b_ 
zKRjaY3+M1PO8olzoifrxm{a2&>(+>WT*KCokgCF73%waPWcE`{tP`LrazXbw}hU=R~D1&YDjZx%;{ai~`e0N)f&nB8bA8h5xI{ z^YUcuaMqW6@T8*ura@m7{qKnWG!+^9xSP)bWO}r}c^r8!?SDJlc|TWU zQYAm&in+BNuYd1w4wIclLDxBV$c^TPYoa}aK96Fa`MzmJfbciRjUXgo{OG|oMH_j^ zIY}t^)7K|eo(pV5#+MMdNovJB=WJr}6Vd;KBd^@Gnc;L6((`hvaGF2;zi`@-sJCVCm|rYUK7Y^q7%l{*IAz|LYk%te^l$Syjp#Rj?zh0Ng*l!f z7B%c=#-G&C*wl($=a}&vRL#G`T`)ASt_u!(_BF*XMlRg&H1vWvdp3r}B$K>PRuxOY z8N{yKjGROak}K z-~Yr65g$AGS`_zZF;}J9hy)XUi*0MELoW>c)ys^GO>jaVX{&-!Bu4{l7cR*`1x~<& z$7X|DgY#z*d+|p9>)(P#U`n$`u?DV7c{%cyh<_(^mrgU6Gc(s+D)%!Cv^mZ6lt#GP zeDZp6UVixZ;`@1SnPY@LoK5idKUiE5`{6SFuE(+%`Na!md+n={@zsM6*UtUO-h@jp{iG(UY9s#U(Eg@N8dSdC)0F#IfW7Z6)->Zp6Knm=d70BF6f=Jjuc2$ihrI8UJ?DR6saZAZ=+`Z=zjfhVMYGVe#i9?xr*M+Oc1L zucIa#n#~L?;26X>Q93txhb1K@|3TrPN`=!b8(_0_^@PFIjD}4VBw@twCtUvl3n!r!;-1M%{`Z~?|npiIz zJJ;q&nI1X`HB+eaJ?b~1+kr-z%=x))E^`i?;KOT(2ezIP?pbZ*lXKo%9>+yaLM#z= z>Cbu29^-t^@pvMhR^y}tuJm3JR{QT4jj|c#;PRL!`!1slluzxeLAagd)!{jY?H8`{mqv)F`#)!B3x8MV$bBt4F0%l8 zN=(uCdui$50ZA<_9r$~PnP$wb!Zw8Oz2F?E=s7T#p1s1VTLp{LVID}`Zk710Vx}Pa zd*o+he%38Bt5G>fsf+Y7uYc0Z5S$*dU(lB&WdsjA!^QmI z693F^Mg7aO7W~hQzkl&J29H@2+$S-Q&hpH>FC=PtVj4OaV3DL>8W(fagqU`!*EQ-q z_w!*HL1_|ew~rpj-ATc)`rb^GC6LG;QFMrG+B9lk;GDGREvcaYq|^crA*(1F_GXY{Dm9-w;Cs4 zLB&1$rGxTkhVchS{O4Tw@JTcNn9KW8!&1kdcJP#Ur1s!Bjh$_$_nwiOfxfAAF@rkF zQ!DJtrKyPqntv&4o!-+lJBq(Hqa&C^#~Z(*`xp!-2L{E`ah%m5U^#~g*gq8=!E(vc zHYK=EA+_zv*p{C=6Z1HRR22CP4)Y07Y(SpgaY@NB+y15aqmcu648MYo+tbFba z2U5tVNa#rE*z_& zd%CHuHZK#@(fI~O6AP)7o|r$hJPlPx)1RkXa)xaI%@g9KeIR#-N)f=_)i=SL-{IA7 zv#kGL9)FI2T*?moBhIst8fe`V>98~%z(}CWX&%+>l^H{@wqDEO+PP|jO%fOH{2tD4 zQ$n~s=7Vz{F?;^(;{u4PR2e%!M@UsU47)cl@^IEgk51q){;V-^ZzBK0HAiPF-@E!B zt%B@)mM3a@>8WTCz0yYU(IPMOD=GkXKhopPKBqY>ctQt;J}r|!99O7gZg1EmFB^_~_Bmh7RWZ;I&-!x)exEhx z#^>z*#@O$k`1IS*cFOZm@r)0B`lsNMp1qg7?ZVAcCN6|5bYeZklN{IcFa7lIjD|H-|MxRh(fj*1 zb5wi{z*(a>1h~$Xgm1v6JSv4kVo%)HoPXo^)&Kf4N6B)utXG-!HZrW>2S5D7zZJOi zES}|Y=_d!s`IE$X3#?X-d-=uH6Y58sqf))6RsXr4yy67N|8%Xt>Vy7$G~wISIf8?0 zFS>F&cD8m+-b7n^T3hO~nq8{xaH)ldNZ^`;k^wIMrnQ24eI5;^ffb_ zMaIPi3r-y*V!-Uxjjv-3jW3?Hua%ybLJqWwIjl5Sm&hy7mqHIYdw<`(M6OiH@QX>t zhs^qu#Pghk{0r|V)(A0AJZn*3NN%g}T9?>i#HFtZjb3yvXcR>?-JrGpZ3dS|@*Ad2i#ZL-nI4D5X11OU=|8`AFLnwY;<= z`Sj^G;8TPiUPL5_(SN!S^)uNEi5_|=XhjgGe)I&t+E(8hTF%d*>@3Rnvsm-;ApZZN zrWLv|lG2s({=r8N^jpt=GzZ9`Gi@gP)P6X3*-Sa_yNUeqcEnHKj=FBRF_HGC(F@kG z)`594w>JOiN1?}Z6FeB%40tUVd~%H>pb;GY^=}aO+%E+_dw)xAIz5iKO!tBN<)gj& z(T~Y6xxX-Moaz7iwN*n0pUK=rQ6Fsu@=2zP`EPmD8BZoZ>oQ(M&Xviv3jV>5A3A6TqB+3&?}6_t8@^eN zvl{a^U!U2I8459r=PYHv}glMU5@>?c#n|2qHMQ^nI5?G~b(09%d4{G8uSzJHir%s38m3|G|uLYLqmS8xHZU? 
z#ed!r+7bMn*L?b4U%dnNDL9V*yifldx&~?=zVnZs!LYgfaI&w=Put~uZm9IW`x1&o zwk|&WP1XhV*$;o6eP}-Eb$zt!Go16c#-w-p(b%Bh^GBn?&eo3LTCo=dcQNmcz3G#q z?a?VVYQ+nEB_~Ki)OIP=k#m8^n(7uHH-EU*+;{e}I+lHFI-ZA>8oX<+&)i^$(Wm}{ zpOQVNVxbKdgWSoIQSXfa1zhNZpi?S1Q(3;t@92zoU<=I^IC=K}wA>JQ0<^ZSW4H%D z{2Q_F9pOI#|K?kVWPN)v*Zrm8D0KhOwr0KMIIBN+X|eWx)hbzUU&M&5l}Jb`>woq# z21AlwK>}T4r%cd$)p|lGY~aId4#I){Xn=8VkcW>o^mZ`exZcHldswQ!0=ShOdDrel zr_ms0D3m5s$(dnYx0!*_5qKbf@5F}@YWUE9 z@u9Nkn$u#8{I0pUo>|QKO??B916?Bv7&*N!9 z%551D7^ATF#L+g>FgSo;a)0}Qw{bcy{uhkunGFh!%+TxCwYJcjp{D)l6pLJvJeaz9 zBI<~Di8EjH@kCv~eaL1e`91pd;D77*fAQbH^^b%`*N?{Frx%`aZhzw!^IjBOw{M*O z_P5HB%)ZPtgy<6(>PR~n&KO9Xd+I7r1&`2jzsy>2>|TEpT`|)JCx0YnSOTX+orUHD zF_7>3yAB|x;>k4SXNvxW){`o5X44L$WW*#_B3}k}E-*+h+nPG{-KZ+-Msyt;&sJ;Z z_yav^E2AybS2Xd7FZ7qMR7%1Mo%4!W5g;+Je1?Qz=iOS|o0eBQPK8XBQ{&Lf&S8d8 ziKz1l{8`z|t50Lk@ z#LWMT5B-z37|dk+>{&g01Q7iZT0iJYrflt6*KtXa&{yaDPo&TSNRwln>eJweDJb0EsLjt(1 z|M(2s=htT^gg=vMuK)e}O$l8OOWHajjxQvYxnd9VmHn1sYC=gNucd)!avUM;RC zbvquj7S1#y4u8O87%MRKmYUe-CGSB#fM#b(3x~y;rm{M+xBr9h=X@=3y{lB*@9*^< zI`HwcIV0G|4=*mCr~KYy8#}+}+m9aTf9@iUyM=Cimdm_y0Xh$hZW5 zX>)(HQu4ayFP(b&nV&t)I?2{tekPQ%*=W87|9@zG$bYoXzWoQD2i&3`kECDTT>$4l zm=T1l7tH#H3Ob!|&uFv!JOdY;XV7!QkZ8P=V-w@2fA@VfWPsb7@8HY>DvOt*A{ z<3i6t)b#0v*%`Bybl#0=MA1LdH|%MrrRXAlL{3P39ZJj6!i3u;c$DCB_&VB|bdJmo z&wosj-aY(Hgr^dEq^t!S6M)Za-t=xcw>uO#Ja4{+SW>ap7;KP~7qut&DfBf9+55?jx}krb*OG&#v6!8XZwD z360;sb;$Gkz59Rn^;s9~bbQY^_;PLF2!D?exuN_U4dYcrJaQ}s%6){YEj(1FowC0M zlOq4<1fyZt{a62r)4$a_7T2X{vd2_g;5UEtuAV&g z)KO!0NSFG6br>Vs<-vs@|&0Z)qmg3 z{g3kXyCQ#K9XLnFdOCoYY5?YizOGnXmFq=#sT~t^MpJ>SfqGyLR4H^V*TyTTCiEuB zBNCt5IjakMV#Izlg@2sD_X|8YTwv<34f?zIg?&87zF2oLFBO^vUrx|py1aQkmY=~Fz50`J*s%&)zs*7#!vKm#M*|(=nAbD z=!;UzAO#_~*+^D7fI+eo zG|Y8X!|GjFab-A0p-FC{b(DmihKe=^RF zzVa0KXfa>XIWHSNm|3C4gC`OBx@iu9voX`X(jg|#2x&kIU8Un%eFj6+F{IH8;u|@&s6R!IDu4CJJ9d_QjhM;vY_vJ; zc(8}zoZh+F^ZPw}KGwmCB;Cy|+N8JVEM}&|{_=*cJN~Tl8`iqucf--k`SO4FM*sM| z{WBKP6aO{a{=~O0=Oyp6{_4y8@{JLBiP*~@&9JSOzrSR_5aDZS>dZ8yMd|=gr6|gLTcD>w3b7VZP@Q!K++pVqfb|q)f`g3L(!#H|a+VP~PG;0JQKWM4`|)Sc|*%`&1(t7|dwgZMzkF~1KGy^f?60%$w?1YTAHRN^ zzqOKuj(dLQ5j(wznI(B0*FXf+?$q7@P{Xd`1k}y&HF^Ff{TxGKe^S4Z18{%vfDtn) zW}1rM!V)6U!RNBRn1#O_qlgz0&w}`x2!BE^&jsBMA6WwWJ#Zn1eYltgds1I>^PiEv*rm?}<1oU2Si?TA}8a$Wv^?q?D%~bd-&GhA$baPs%^s(K#D7^~ zqH}qyaPA6k=uBs@nL6mG1QNu3ET(f@Q-*nF-+uK2f9tp8nCov3)7LC?sEhj^o|)GP z{WQ3+w@>GgEy)! 
z>M}f5n9jHxVxrKMkeWp_$eyC#D1ST`v>a=npAp-zAoJNi8b&|d=g&Ghfbq|@Yl#Qh z`f<>x1aejXo`v3wzLLf@+e6PYzuz5kzuV$|i+}6L@GIU& zdoYi6UoEEmI?&+w$+1P8%yWU*@@9B+8cT5GBXdLyM+3u>gjYeoJa%(V2y0YeTgOWm zo*lSAB?_1gTxZ#{dM=vO7qaTPig-OeP1ZChufq{t&4|9J$O$v-{&=-B=^5jxT<#m- z7KNEr{ye~e2pkEzOMlK4=6~rQPL0^pCK^!bRv-+z9f&`r8tgxOo=9s+PdmTw8^{1VK*Yb=j_#jhJ080D4#o82r9db18pHh(^?Rn5F73wf z9DF!K`F=5;hxTOhpY|2!7A32AE*5+ca{I&^OC#p2f_Y+&~GA z7+>Rr50?7)^n_E9J$KALr{I4E`m(&NjZxMS_ep&;OVXCUwZNYW^v$2%=&v6BzwS5k z?_YV>x3>JHwfNPs{PyBlCwWaHcpyBpo!sPLCPqVInx0^d3vE`;-HKTd9+m&PHomXD zVO9HmEj=>qs)g5n8J!!>9bK0Duwdb+qv!)}L#N&x3T`&=#9y%@65*xJxi#7pk$fWbY^mA6y*Tt$nfH#+t4?nnvf%-pficp^ zmXi3;@@J*PBs!lz z3!i$iZvbi3vwfq68!5Ou2~Ek|4*I44!AJzs@Z)g z;Wg}nf1HN$ff(N>A7;LlP=o(>VtgTZG|(g#GXRmRMe>=vIf2Uo7hL=-K1%=1m`*P^ z5qQw=Ifs8V2e>lPD3rgwCqD01_Wp}=OZ4E$**`r&-4r}Xu>ZM!9SLlie#TfT%a8i5 zxVJ9kiCsLFcvil@EF&GZi>u(?jo)Vx_4t3(4rrcz^cm=he(<0o_xj^4DteyZ z9>bwjMQ1GhpUtmksed0m38+!oBS+YDM}a)w*!T(jPd(G8B`5UJ&C4LNL#!197a+&w zAW{cE8?<9~W`F$Vat%8)N4_)j-yZB8^4EjzP7C_F#+swYChER4P`9OHmKW<}v(z%1 zo{E32|NJT&4t?lsmhEuEg}}UV;9^KzaJhvRyh)Ouhd20rNrk11Jt?k-qbr_*`283so|Ul z0nViBgKHHzD|-wdJ?ig#ooIk|^8GFXz$bs-{`O5XF7F46z|4y^l*KjHQKC(x`B6T@ zgY7Be^N=10>mCA7l8U3mfy&b>fCC%rk1Kt74MXgt3MYck6?h5KA=EI$;XoID5CR0B z#o0oOIICrRXi-0-vy$frtj}D3TEv(vu6~}K$n{yzy6`r*$_=5zD{4e^rUjvooD_dN zI~=x`d2ysmVZ5%RGp$X9whS@GApY?=tbP)+z9Ndh?b7TMUliFVMhO4D?0L1(#pV8p z)Smf)Dt_lzV@J#gzBN8QOZn5kCSJ<_t!Jx?I_1nd>JDEiXCnQqC;eAGi)Yy)bQ7pb zN--W4h=*z&Jj{Q49bkv`Kfdo9HmQH8HF;eU@j8Mmxx-Eqyfm?1$mz3noXw9Qmzq2u zb1V43{O|AiNc!VhuX8?S+(bNRvYLl@n&$x7pV?l^W}d%#^xs-lzC&CDOQ0!n>!gSo zedcYyDD2jaiMmP3LK{-}EDgML2zE)H3t#z1qf)$tCRj}7#>8f{2Wgk~2BUu~_-t?~ z&++A^(sL5t|7fUaqwvApgL#S%W?JiY;AUqrE7d}{Y}CBbp08W>D0RUc8k!kf9j8Sc zf;Ztf!DcM-TWN(~21r+t7YHpGcpFbG>r?06wxVzP>Bqz{4FBMoe);GB%m_bcX_oVf z_rOO+__1_oGhMsZi#p?3OUQrkiTk0G`CoEwTU&H)BB3n+ck}7@PbIKDQc3q(cw&Pe zfv?`}swtQX(@g2zuF~{65Pds*E{gG9%m~P(M4<<(8E0;U{kXSi@C)N+@P5I?AE=1t z!Chi_FEk}Ef$IP?zu>(l<*w5#>>dr`fqvGcmJE0n@0W-cjA>|k#p0&&Ui7^-{gb^F z^Rc9>;oA#t0X`a9Y0iIfi(QiF3(|`@^10Y|+>g!!ullnj=8fNhm z#ratn`7wH^jyQ|Sr?NAd&p+;t@TeC)uYFZHR&3V3B8Nc7hzWoA*_g}>I~%b3C*l7g zc$mDF-hS~uDu{gAd|m}k1r1|zX{X)UZUW#G)LlFWt_i*U>wcSr%q?wt$#}iR!4fS1 z7$^b&U<$sC7e8t<0Kni$eT)=1u<P|MUpV za+mLV5_&XwtjT|V9-Ys;=Bj}ucd~w%sK>JPC1RE6vA)RZQ;4K5bjQF9_0g`WrdOUD zTHR7fV>WB5yYnOKF;gD~R&WAGx)I-#UExW)EtpP(WHi0d{PmsgY=(*f7&oCqB6^GT zG%sa%2M4>^lOF9$>Ytr7-2vxu2b)wuyPmaO!V$kU?8twu*g%Jtv)So%g{~8RAdaZn zv`sudGHV1|<4S1Uf-8!zQST{%0b>7`_Xe6s%;$t`Eu6K?y9S(f!@L!5@Y171x?@Z+@Bk0pz_4{K$UI zbH)0HubqECS52Ic9}OiCI~1pUX0AgT7fDdW3?k_16BC(If$69vLpY!U!w6wT3BX>; zl@2lM*XyF%3eXr{L74`Jmg+5Ij9)0P3C;A{>nK-|;|2=;iUQ{h7X8Vx#KS!xrohvA z+SX>+OgB<8)|g?_M-kXk_$iyJ5}m7_I6r^A7Ic3-wGCctQKaJoPU&hH*QoNgl$Hkf zFjk7?23U9MyafLH6=`>&YSBS-40;RoGfg%jU?=!RU$3hvIrX=`odI zF13G(Q;D7sGXSBFz-sb#C_rSsy;6&OOi3Ipz`?`7;}=~n@vJhO>DQa-pZ@!Q`9J-^ zul|KoC71mAm40R-=Et{OT(5rbk5cg2LX)N6fe~{;ABtq8^@1-13LEujhupwemQdMr zeRPp@qt)m;*BM|5ZBmjR%zPlm0_|~kDrbMMFyVogUOU1sK^&UT(L)z-xrQXP=m}JUQEU;i&l!{S~z#%q}M~9#ov?9j0y{d^Eh>n!pim z^XCW#?uD1)AI)vS%R%3Xv+a*&h`<85SxgZ5#{Wa!n>8z|ZQFu>CF4Hb8)gP5mgj%I zB(>B60>Vp%f&v0ohzJ?^-?I&zbAI0{_CEXU8~K!9#9C7XVKB-*dT+h8=;Dt_{`>KK zAwD|sG<{(|BeDAjOmb)&`85&cCjJ@ZEe%{Lm=_A0+9AFSJRso*A#5oh+&bG(ix?I( zbY+qE0qO36*Yd~5OfcvJ*VV5)XtICDBlZcnj=u3YBu?~&_x$1D`C!|@bN$6Z{gs;= zGQa_R=jh^^A>M@8vkiSHWljJP-p~u1}1m7a|TlyPv%mTpz!p_g*#z(*rd}3qpN)k>7f~Vv#w*!BxzcjgQ z3AYL2LaF_#)|GNYJ_X^=MT{7RY^cMYHE<2atGzXpvI&`x*0&&N2QC-d9wSLWeC~QX zg+kJU1Cpf<*PYg}2S3x{q(Q$VzJD+hL%@n>g%gr_EuwEgaAQO>7J3`d2qgQ+fg2E* 
zCgQ)#V`iO#*A)3d7BJXsUu%DC`A%M-!{pCoT@F?|)z}zvgnO@Z&TN65tf?vMJjZXWvZ*I4^-UHoVR|MvYCH|qC(W6(sVx!^sK7|HjcbBQ>mzY7iZ zFu8)8$L|swco*@B_5OcMuQ;qTr!6*5Du7tv_SiX{)T(5kyYVC+27nz17vBpIV~0PL zj5C5~hF{IRIvEmi!5>WKLV}T7Z)cf|+y&BSdFWw%v}m;%`(V2K8d$uIsN_@85vr-3ycDNvY)e(^BC-~NC5=UQezhR=Hw9wSobuYDx) zY^WA|al@A3qd`LOhh&dG@Pzf?poo!24viX2d$*2L;BP0Nu{~w=JthSMdNn1Lwcwxy z{?E3`Pu>Luj;?<-#WqCZhD02`c|H&$hCh$YbtZYJL-Yevx6{yG4)ChTXFMVQ0Qd(- ze@$bm0tdH4e4`f82t`vt(2ZS>*@T#|S#cwT?Sc|Nuh!Kn>(!TO5Ny7U5gB8&+-V7JTDnn{1bGt?q(SGd^6_~5*#u)qG# zuXAYV{Q>SdHFAQp2Oc_s77^%;t^(1pg{~a(8*fi3BYEbcddwB*dxEEdqjlITx|SL` zCLpw5n`;#foPPW^Tny5Ah!wlE-viQ2Lx&2Ou{kt zuz|9&*G7Le8}CtRLC7YE_g`+s?t;AQbG_I{;zXnt`$~#}N+No#Xg3c0OewerKQXw! ze;)Ljz*SWw6{xy7Ki>e+sitehHPm?|&01j17S^P4kQY0Qo9%05uq)*JfmivrbrNJ` zL_1y~aS5W^^&3w0+y5QIUq<4X)(ZWGqmEd|z;}O-l(aki?ob|u4;yV>IV^k<;?|LE z&b$`j*0>7Lb%E_>a3GPx+J|K+eyHGN#26?IhUvk8tBd7hBs zLgyap8@wjp{)0g`Z-d(6eOEja z#Z7;q#+ZWBq&zx~IN2112~!JO%5dase%n4D#B23Z)!qBI3hQY85{^6-!!3-^z)cBaZU2fIm8?CosK2jEUW zy0PMIYbs!o?vQcF^#i=wMKiS=M^O;tWWj&;j-<~`%E2zf{P(-&;~()>U-;AC_$Gh) z&X9-qt1iFxKVP=ZCrA0aj=wbphkG3owYy&c?pym4`n)ax?u6P83p)N_61jSQwYhQf zRNgZUb?30|^v*JzWoE3V+i|Y`=Nf-- z?#AX3d-2FY4$(O|Vc&k_egCOvV1a0}5=5zuIRO{em|a;0JKC|p+rMX9_|Qi}L0ixa zze;yx1?0=M6o?J5V9vfxkxw{j@5$le<9uTKsFr073u zFt~wR1DsHUJ{Estsa2g}&LmiUz~$Jnd(i6J>YCe}XeS7YR%Lxu_a z6+YGAwjTFjrit-WoPhn=4?L$x27{@@{qoIy@g(+$34Hr%K6CYwhOK`M9d27x zu!iV2GFFLi63GI@55;E+;X~aKZWKIg4^ad7bJ2HL*oc{}QTc16rSJ`W#Usdms%2G( zy1{?Q!Or^Zm&iQw$=!E;u*=Y~KwmAFh(*I+ja&-k3ZT9U4t^V8uYl1?ccalZDNMHj zZV4M*#J$;^2L4S!9mFaXx9fj_bRE1j`&z9$hyLsj2Amq9jU?q5aB3f%C@|XC_q#vi z1^s-`wMPGj`RT~8ZTLPgofLL@hPjb7cGQdbjZhKLRU-Nhm?l96zZO z>IswYFFX}+z>tn6xEKjO1A<-rU|D~QHw>6I8yt3MU$RE>G4$GonDKvL!*YxQzbEV~ z=f(gA=0c1IfiC1t<#-OD4%^9_Jr*G2M7&6JF6#OW`hV;Jb{!r21Lx)nlMtw;9B}l( z1;OPj@VhNbvpoRLKzJwzzb1H(raTrqVvDjQ4vKx(;L{!aN&RP^Gibv9;-&oX*!^e> z#{3x_o0LtuGbDF`U@(871&DqFpAa88#ZkNtt9yhO4)Tg?{Q&x78_I$3q^#-k41&Y`43!XW6XR*$CosfB`xUV>#ObZ*(=MIA& zw$vZ;*FO7!4@cL>9{b^E`N|za?=Hv6dAUN)!_zK_U!l=^RJn_|^aXp{1R0=o>4_h4L>5tf+ zZ=LJU7{}*tTje+Z$DnlqKX>go1s1d|d&1r}h5Z0AS4@gheS;Qis)H(70$u}i!5-Kn z=-{Zpv3PxOMnWm#B`)G{u+xxy8zIB3E$sds>cB*UH`#xIE%%k*2A=+&=AfU5yg=Ad z-}U+7#3NiiL%feCx#*v9`N28-W@llqy&il3Z^VrS8(ev?Y0l8sBzP~Zu?q4Kf!(zt z57n27Gne%q95>bEmX3(OPY`??7~mlwxp9}JMn^8!L06##<{q{yt{Zy<6PkLMOi7-6 zeA3)0V~Kz8AqS|#7u}As+LgH(NXbO#*?|iX*h6S|!LGuZ(}Bx0Lo$Yar9$vYnl?EP zK^XX^v6r61rril|QC;!qqrfEq>-W(=A9=#YR!5|tnMjrRh3ooboMx~Geyf)sJ@hYJ zINJO#TzJ1YABgWi33z*)I~l9$y_au#`lxqfaP5D=(QZF)KA!dIqtga`6uuFHXM<#P zfY@zzSQ(ff!T0cW9C8sRgq;E%itM{@Iv{c`AjL5Od4mlu0_6~SFs)nSrzRR}WS{m* zfqwmHXUh?P2JGQq_^dD6=r^3=@A;3P7|*xX8|>>ZeZRlv z`}}{A=le%r^LLHO2{@E@`!Tp`w#i|O7?83x9im-{y_bniwz;R5NBJBj8xMGmb}Z{1 zY=^g}iT(;arUktd>nrvhy$4-Q!jTT2z9rY zO4tY3w(Wse3U)r(Cwj$qoaV>Q$2$zzUiegr4%64#3yx@GVgaW=*!}Q#0grDY-nyV3 z@GCfY&INXA>Va3j;O$AdJOy#80J8-jg~Ue`mTVH+|6ryVY= zp#qlxzbLpHpq-DreZ*PB-s*o_cw|Q)h6@$o+f=NDh-ZkvyTJeShxaN-KO)@a%=f-s z#a?mP_ac@`II=!-6L^Mx^r-&0hMk5nVZ;B0z5M|sh0s*sZDQ9JDi-`X^#Jho@82Op zjvnUm;CO7or`Q&IL<>e8a(OXMRCd-KNdLmvB5O5r=)TwGkwNgId+2}me(_0kR z$o2T%pML2vd~6HwRM4l1Inl zHz_PS*m}dhXx)Kf0}5zu|(<-;y$F0TyQ<%QHLLYGrFMl&QaZrc;_Rk?E{ z<)!sj@zM!M8Hj&g27W`z3$Cv0Y1%wJ$Gq|J&y(?1T#5cK`^B5_!HfOUQo)${(SP{H zZGG7TNs79~k`W)Zg|3|;t*$Qp4r4pyoEmi%20lGrif?xAZ^vj;=vr_J^7_Z@7LxbC zM%in`y5;kIt~iGN>m3yEhs-2mz!04cYF>j6Le*IiY+!$EkogO9Z`l2#7|Ay-68c>q zd)EEJCRhA2gKoakwNrUM#d@-bNDX5CMFgBDEsNJO;ty^&@EJEzupVn7R7MuL4k{uN z-ynW+Lh#Aj@NIz`#978310M)XlMfatbV3?a@uxrBf#)P~9WogZf7Ul%^S7A$kN^Hx zzTcPc?hk))j)T<+lLq!n%?V|+jo%1@W{7DVWK0g8>mWA?W5Lb+LVfD+HxwjKSuu0O zVW3kgC%z9q1mPtibv5L?Nnq&ZRq$7@C6oDZEPVO*Q`C1~puWMUgZc)#b-;(4(BFrT 
z0lx09x%V3n^@sd#@MV48FZ*a7hNnd}U=K;4Ca-o}s)OQW?G6`ny{tX*H z)6~GO=$gbXUW>}lR&E9sBNN=9n6I~ThXi~-T$`v?SO16QANT>Fj#D5rVPsA$-TTetU10ymw2;UWa(q5BBM|SoqgGjWy@j{`|N2 z)Q5k6;a3c3_-(=leVwizm#jt~ngt$XgpEdGRl)mj`rsSB&mm?69o#86*1%^+xVHbY&$ss;PDaBX3xg~oC@}7hL=#|2M1YR_VreREB z%!3Mlz|Vg4&^~#Ng7?9Kd|?Dg|NSj@nSp-|M|jr1bxQD_;I;aU$MYwr_A5pOie-%* zwCsY%u>+0*wkEiz#B9Jksio{p-E#_Tj6O2oN0RfsBU%v!G>1UJX9AZ20a$>BTQ!#B z;%F$ytD{%I3lKl$moLbMhCRDOx^^TvJ^SSg+Km<&yK!iOmqebIm&nk)KyIXLOId#e zcq(9VzwC}Bu{*%aNqPgxlR|#iU+oHDAhF^OeH;tOU+k#tgm5bGu)t%7!P<=qH*DsCo3ds~^aDSXP{gN8oU zKwk-6DN?u1;HQL7PWbGTkW)VNpJ7~(+{HW=M3+eAC|kmX3S1!eUf`>7fa8DS!Od`* z5-q$Pe5>%yf};WWGjYQN&>JBAi3F1Hhs2k_by~IrrS^q81AY_So+@%lzxEkm%}^J_ z$ILnFRr7|430x({2A+}f_PdCv1bJ(~YkfV-mru-`xH-xG@5G{iKotr;qa*PzAiojR zC|~jVkN(9Ezr`;A1i!#InfDkk~0KxXG54{HGPT~%Z z{6q*MiOS##yex<8)m2G&S#}<{SUsYVVD5#f z8+WC7_-F8#*(ybyQTN&v@#qQW?AJejAAq7tMuqs+VRXg94$fd~L=p1+L%9{=r(Qrl z@Y`LBfAA*m1}a`kw272LZ1AYmcA&v-aa1 zdNteA(@5`cH~KY4zLsbqs=5YHvW30YT0x1hJh+rQL%#g<+5v6Ja&e=JM+Y}9M=oV|ZsyzTM$Xul7mr+jfR zSYF_J|Ll927XDcHow=CkLiGPaKgMeMD|SZgKa7Jfp86lmQ1wsS`Rs4@^4?Wllb;_` z-#$&>}|Upjv(WPKxT3GTpeZTZ2+OY^HM2QKoNd?T7n;(p?a+_(ak2Qg`ldO&aC zKA&s>Egdyyhuu`l$m}lv_+7HjdE6&{p^eDicnL-LmF-TUVfn!W5qpchfNwix5%&U1 zDb}GM9x7KPe$sC(6#R{jz;t|RP_6ICso^=yo6vXWdEkG=a|b>uR2wiYfa=Cb8iA@N z@|b>jK`#UDjo_Ti0sjfEVB)7zwj{TPtPN8`1-^~S;t*mN&_TLZ0#_gmZ*38)(TkZ8 zz&|$Vi($?f@(I^iht$v7k9qyez8(Ax3f4*VuwS}b90Q%6Z%m2+o!CG5iHUt)-W)uy z#SUT%dxn4WbRhA6*?FJ(0*C)lUnFi!;ygckUEp^mz5$ZQ?1-)WP>~Zp;9-bw26#^% zm~^Zg6Ztd&etx(od=e?bBq0Y(^}wGx=qt`b5_g0y7YvNwbrs*@FTYTSeZ}A#jSF*R*F8&bT1RwC%J2ijEsUtqC0l%qJqnHP(ACMQD*d=6L z`;5;oyZb8-b-+tW-IwkjY!>L~f|F+l9%j(IP~>whjbQ%*)-~tWYN2nFGu#Oy0>MW^ZvxeDKYx1l~^6hO-|B=HO4w<5S%#C-tC`f;28oGYq&mfrleJbyuca7^l`ZXsiVwd12 z1}K>2kw1&tQNinjJu?J2kel_o%yduiqVtzkc_{4|oIp6PWFZE=WT5 zcYoV|u@?MG-~G9tw`&l>!TJ96UTS{_PC#%U{dxz4Mmv%IDHqsBm9725`Df(Fsr|z^ z0pt+!^PF}SqTkx#vk(7kz5XL#`ak+UMJ(hmKJ>%i)O#yaf73po9sV1jjL~aaf~)dnJ3RA0I=v8Ie5zLdx)8(!c4u2`c)9cvWu<4gDjcECjFjeL6R*k66=Nv<{MD1X@r z9GpQdSHdFv-B13T=Ko)Ojvw0`eYji_K1ODv9d;W5OJ_C(pENX&Yy z;~VOLm3|w0VFsRL*ua2$fJ+!TFWG%cJ;$ycmz11J0Lg~f|I1MLKw35O2>5q172Kl; ztr2uBm@>9CV==`+lb7uLfN>i95ia4s{G%WFlmDU;TnxcGAub8*6l86d*mNpG?i9Ft z5VsL8JM3==NAK;S^=yCO0`~}iKjAms(!lVBxOTTn0BnW6vkt3&RU+pc_!;=t;hV;s zOZ?%(x_}%UtP6Gy|?X2aX0>TAO!>pbz0$wc(l~92L6}pj^NO zf6ePI_%o3|Kj>Apz`4R#&7Bq_f=gK}8LNk0b=1L6imf=+O@)8PUV{{Su=6p0K+}21 ztNMBNXP)@-yK~zRIsfE5%J{Zja|}(2Rmxovzk+=Uoe?A|A}@udW);>R3wwF)bUcl! zR@pa2vaZX5Tf(PCk_EuwfjUBaAxAeO{6lT+Sn&I3r$vn2FnAnta0U(awEtC4q;Gql z{+fRByua~P=+l1!bSFhriU^ws_`1)UXp=lYV3%Idpn?BFK|Jq!-2||I@O7fU65luG z3ADF-T1@*Go1mD`1`u54@`-T-C@uOeSrr=cjnss5^I*@xrJ*1H6V0b2? 
zoj|<3k{z_sC)|h}pDOn7yxjGC_mH$G6tRE%7W-VxcPJ;~-DD5@ zOIz%R*P8eu28__JJg;Az^glYDKl28`beGH${FC79dn=;G7nlkVkL|%}#dFXWE1{oV zgOl(2^aX!$53+sauTFjbJ(jE>zL{bS+eH-JHr?3gC3Hc7156bK_wY8fvQaV-8V914 zbW>=4C%SX?SzwYF;3`bHP>~P@?1^N0CmXTc0pGBuW-a!2z)NW|J+&bw~VJ?C2XZ@t_!moPh_xpuihyVgWLzG=UL)GU@5%;MSG+o&pD3NG7-y zogUgfH}4HhtjN1Ypmyv6%$%C%wIClTIb{oj)bSn`vU;Ie7;Ys|Q-Zb&ig3eqH)jiN zd)C0nV7jBR;&;KYFFm&>wiBi2p>UiSJBs*~wlB!F8bs(lxr8If9g#VJJd$%OLIh z$r~cvfI5FdYz@2`GNe?%~cA?SML;u$DDr=u98sUf%tGnd_ z7cXrgQyl;kY1@Pc5&U-81K(a4vt`HG#9kmA6WZ#`BA+vyEa9uPjN5pLR1Zuw@SKow zGbZDPfEYjm^&5ujs}B)>z%V}G*Z;79)&W540t(A1bipRMPsAX$HOb!AXX!kH_8oBdLJIzLZysV@b^`9?q+SpTS55;~05J{z z;HxZjtbt>9fW=+$o436df$IRa16-yyWv5F;bCDpW61_7CU>M9TxoZyuj; z&>!xJUKoMkjsv3a|ITi}EEl`L#|9Ab4&Yb#Db^Vll5*$GVs0UyU1|5uIg=g{v71tM zMs>e>5Jd*r+r4o!g3s*=vQ9GdCCsp}};6!U)vo-4xGzX~8X zKx{jfOXU3{CJrhM688g#xp-Y*@MCU3ej&I}Z44#M6H^P%;@ZOqU}qHo?FG>!;0j}p zxH!>pg`W;m7VndHm^H$gZ#;KN7zsb#u!l+A+Ro*XD95`PHtpknTnl6}nJyBqw5 z$c=S>=ZnKw9Af*dztn#vOmo000<&i#+5$EH?1~&&y#dZlrOqM`-?t_>qQ@T(H_=L& zBla;w`hm>fn7@X+D3bFHO%?Gs+#X_Ozj3j1XdgZS*Y6bE5Xhru-w|3D#&!W5!G;}; zN0UIT_t;OtLrb}HPPoz85@9y%OUd5igL9XAoABlm)3^8xdTtsg>WPO8+G^1djsxw!b?H+7k|Th^BYc4v7t-y ziS>LiFwi>3xgXpjIsaEapszXeD|Z!QJPKmgz-nGknvH+Fd>b;A$f@zuPhKXqUH=n4 z9AqNzLG{c6UlHL=k%ERcZ3W!ODdBK-%x*bgRK9)W z{Cf#b1=2qaZq38rJDmGh_;D}~ka`>VJm9Ahe#MUehU4WcHy-u)#Xt4Mjq?W{&EK@a ze&>NAV~l_Jc?I~%dD@m9uw`dt-o`6M$d9q88~B5M;VFUA|981$lIIs@8o4G0xS0kdApU%J1&oG~n&x6e~@XvgH_ox2*S5AMXn}I%gyt5rGY8H-n*U&lh%a`q zJ1?k9=jbpL!0 zAKZ26gCl|$9Plfr`l?UiLkE82qhExaXY4tt7_m|Ix)E6{SA;)~*tLX93JSKElmCQ! zLvHZEhV@HB>?=Qf7}sC69dg=!Y>99D=J!7GXFXwGG;-CyaFFeEHRrAz@(;kNIDLQA z^&L6dsj4a8SkB$5_~756K|SP#@C~rozhgVS1N`y4_#1{Y1=bq&(KoL1)UkiSj<!$vlr{!V>R6c=<7x zW?V*atfFx9H$K^u{qfz-pl}6u>(hT0=>x{*5x8qmFWnM-d(B`wx<{{GLSF!HOE*q% zUcTAC@+Mpdoei4!?@0U?y48P&p~hYiI1|Ke4@aw42*0Gse_gV z-WM7vrw*EkLyk82FJv{5gkV4ah?f~R1b+zrPN7}&^AG+@AahduMSeHz*b#rWD}2!E z4D#rNfVFIPG!<3b*>iFs=Bv#D?T&x55|Kh;x8ur^Tl*(2=%iB-Y_n)AHTKSM5?We-k0;9{ zS);=_SFGeviB{RoCd`Km=b6h)eSm}I0dA)Fo%P7CNt10hG1rId(QxMAREtkhMm-ex z#j@K0B9_LJML<&7Ur@6IKIi>J&_o;uHb!p{+p&qAI zm>){D@v52o$G#eJNv%B2w~^R1@4C+raxzDDi>Lr529$t_?aa*e%mWuC5}oZLJGm;PvAlot!gb&3zNq%@QPnK^ z-n#EZKY7h+3&WtsqDLgV-C`1L)0LHpi*7qxLBobSBI@n1rAT-dKKh(we8`^L3CRhV z9N^)W0s+kZ^`Eesf^Qvm5{WB+1`!!YpL28IPd^_$;*%e4!R=BmB6-GYYC(61DS+^NMBzFk6>Q^ENq&cpk8UIHOT zj-fx=0_TZiL?Hff3|V#@ljq@h`2BD{;e_?!bALP^JL5IdJim_Nhe`$PGcRmUes5v| zj`kObIg$6rE*^MAln{AN zHuzOgKGaVIoiMz=bVB*b`yR=8)yDhsDYv6WGia6L`r%l3rEK{*kNfc~`F%ppgPUWI ze0Wd`;eLN;4+$7P_zWOJjCcy_1MhnR{*xTpY}oj6T;8^J1hl+XAcVPLwkMN zo0Lx&`sdIeP#(eH(EfNS;unIOpL`_mr;%fxaMG|I83%Yi$ZQ79VbuTp)8Ets_7PY& zP@kwTAb&{vO9=I9;E5wrM=W(z$W5M~=dbq^HH96C%E7S)Oxh3z|Edom#r@&_!u>67 zP0D{q`onZM|2ZDc|2ihz!gwAr#ebb2?)Rr-mSP|1AH)5DCrAGw=k*rP=cc58aO}kf zxNPXZ77Wr2aq8w{;io@za06 zZ#Ue3=r1pF{pZ*sdFMa#!B_wI9>0Ho{8fLRpZ6Wk59I@c-mmk&9S_g@Ip)c+lal9u z^_RbW9%&!(GhTnri$V`K26-RsO~`yh>LVnb23}NhJhVUOf?}JDUjz7Ef(t?aK;}5= zduYF}_SxWmp!jOxaX{9pL)x4Fod5lK-{nzQ%%2!vXula5zZ){2u9rAQe}E2z&BlK> zydT0)IHp^iPmq#8j#-N93bY4O*fC!X^?`8=4-1La2~P_88q!tSzL=bv)!=geutojp zA8^(fy~mVC@+H}C09L4xK>_flrm+t)NY*JUkiF1w{ntYA^~pkr&FV>>;P>t9sPKy+ z4;qZqYzMoSMIQ7b=0MbgFIV6b#$SK9IUuz|B8p$@*l^pyltUqgd@eFX30P&|FaV4Q zm&?Qy+>v<%UtU{69gO52ge%B-7@Q~8LX&~59hd+f{Tz~IH9tx9gq_(!#aah;HU*=f zb!O+GaCcc*foKwl0V9Q@u}C9JPf*fvCvFIaAA7|WKR8rD3=Q{=Xfos0bp%6}DcP@18&n^bad{4{SizJ`94?Y%2j{Kvs|;3o0wm7 z?W)vv-tl^)yu>pPRF--$%Ek>=YHXB?E=tvkzRda4Sf$$grn|qUJP6BxSf@rh)t*Ka zwRlm@W;CYe6m@-#Tx!}=*=seS`USOqO&8RjSv@wBl!~b3{MBD5Yqy+IeNI`zlA-$J zR5_#>1f!?xOPY+h@pONUOrIykGt<{8e~wgUx;v%M)tz|@C(c~*%xisItQIBH=*PyK 
zGtA>~d?xdrxiiOyIlnXa%kj3FSIm8PER(s*+=SzKH8+{-<(REz40G8X<79SbO6C}> zW8XQc-iyiY6)AtKBAhsT|Yph4}z9)oAVHNX%Cs+I2h+Tx%< z#Hcn9)o1N0cqqV;j|C?%LDn2e0eNQUM_&01$#3Q9Eku8o9+)-LtVWns?syQrav|zu z8lXPGnQcP$`RoKzfqJy@`}g<#P~LX8t+NWb7JNdXyns*BGo^6&i#o43d}bvLxVxIo1Vy=~ z`2*!yAlJkV@XW{w;0cr`k{kHxIdG*2_04+Y9Q}W*J}8LT2kuYXp?}47&6a52!AB;9 zqJr;Id{><9+#o3Evt6MLE46}O9(5t%yZDZNp#p#~yr)o@XNVKBVNS*c7ouI$AlSET zV5W}#c~3s?I^KqT0{tT`mj|r)SO3z{zy2~7-~hzf^6)J2LFyTOTvWko?u6GfqE!Xz z2<(3Vq|Mj^`U9!oR9@iS{DQQr9msx!IxUq6!q8u35YR6F?4C6bA|RAyxX(`=Rp`Io zIfS?#d3Ti88pc1`v2K9_VoiyZ1Dfbr;9fJF-{?Agutm^DvLxcz#{1#y7=2)y$V?<#h0-2*8u+Wm9RsZl-`%1EE{65j#crwYEGldLstmJt1V zfEjFz_l1%;oBMnXUNhCx-Y^GCoa3N#jB*>Lhvy-FB!HDh#Ca{x;oOXaei*{p zgMIOtv@P0^-8=|x0rf-rFXw^44Cf1Qz5>yq&`Ro{-N*oIK$O3j5=gsNVDy)H2Gc02 z7MlS=WnE}Py-K8AQ5UrXBH#tDYwtQIZ6f7=q`tfd+H;5YMQ(GA=U|-Jim#y^WEbb( z!}&eVi=Xo~&U?f8HR>BVNaTEi^Xt#~DbA;#^Ii9bznq+ZVDA6b{#aK+@d<$*Bh_)d zLpv&D+`>VwV6GG8uW`vmDLh8VF=HDXzlS*nuW5=$4 z9KgI9;h8C#pcOh~O@b~R{R#aF{Y%6=iN7_p43N~YHU%8|G+BF5hdSF}zI;L@78d*R zf&a+-^&aLgtdRe-pYdj9q3+3A4*m8Nb!piXi;T$}pD~v67D&oyXsZcW3%2QDXdglO ztq+P8=5o}d7}SDzN1xy%&kqd=YiCKWg|?E;ZZL=ci`PR0RDcJ8ytgp)^{?xHC9Dm9 z{~lz{`)BX@InM^)n=MC}o-G|h4~}9|@U&nZ{`}^DAR3TVX?fasMB%j|QKXG>CC^*@^*0_8;Nr8>fgx8% zjnUd0Zb{VfQ_%1(i2f5hhV}fshi5=ixCVbiNiIupcapj5M%HHX`?WF=&kiE;TtViZ zzw2YD4;P!Xj~ydoT`VwEb-d3@f2$^`Q$Ip&5n4P)_d>K<7O#Ma<{Lp)2I$`}KD zVaj9-kg|Nr0fr8|3$Yjft_{e1f^wiQL}YG=4)bC{9U<;y@$D;)NdN`c()FJ zNxeXa9b@1Rc%Twa!nx5 z;4Xl>1X&H;SKkW$@i^Df#+Z9F%nidlyXpQ|ZX5IPSKCAR0ug5!b^JGV_^S^>4mPiRPxy{&Fq%B(~+BuC;M({m7xzz4?q6QaAtU`;fkdb*&|H zyz|xepYa!dwR?hgYGI3h^%K~Oc8C6jIR$Mn{q_71YsHn=Bfs6_e{Bq7-ucI4g^Z65 zrffy*a7RD_Eyja?FNDy&gw265EFOkqiP(>SKYs8osMBazU(jDMW<}Vi7%#A)G?dxm zrRdlD!A`f?xz^3!n8Z9%UoNrXQ0Le`kiLk$1o~c|f6kM#cXMPklHV3&y!eWUF+}Ve zVrK&*K+1CZx+aZ@h0>16?XLY>-i|(Ht%5f z<9;Gd`XlUO>@mcUe1=Vo&))Fa!DqtpKt8*}=g;@s&-F3+?0v2uKKq~RhtI+1`r$L| zG6J3B(6*B{;5)>@vB&7}{@6;AYU+7Qpqme@?E& zJKN%~Yf1UHKi4jYviw|&_eNCy_c3!KcDw*=hDdBljvDTr<9gCg3fhufzu}P=$eNDiVmR3AW8KXHzvMCYmFR$VQNjk}m+kmuiLc7Y z;PXJA31_e+e%8DeWn0@11MYpq;2!XL(ApS(`pblmMf*NR-am4aKgT>~7^J-sM>=j% z!$!rtjM;pUyT?8F?hf8X^hXiFXiUABW1;Ssg^dmWJNBcsn{qkKRbbcHT*|+$bt3E+ zp;ezbfqsP>oCVyQS3>M-hTmbchWCpCzZ$RYl<4eR8h}jH6mz`-QfcJG)vXVXPmsrd z{fq8AgftL}C7}(CfJga(BkOUdRz$mj#3@>SLy|&GRzqGV(T$S{=>*Y_C+GPmMUB}< z%)KEsLm|jB{QmP#g~x|bW+E^{uqHgoo(a}Z8A=~sKWSB=AD0jI)d>u)7ARjbj z9vH-9z%omI2|Z=qW;;^z$a^P9BO)eIvTFnB4%~&{A)&juCBMUQNsdFLtddXxzDKi@ zW5fk<>;J-YF)m1F`gtzMk*GQq__^ya6AVY34oN_+>>2{=ZYMo@^K7*0>ULy zqsjRK*Q1`$WY{JkZiE!_7H~;C*GWlv6jBb97tcviPCO6gnSVX+AKVXdSy3VP!SJ#F zUAjZP)}>%-EC4% zkVg=l7xH-i*V=PvC+r8kuX4qIpK?hCsb3s>r2fw2FD4w+`%s_YT_Np``wIXGv>d~( zW0VK&fc8PT@!TsLLWqZ4hxbRhV(2~)p7#Im`@!Fo>p|)fhWf;N zYUFpQcO+wwK?$$Kr+$X^OGrE7_xN7^v?H!Vxe!0aeQDg6lnXlx8>Co&_#Mz+C!&{# zW7H?&=jjk#1XTffj{^54*Z=C5pYa6T3%M`Kf#0E?fCFJk{t);m2pJfDH;gmfZz$KN zJY#Yk4dW9LH`1R_{-Hc*Pw0#pLpfW_+C#hk<+;XR?)&KkjOtSAFjuHOi2BIqh4_y?U0hc!+SC0`k~(p*L})`{CDso48Nm^P8uL9LqEZF z=*Lh)Y{BP?=ixa+KN>_EaE$i+l#@mIP>(40h42L7y%FCfpq0-!LqrSXe2M#z>kV=p z%FmMfV_Xg8MthNv=THB5u_B585BV9t@J*mF_`x^*?LPvpAL7t|q}Iqhju14-%_d#7 z7^udEp@~R0;VH&~iYXQ-7w|6;UNw?SKCDke71v)yoDv=$MAqSFBf54xa4bY02e?*r zt{eLmx+eKOXrI^D_`x^=&je6Mz*hjLf$@a-08!cn87~{yJ+Mc}`VD)M(5?d?jG{Xv ze-h&!%Qbd2fJ}jZzK+xb3g=+Q1e^;gq(|mAC>4U%$3{FJkslN~VWlaFr;|7`;`_kc zC{t26fmIEb0VNw@CMpofCWS_)LRN503Ju?~LE`Nh=>)*U%B)#K2U|#3vtXe&LiqRK zumuk@bZ3xp1un!XQoNB@0d6kfqPMWWz{3v>Y;ec{>eI1*u#2B5&ydOFiL^09$=G`f z9cu@iy3iCxE*b}(($j$OgnlRM8d$u<5}a zdAjVb#-t^!khch&9kGzG@deEg8H|vykl8Q>4}=VV>e?&9R~S6tm^-KP29wV^31&C2 zJfMD(@u7m+_}O4M#&7mpwzK=<>pW-;?632n!3S?+b_mAV*LA%GfTAyFWgM3$Z* 
zkdJJMMADMGalUzrCb)L&xACN`50k0j$v8LW`u+DkX>sI_eue4n?sUu^?r zv9Gqlc|gy<&lli0{hY@v@_pWT|D2}{U1Ijc;ip^JClxr8NEk+^0NN~Vg4+h_z>BD zOFH;rZsZ&{@llp>dAL9>d=f9|i0`Re_OCgnK{-r1qD)q}jc>;p5y z-%gOx$7J$u{yCA$*CF-sJMuT__rdsoc95m+y9^+<{=PqLwB`9QnO)+GPu}t8d*{22 z&R&bUdM80S!E?()Wq$YL&Md_5>nHYipE)Po&v{xg;MF5{;knV#}pKV$oU{QY~-)qg$Tgo5bzc|2bN(`6R@^hY32zMg~pvW}C@ zItjj?0l3fiJ6rt&y9}ZvMyrcDg=)s{Ql7|F`F( zIX=7F@xT3#|L6ZaT`$l7D|M%CYDlfbL(DvKo?*B6O`Q?8OU)RsBJpM=i+?RR$ z`KCQW4*K~Y3!46)^&|d|=28ClWdGd%2RTVW`Q|kFpND;YoKOGvgkH(mFMjZjBG$hT z-xFA#$+0;29sk)s2qdt7=@~?AX$3(DaJOIA`bm+G;3%Dt9I*^KxG6%c#3!9Y&LjoS zAl)j5W0m^TS$aF9@g4E%Q3&mr6iAr!9V{f}UZq~65OQSlB%AC{&f7Z_{ISP z%-$Riw^HORwEvLVS&Y?4O(S?UIU{N%)csCqN+wvgFH;2K_J^&q4b98;G`eI({6N{e$&jSmM&*GH^7LjBfnW4@>NtZRGA%* zhI^;g_l$Y%E~ahX{B3wC#fRHxCRIJhg({nWyOfc*O~0bQNXM@uR^nU zo>%6H-QLXO+>3Wx<_+4F(0uMMm$~8j@zN9m015SQqAPoDxt6(J^H-1j5^f}4WO%2x zXN}moki1>j008818o{z{PBD{B&d+3jMlUAC2vi$JY~94$4t3&9ESSV48*fhlmnOC-bNm?TLTPa@?n}NqFNC7M&`bSF%rkcT+*>GrwMp^MsyxcMWp=2X$`Jp@58rn$@tec;M7+HUiwb-d?nhwr>S+-V`JB3vK$@b$7~dTxL*1UDezxx_!l-X z(!A`Sl`zL1VI8~}QO(|cdYHE zc-gPydG_wX#58?C} z=rdEDEa}d3uhZw%)|@-H%v$+6Js`>9Vm9G9I3Kp_adf#>n2_B?kk{#*W-j?5p!jz+ zn>aTqQs3%MQlI<7#x&)bbFu6^7zd>p^7nPw*`_wBMhoVDF`v&gV`^d^t3kUpI*G_< zXpEB1HWs_VV2`JPa_Dl=J4f;kvu9Mv*Ns=Z520x2akZbem)TvV=FMacB6cu9R#!UM z?k8+BxnyT|0U+elT33bEhZpdMNp$S(t{SrLOQTt6^zJx$1c4ubqp%f@R2rx-0;i$= zUbwp@M*ePp;+-`8FfmR+Q%QAGI`xgM4*S{pS;rc4z|O1Q-p4P#f)n_ZFzf1Nm3`XZ zFLTP;PMzcuy&zTI-DbIVr(S7PY!*;-G$l7F#?qsndzPVb`chtLR{N&GzZXDiHk|{g zD>X!s(d^w&zOdA7Z>5_W`7&==4q18^Aagv-)1WGUZqA+FDps(6^O8I6cP_@`&E5n- zVGFH0d8&4h2+!9-Hwz#*aZy~oM^>@sbDXa0YSu2jFq6G#E>INc(jCX3Fu2byVnxcc=X=ZdtCh2C zkSk<=4XVeuSloNDnEBKCt(|U1Q1ctsDVifY{z6^eCwL?#D;gQnK{~#jL&{$D?EYrj7J5YZbO)tPLd*|vv&I`C%I*uu)x%;+xJ;I9a)TG5L40R_&C zRNYz5eyn%dyV%aR>ve43+1}5K?Tjw9NMP3LjrD{3>tas?em^ZI4n$+CtEmD9W!?Q% zD6z9rFqX_M48;B2Gn{)6uK1f(Fub^NpBrhO(6?^h_{$2sayR){_*S%gu0r3zfNxBH zt?TZNH@#XaU6eY1(--q7Y~t>Gg<~);q<6HjK(Y;2uyEqST;%)+J$@fTqOlnP(iWXELqvd-w_Kf>Hd|SfkojE;|EcnKZ ziloaXbz|D~eb;ocDO>LsvrXuL-?oCTzRveX0~4&ch?2aG;?h~>f{YC|Rdk1P0!EgR z!O+M-n$$X7<*)neKrgmwF3?cibVKY`8W&mDckiue6o7+?S?J5wPMPWZeeLsqW^!{^ zoOKwrj76Qde2d$6Qo|JtR*0 zDO8WU-BnSjT|^_Uf~kt%J=P{9A{;j;N2#~6=yo-G~U5`?0b4(9_>m!*e9a8S&y7oc6m<9 z_vLQBXEVPSLQhDd+st1)4|KJ6j+-k;q@QWB=>162`eg*_m#m6L?~2WW@V4SWS*_E} ztv~m(eoJ4)n4_S>Bkfjy7{1iJjW(gtY_HMDJg4V6w&}@BvbGO>KA|S=oojE-8GLV( zI(W>JCYaA=cTgd?)#`cTBXz_@-EIv*jFTiEt!!F~0Yk;X!+h;}k-1i}^4zyZem)&< zmvCG#zJ8w6bcSxXu}mKFL+5Q0P6W^|_z}bJ;6n4Nlyi5M?pH5y`~*RNs~wJOc677OUDc0W z_1T5`v%HDXakut=5AT8u!joGzaTcgt1RYFil`#Q4y_xP5r;_3K)zq{UZwuyJwSKEk( zQ~j7xK-EJ*!J6W!~(P`9YGl>qMe;IG84ND`(8n zHYai1zDJH}&E!Co%JX=i-L-Wzo;g!_ZCseSI5N`Ra%Ui2D9%&Hqt{m@7$MCNDH}l% zY#yWJTA7!B-FpFBy=XoCOXQ?Ha;ar zzm;dk0`bmfu{!Rr`pK5PhxvT%7Kgddgy;0GD$Bkw?x6Fs%1NEhOQUF@(=RbD|V;Z4< z)4?}BFT3R?p{%sJzW~2#tNh3-3y8<|>Re=~MU*AYUgU(Jx}*jW*)$LIiI}P zi@QR9=wa`RZhTGNr(`!hVo^<65Vqy+41NxW7Pd(F1r;}lbH>Gje@-wfES8&)I!+x> z76xn%Qa~UEueYOO=reV_K>+2dzZzkpG$F0pv#Ao4cU=xHi`ctz7}w9{5;R`B?4ogC zQv3Y6aBm|%oih9(u{NXqrJi#AoqpHH%dMDyPeDGb$fshZk4?pHQngm=8zO?z<$84y zHiID%y*k)BryE~h-_<*9=WWmQOXqsh7aJsD-~8ZYYqi}ksx;m>@0M{_!Ctsh34MN% zsb_g8UWpLCXxHg2Br86A&0j1JBXy6pMQtZXd)@9IhqAu5#>QSA0iHeVCwjz;yT%=V zJwo8Uq(Y0ILgN;)9-h0C?#!dK<)4Yc_i8t%6e}6U{+2!+RW+}x{W#r@(bX((eeS6D z%)S-3?zYz#rP7Nv)9E+!kR^b*ZhiBi(Q|{dwiEk6RrT&5=vni6ncJm5r6c3P7wze| zw|UE9m-MLDZcaJgY}V&De}&_er-U$ngNFT--Mp6TktYYw3rK0C_q|WA<}{q=NB6Z^ zsmtqOq$jU_d97y;gaTLD%051Iz@lHU>krMdU37UTSN=3k-$DFxED%uf+_8J{)@_U2 zp$5zl0m$b{8=HnW%hu8@nS)1B9W4-fd+yHFVX`l|a+)z~IXnv8;pSkM85>l8>hij$ zNrg(~+%brqe3r&jdQXMr)+)#EB;mPz_z<@6Sb?NU*oV(9x>$$%QtWQ8)AKgD-Wl*K 
zNn1f{t9Iz9=()mx5_b*W)H#qZz6;a*ky0t7U(mG>0q z8TQZX`&=3uk3%b(osb>J$8PI?H+TDSa;VL9973w*yw2sO(WK2t)(iHW(>8a65R2)* zysNLxAX(c^Zo=uP8`Wyr8b=*WC%Sms7VXuUbQC6epp?(ZN>5LZ>^7e0b?8o#3Mn;0 zU+w38RxeM@^OabIbJ(69l05C3`J`&wrTEH0;~mc?K zI2Ky}CL2;dsjtoJ>=nt|ledPs68E`cmv`^Ae&4xd85q;KJ!zu(UW<=8BV-e8e7-~h zCk6c$Z2e<9-t}XD*}&#zQ?BH#y*b0HP^CsWsjf11+VsLrw&AiJ#m{n+Z>;5B(wJM7 z+QDK5*T>_z0=t3GZ?#w-s@~k$yq>wraj;{_u0$NWVmm0Ke z(IMX&D|fer1OWZ+1#4!8lWD1MsjiYtegblI(1Gp~B*NARVmxY$knyEBZ8$a2w*HAx z9`*XMa^~EBRdgI z7jd$i%=qcV0#B%hC|mF*)yP2zWmjKc%B;W0=lyekTuzvscsF)p7hlK5r1dIieYFEe zZa}zI(TRF&$4y4xZ!6Pw?OZdSn`%5$;>n7lxA)2%As1(MGPuL#F?&h3yt&V3kQJFS z-9Bn2qxRl2le;_uTbVw2ywqns7q}{lxYEkh>18`#PLCI4uOn`8RF>-ftRsFyx3~Sn zk!<~cKE7$WH+?j@F-O#BjQv8(oSx63l_Ng6y$hS(DWQJ8kUs_jY^l7LWNg7|9DMIVDHGrCdl1JBd_` zpw(sWB26rVYPa2AX0Wm7V6^V<^~S1se(BHsxv+!Ca}$qxDp^YF{bX;2H-5K1Z?iXl zXcAxz)ri@5rDdJM_bvx_NYYsGGrU$JQz;jp4#wfmYJw=V?t9|_(%Wb{-Jl-zk??R3 z|9IWTW_>s)>rE%$lG$QMe<&0N1nIR zX2%1ZPOu09YqLG8wA0(S$6jD|ff(O^&dsbYa)oSx!oH6#7fW^*Po^4|%<78WgX7+; zU6aO%q^j9-onFP0DW_CdbG|@0(XQ^-HDT4Nxw1K_hR~V z=x12A6{0JX_j9(Dr`v1U-5D3IB4*A#7wh|jx>W{ym|D+5Ximn+8XuOkXt7$J$CjXC z1#RF6mKIm*cA}N3C%!?ux1TP5?@19ic;Calc-Z~2efA?Om+T-br0c>?MwEFKV$3;{f>%uFt=)GHiZl6aZYNAK}SkOi=J5MW0fuVLkJLZew*6;b82O}jG3vE_v z+tq0Hsvhs7c7Fx{S54G0RXv`g^gd6<0lW7uwPerVQ+4*v?vYDrF7ze#^+TP%H0u~G z;>!~-!p1Z_X4zuay-@V%RldV{f+X}2Tp!cYpQ!QT;`h?xe3`9((%7E~t4nm^i}Pe0 z%>(0VDC%f)if6FJmi~${xXH`9v&&1ex)}Xvk8Rg;v!|&yKXFHk*<^G?`uNI=*Haa{ zQII5RtzJCL$*80iCwO}!>*$QS$x*swfn;)C7>&!RVHhwH4v#`l#J7Y%K33)UHBLJ} zQOh|6Hlb11OoUQ@n~a6Ul>#vByh@ANk%hk&ShmpGPnx~_7(3T~&g|La{Ixk*wS1$& zi}D;j{9Spv_>O+QS?O+5%{H%N8ej_y-yCb+7TBvSw%ZQ7rFv9Va&=I%^-g8>mnuqt z8JD7mdQSF}<4gy?>ocCb_x{FPNac&MTTGa*NL%fygl3Plt@5>%vy~;OD@*gU~krZ%vDlcAgu9vAQV9qd<3EkX5V&A@>xk0$Bk0O{w`a<6(`x+_lNbSEERyu51d z=+hefJq)wg=jZihVP+e*1liI4=4ltU0t4xj0lVn^2=O7)=vJo~eN*-B?q$c%mLut; zAE%k{D80EsgF~=;JUpX-$j-WKo7wKD^j^+n3&`z%jwWwddu;h$27FUKtSSg}1IUw$ zqAZoePARJ*iJz|tqXE#lcIO*<{ocl>H*Xqk8{|M_fR0?WieQ$Ho1K5LM(H%k-#uq2 z&>e<4wH9y}rLvaAeY{eHA``3)a8~U#MTTk<-W2sPt|Uo*Xy=_(iSAwFYYi-+{<8^= zFY(ZSt#h?EW}uC@Ate6f*hPCk9cP*f&a90eZfeOW;0#vZbUk4g%V!pC7gK9{+f$xy zPpH}A@eC2_7B?>{_v&^%weFU4VqV-yzEjOQyeD~e2;e5-^mQN6TFz-Z9nwTIqsZ=c zX^ix0q!!P6D=)AGyXH?L47`0eud6din6KYI#i^kz!adm+5ugS-m>t)fYd$rK<%K!hI;_ZXBG7OS4%rz7zXgfo*Eq zdx$K0&U-K9?_=@?*g(1K?Rf7t^V<<~UDCb-$xRQJ{S&r^w19Z3Ep+BRHjnCSehgH9 z`@GqLAGX}xcVo%k$QqxH+3TBMHT8*u^3q~;A5n8Ud;5Vpk1pEGj9>m3q#5tS{cyf_ z^`lwy02;$MDw*)Qj#tOSZm~4;9mv6ZZhxWe(A>P|$Cxn!Bm&Ng!`!V-no@wY2_fHt zxgK?17zqpMH5ung_;~coVj84O)J#)n z9DvL7=WO3qFVESm_yoZqu{nABBsjUvVP?R_<)N;)w6*ti*M~*%5}N7Mi#D;ZNhgI} z-OD0!_7Lu<^%2u(v=r)QxNYdWq(;6z)~;)}xf$DR^Ln1@J3VtW|8Uf7z=>FY)WWMX z<_o^wb6Zfq-Qqkt#mm!Ndf?wau7jj*eKM$W0WnXP1Od||6);^7GUSZej-m1p!ynel2M&Ml1akBI6 z_2P9??Wa=y0bVW}r=+QpRMr1lmM@6!V83p1NbMD%Qr_ zI>~%G8@VO`M)bCNH?Lj1K5Zid`}!4JwbU|~rSW+5m@d!j>88pU;{3K^vFWCr}KANOW1E+uB&TW6|0@HIZEOs-$?3zj%#kV?YGN= zDb1EQXR~0Ubkt@`^AH+)Ce&1HXhUam4uu57=TRUa-pViA9>`T2*N)G-V7JlA&Ge9+ z#*dy-XVJ?wn8{ij-Bc>LXP2oncka8@k`avO)RJbrz0uR%dd_X7WzbODeBSFXre~Us z3?jRNds)$gcWJ;+RA_jA9osOhme0jgKisY(2yt$VQ67oo`v}0J$!)_w<6f)p?##Q) z_2wzS?L5xnQKE_S!^2s~yH~?+CiCbpm-3XpblogZtKIwFwU=-S4W({wPuKj!>jLg7 z>=7sDNARcbgMFnfS_d*A(?eezGv<*`7tN8~QmeilO*hlLinI=Y`@&;sTdQt2b*cN- zjm^%!!1>3_v}%EX&@o7MGH3V<)omYfIh7CZ{_ZX9F=VFRDdkhO1N_7jh%6fzr>b4e z)kt|Td&nV~A*`H>H$UE{MKNoEY4IfMzM56X`Lbk~TQA+;yQuLIARNuFGfjDCo@>?D z$?KJ0@)PtupO-6tpf09(-Jh+qvJuvmEYJ1xLGEwmp>Xy_-|Xxd;tGB6wx-BvIP8~) z@uBpE&8Ud?H;LEpXK^=9C5HmUou3xYDb6QbkPDr8YT?0->8F2`YrL{Nzv0h9k zv9&4UIb#Lz)?6V9lpUa7wql?W|7cuw-wV>aFd-_pD?_Az@Xl@#*Y@5roE(hpYfW7m 
zff2Io4I2o{sMb$`~DHis7d8A0W@HHT11bUen&+Ndt`$HJb>=FWOmtIRq)i5ewn z8*01>rIYyZj%E{U>^7@{5y-$di}GFsg?htqnxrS_n1+_5_S4E&U8jt1O4S`k6X0oY zAei69!P=#NpKogE>bs6ny1n7;_;ZN1=+~p!b2ss;E4!pPy4lYvL*Gy9-JUz;x4>LU z#|+rTe#TM_V%Ce}_C2o-bNWRGnya_`?#1bsqlN*)_bc6cKp~r5JSjj-kCp$rLk{P* zp$%=V%!-&v*ju42C-$hd;0pDlXYEuCFqTIS+m*UNUJ3(|Ns zFK0LJxOrsvbk}e#GJOS zr(NfNnH5{lnR{V2)Ke)FFrDbv@q1=i={A{eT`<9ngluG&Z((+yVd$N1-cr^wO&4dQ zjNd>vp;=z|_4P4v7WO8mrt>T1Z;?CnNVBD|kt6A*b1x@xmkV+9uIZ!WJCj}X%0ZKE zo{xqG%_$z}kN5Qf$B_51sT})tm%5yGe0Oz!Myuqhy>{o5Ja!j#iCM}QRAy(g1P+0;`uwzubBP3$H&mK^bP_d;QwP zk2SY$%}nnc<}F7De+0r$57S7$nzps3JiyI~IDSAs=gzI^MPtUq#-Y-Wr321&#@f_> z#UdMRQ%6u{adDns%PDue+7Rxpjm7PH6#+}m>^e6ueGoZW*jMdHqomvB_E2kG(kZ~+ z=D5+A^ZeI*oJY-)F}EkCYQz4vUy27Nn9YsDaSqnpXda*O|Gl8l-R$;Y=Qp*RY)@si zUn*&0j6iEzuPe26O>@0#XAP4a7q@%`vE+7{lR zSGqm*on*UBHyU9NYxXbs{5Hj;HQwDc$JU#bM_G1$LS;vwFE{5@IECBRoqmjeZ*8Q> zv#V+Dxly`!SNwy2O@w-H_|9x?@AdKK_MY?FyIo!HO0`N-CWP%>ONYkN>ukJ%3j6%1 z%kSk=3FeaVkR}hko{#4{OH4*fKAEkQ%NcQPX?1pKm0KLRk!(Cn&fN=*9^JP$&t-sX zV8Fz`5^r-ADRaoR?Tu&_T$3?>VnWMtscWl)EX$`-an5{m*ShIu7C2DqN2XnBLS+c% z`)r4G8Q`eB-%lAYzi^L`xu@6lbfli>sq?hrIX^n?tIihQ4G527dWKNr-eIq*3AvlC z|Ma~J$k`G$tNFP`Z`Yu3$qznATmd6E?@rOSX{vM79gsY8wAcglgmy=NEzEDtw3^lS z`;cCyYu&qqqqjVF)#>al9X;nhS03h0W#5nrK1OW6zVJ^m~X1v$GZ%y zO{F|5U#a90gdRXIzQzE{H9{vs6k>sH8(ZRSZ30}hVy|JSXr*24SMVE6btn@nNq_D- zd99Q!L;Kh4II7FNL&Ii&e%v-W=mOoljJRREHJtIw>xzJZUQvT6R?F{ux3t&m3_KDT zTW06i&D@p5K1EIe*91~ozrxFGvu!+;^(XCe;``IgR@&P-;Y;r^djber@7yOH&3&k6 zV{62WMh)a=Cy*cIoqZeSt|yn*;G!^^dwjr8@G4Gn*Oc-Rkd7LE2;GstTV{>87aifc z1G@A{Ti)XQyd9Q-How4x>G~aTA2&DGwMlywz+*&#%6Ewfh>BFT<@vd@ZZGFL8)>v) z!3n@w4A-QWc0~#8W@B8%=lC4(Pk6X5lVF^#Fi{}c4#^qn4Zu(Tdi54a&U>*1#674f zbpmkgDVwnzRpBmw?^P+fx?>|YLI1oJBL=>^Y#u@BW`0%1Mp3hu0v?PmePm;g&m_hw z6DNNJ2tBcRL-UWD6L|5mf88B(G+LLd+*^!yymJS~#Pvlp+&tXDAF%`kX%6C3o35SE zJH?x_qz)qt~?1e z!KrTHI2D#txRzY0natj%RdVBc=qG=EFXehOd#A6WKjNJ>&)7pAMlA1k0U-H{Vz*bH*u!>u zT7tzuhQ26ZvRhXY^n4mdlM%8VfOCeVj0(o2O3%lC_arSJ`AOpMt-u6!zP_<125o(fmk-VBfh0L!+WV@p+Ty+el(hy4{3umm{s@|P zm&$bVTF;th3i0Bq*b37(WBH?^DNamDlPpyVLATNK_!J+b7RcYR;HPuCnk=5X?bP3L z?-vh$` zH7|%uT0L!Q-uQL$&>v)dgr!CkZa~FvmVFY{$M)qOeUsf3f!!{k1aqk7^E03btsBut z_hCPZkz7^bSraQQ$IMx5|}$f&t7Mv99y0Zj)J<|^7w_w0qVcQC z)RQi!)9k(oPK*25@+a(;F%$nTT}DR+LeI^8N~_CvIDP5W!rxh`f1;X~Jhe{oqn>(y z=Jl0YQ}1v@X5D6yytv0BNiAR;EgjLFNhhtbQ`wDk8MoR&b+^;X;y;4Lw$DSxq!2cU;I$6dPE{PX3SukL_`G7NbgZCa{W#CezVBP z?CxBhx5?+!M|KQ5SQGBD!+|UMV`Y+uH|IPQ)d(ax!Y`vw5(BZln$Gvsf6 zPHIrZeG|(0RE;xTj^i18?xQ5JAFaI0JZH)-Hbdd1@{Yw_Z0JJhkOJm!mL3`y=WTT2 z7D*)1X_k>$B)CKyzNu&8zPQH(NMj2a7(rJ&H=;Mvxy7@mzQADU#Hy8l=f9o8_1w@K{m`TwN|8)}wb66%e#fXoaJmJ5U>_w42KubimVM1e zcPn7$^+Wnw|LBDanNMNgCVs005(XrYM?iXB=&Gs!XMMbDcxL+yw`e&2R=-6AF64fH zbwhx%z~i_Fb3vo^+n$<&kl>2%ee5BJJ@&f<@VW_dsLTLON`jyOJA6ax345+L(|J8e z@VP3j4T0Mh-gF~>Sx})Ou9F;O#=QyPy+9DFcwJ&4BN6IDC>&G!vf_#cBD0mCyI*EB zl7a5-3g8jrjfp?WuP5*2kS*PXq0qT%0`p07E~HWrlR!5 z&1>}to6qKEeWA6-ecWK4%jI#Ng@;CO;G6};L>kI#gqT?ZPwd`a>g;W7^F!!R??Nny zJ882sg!r3UP zSSb$b+lkZg(6Ui_G(lFb^&}I%rUA3K!F{GNf8zziC|ww69!Zw?{RZdD4Z`?N-}VgD z3Uk_hPT#qYGPfe%90)D$7&3u;_Q8lF2aK4=oQ@q(1oLV$yj=1IUk$57mq5Vu9%EC| zu-t}&!v^JO5SG0bOZ-Y?WRg93^7FGiE0u!92^*_2CV%hUZp5kNE@RU8r6Al1Ke^ zFC9n~l^+vLXAf0^@3mdNKVfnsS{g=H*w5mbKXkKfA|Q|6DJV5O+UMi`pl zysl$#di-olJ2-BWzAR^pLxj}ygMCvvBUan_ye4iPs3W*M>&^QL_aqSbHbGnn42SRN zE@m$oN-4yNEJriFAiB&DyHPIQe>lO4RB1KK`W;;2{3fWzJ*o-<#*GCRwQ+WG*inod z`r>lwHV;eDhTjSnqNkbP)AZoEiw(yd(#=gNkEy9`ntVP}H!sTXlWA`P>=(b_HI@`L zzf=BZK3rqQ4aYjDa?Ta3EJj`XdYMN_v>pR44ZtgbL@bnNXte0;P|<*qfBz(NY4&q? 
zzILM6$vMIlr-q&qWBR7K58n~`(2Tb;DaHK*ivVrD28f8VoK=nswmN5Uy2m{r`=0CX z-=|E#6{uG-^~g_NPc}=M+520ef=220B%jvLn>;^R;HN+Rhk0^)I|<~QxRA6N{Iov< zFZ6umMvWtleA*65ew6{zeDqUl$m|1}jM9=GTn%TwfJ>tHi(S~)-PrD{M0$qKy(xS!Vo17E~ zcO&NKk-4zu>;2C`!(?hfts2>YN9vJJ@8Qewa4jG)qB173Lk7J&e;vKf-#K}*Fs_H* z!pQ6N*0>$a0BZrFZ1CoJyTAmkC*1eaRIhL23_9a=@ZhW7rsGEpI#V))yly{e z;m?N)k8^vdXKg@61c!hlbG@u$fC6-;)bfUCvb-SR z#J(ktXA4L5ai=!P00m#v{8+%{SinDYO)H|&F>=7OUnA_We?mE5KqOEltjy>q@RVTGgvU=*0gufp&H0ceZ#v<=OsOaZJJ)O z>hbPEA1-h3Flp-z)*Upla=iq3%uddpjuCtr8JZ6URd4nxsSk(l@Ar{xtoSLBQyjo_8XEBEw7hh44GvJ_QH| z{A#Kzy2$Ma=wlaQh|CihvjQ)lG4h$8tM}0E87s-We`o5HBs+ppkliLW+-`QarF1ADEH?9P~akxEF`qV`}Buz(`3%`t86hPmy z5nuS7f4CUiAMkJe2M!~?4{u{}cQpqFE*A#+aj(ugWL8(Bj`71)0p5UFmQobvzlZbf z8PohE0qG{G;2vHd$<3knHA?2?_)Hogu0-(4hMzy~G%A{MZKzC*px@^zP-M=|ozw;0 z)&q0Kn6Wpy#}$;zkS7##4nAqeXtjX1^_rc#e<#BeZh#0OCbq8>!T@t?zOc2jx&=e& zYk2!s9Sjs=yjW9uTq7*g(HVAwPi)%y9oeJq>%~e=l(}B+-F=h3nKa8=_ni7q51Mm8 z13i)6&ME%+Rk+(&{T)%phO-rsX7s+JDJhC;aL&AJ5D8r6h)0(ltv<*!Lu!xEmc zf6Rt60m3IM8g#{1R8RCDoKK;MfivmPHYL72dOLX%-^kj<4%RLX3761q)H`XEvh^U( z2!w>@_N)F+xqhxP*$2z93BZGt1|iR*Ldb~e^xhJ28(aZ9tNUPhIXDl;l9&nMgec20J#9Oe_$ z3z~F?!g(|~-_J0u_MiK}z=BDCf5&qvj0&oP*ZA9@wXoSof}6;lG99wW{QcM^bAm%D zxVdFWOUheKzKDeMs9X6or{^4paLk(yUdf+9XbTMYCZNfQ2B_jPf3l8TyUs8M0|ue` zWx~Ec#u_ehi2;vG7$IrlOY4?5s$LSm7KJ*UzwkPkm|Xu37AEcWUe6`Of6A{ryZV9$ zi45(GY6DdY^vJ}GsSGw9TN8pepgbI8r{bwEvfUlCwSUxUW;4YMaL^vl#Hx&Vqh{@S zDhCVcsiTiiXrv%y#rc6ls~R!xsB?vM{j7P#dj=hGDbtUt@NEO0b&>2O%e>W*i}^S5 zz4TvU9$I2*GYtuhxqGmAf0X)u7IqU-MGbICfDu4V5@V+tn}m?h!2JS&4`jbn+;q*f zk)-iP!n^5n|8RoOTIZ*bUTI#rng8f&-$-ij1D^b@Y+4ftG!Zm)c(HU9oQ`}5P3X{n zC#Oxk8wL+Rc}d+om?FR4hFJaTl(HuLXXvvQS-+h=aSG~ibkEcuoWOvl4|k}+hM^9WSCj7V zaBzo`F9AU%gG0QUFc`KXzJ8WUzrFb+<%s;CF?2RWupUV(rh65#Yw!zWWJn)MV6yiP zI$IG0_CC-rSIT=qe1Irt;cJ+EC5XP667d;@!`a$f`s@)pe?knARc+(&?aW?Sb1bbG zL0&{$oAB5=FIRHD1-AIVUvy%CE27CY$0F{Nbn}&t!?6QJ!NzoGaz0TQ!xQO?tTG9% zzM;^E_ZB;T=L0e+sScwSdIoR?@HJh7hn^{Q!-fQQ>?8Ec61ZpWA$`J==QY?V4uG3M z{=v;`x!av@e?H`bIr_FQY->nJLB@^W@g(YoM}LSc- zUl+g)qEu!`K(nrmNeNq$&*-r-l#3B6 zgy`Q<+Sg&$_#`x^qvlX1GyoJpV9|Xm5Q|LT!-^A0f0ckIaH9}D0y3P7w-QxlepbYv zEdlG2d5rPU%O&_QsdfZlodgf_a4kR9VWI)2>tH`t;&MhEP4XRyADukM z7N!DBifnw|Nu2ypSkMSIFmlf|MS*@nNWszf$(8!kI`AX8_U6&&h)i#Qg@ERc4&$s8 zZ8rLdf1p9|qhG->)j-Vgo#zs{o17Ig)C+59*d8e-JkQqwjT)J`Xvdr2njhhBe(zar zJDd=xwn4XxYA(4BI{FTNQT>dpKNO>FKVXX*Igm^uSBZbpdbiD+VljWfr!#MG(UOao zosZw-r8+pdHOtqe?(2GY?js~9#oQe~MSRX%f32Y&v4#O0&sxZf{8%#f?(2)!2tJl5 z)OFl~p%`GZzIgvyoZes_V1H%l`%S5w6KHd{0NN^g`g2Dd)9gp}Nw*b&@1Z>o!tW!5 zG7&RUo$0y*z<(Ba#>DcY8X}n3#~LiTzwoU=K4_@7;K|$VDfF=vm{M`90*}{yY22}B ze^A1ThbHb@#tXL^wrG&QjxxYkI9D?IbxGRfr+JC3T1Sltfn-`I)W53*E_t<*ts=p+ z1=eRvq{8E$0pfB<37N8p{7751>JcKOm0I0K^3XKeU|rJqI0E zkDlS34I!Wm@w2njkhjnjhkVzx-i8eaf9WE;s>DsF|ke=<4`fd@yD+V#bTr&iqo13V{U8u>~Z5k0>;6rv9z=Y=20t5TJc zd7o)fDHQ3*wi0f2su9B=y7@#|e?W2_+ge2Q(sS_xol(Noi6Ql#t4|y7`{U71&gNN1 z<%Zt~_$0d^I`~L?8Kzs}Q&+w9Mze=dra-piBbctotdr+LuEbHyx~TH}itE;~NQDfK z&LhiGVT-Fuj97sj3tRQu9TF;qL7y4D{mxim3}(XBW}DiKy-h!QdW)S5JY0gJ|hT{SDY*87ZM-k<{3n40ng!Pz7>q%TX)2>6;o!1m1XIfc48 zIXKFib?&AHn!$-q`?8i2GYze_GMA@LA%mdiX67 zD<|~w9dG&o6AJ5H!~|#LT3^olcME3Xvwm0~^71sSrHHUpVI@_83Tn~I*9Hg5r#BYC zE-d&-z+1b9XORIn0%p~&M4>L4IKLB5Od(TOdC0=A7ABvLj_{x9F;|+esW}i9N_C$7 z42quj%iBIIEf(Q|e}@v>h#!#g*{|({B-?E|+kVYNk#Xa^-6HL+_G5whZi@{vmZM5m zm$0g+F-YmL{+RM`48Wk%f{FFi0|eq#v9$ZJjs&95jTHA-tQA=EU@T~@HC<{FCdf}) zoXd3JW$fY3S+L6KZ_`#gs^OsL( zDnnOz7Bu`|f*HNEMdO-!cAE6raKAzqE&6V~?6P+u60`!H-;4_kUm-4mseHCSe;p9!Vg`J4-IE8x#ajdF@A0%p zNtL!T9|Y$nYEEwiFlhKqli zts^Wu=&f(Fc>{NIqW3j{J%cK|--WJy<&dT?X#qgI!dB#-TrF5QR`C72(c7bcr|P9t 
zkLHGGe^uL{IPwX)>(l+HJC3R<1{*ieG3}6eX(--pG{0!i!HUVj177LFtCHLPAOG>6 zeBXcg-T(IA|HprY^8UX}UhDrdc{gzT|8FKQfO`M`?()0oo*ejy@8IP4(`Ws;9PU4B zkrhUM1Mm6o#{;1P{DJ>DADjb?O1KM=5%%|Yf7md|Biyp#Waeg9C#AcXioB8g8#96&CD|K`GvC+rRUlTG?FpD;!1BXD8E zUqRr1gunJrCqYCvMc(xvdrMjgD1`9u`}zCv z;HeD%P$i3Q&fkH5{)V6b4j$t_ap>P~mHhtOC`r%z8U6eH`+<4r27-z|f4b7_f2_>+ z1P?7gh3{%l!LN>5jWKHg2n8J4;Op1Pe-wZP?iFg%R?9Mcnjrt>MkOq+;W1Ig3E>}L z?lAR5H+2r3O(XB6j4M)Q97{jht25!oIl#%qdQ!3hb3`hVUIxz? zIKLg~x+H8n;`P(S6CA!^h??S`?f5}>; z4L6@5Vd?r{ZI%$I&LZYRANUOHTLWIa8PI;7faaWUz;gph(A6ISgh@`9*9QQ!!Vp)7 z)x>6nPVp!^4#H1fRfCKm$VVohQpCj)KdD4VcAk_aFe1bs+~25|?-uOV`zRnz-4)D& zU=TNZ7RhrePl%Z3nbT(LH+3+v2`{>N{0 zU$%QejoGJAW+ty?0TQ6?JT>H0%@VH>Smw7@dP2<$xi@TkKQ{(|4exf^=Ax5UlFV4# z0GjWiK7m@l{1!?~ony}hp(uY(V|#v?m_Tr5I4+Jr*y2F}VY6S@<~Rk$e}Td-iWJm0 z6%e`^=IY&9uy@H90sU^B5Tm_a8+a^x1yAill7N{r{P8itW+^3H~f}f#akTnGZpe$ia z_wK<4K!4Cbq&TK%#iA1he+Rd%qm~H0o0Ncp!OZ(unQmis<= z^Otf9PmW(0rh`*mTdh`sHWLImb1qDyi2SeyLwem^o@wp^LThu!jyU(AVgh_7SxKLF zB8x4+2Tf6@k<~9w4f!>e$MK+vDuyc_nBJRD9Jgdw|5Rgfrm-5se^4vAetFo#!@);m zy9E$dhoWUM!coH*NwFO;4GUcZCzZe6b3HYk_TyLbI>>Jdjr4+b3OIfLT9-_`NC{i_ z6JGiY03_;LO?mUtCj}rruCu3RDW?5l&8Ag^j3TVW*za}09R8NI?+c4WO!m#S486sv zpy1=PT9$`jUej2fe<(M~%E1WYe8sK7f1HDF(*WTK%gqT=|DwqHc|ay?aM_}MzzM=dTV@$*L08=umV3mJO3dIN?8wly1qhCc3L42 zM{($uyEjZu&j1SKwxS%al9O@jA2x<=i9ueEJ_U>f{7Q!Ffu-NNP@^gBc5qDCa91#8 z@BLIyN5=>6f2fb92*3x2Ve>D%zK;7th;Kcm6xb~s<$Be%i@EKr+Q;X>6VP@7*%4BQ zYAt=%WeK=jK@WhJ450P(DM3HVJ&W`4r&q1}a(9jz(mJG!fX{)ffLfOha9gH!KOJdk zV3*D}016~#CWS(q;f9ijoegIS}EDQ*7ao zk$S8A!XXLQS+bIpHu5S%;D%XZ4Rm`OWuni#^%3XRdB7EqBS zvae{4e}vCVFCM}<=dgC%as;>pPEjEuNk#rzys2MHmUcd$uV3e@K2z}zc}Fb8~d>Xc+MMWg$W(wweZuqm=}VH@Mnk#v1lYF+ey+LCj|3Y(@4QqvD9N(3f3X!# zjRYe;{w~Po0!CRV&vvI_z-ghlC1+dmM_)2JuZ83g_H-s3SYw#i9y~J)3V}5sw${5x zjzV#v9-w zgC^-n+l{~P!eWZ}`LIE_S#{Pye`NAi$M32}S61FeUzRrZ2NR4tWxv8Vuh{%{T$gaYR&Kq4VRsCTZswtVC~;0YdzP|3bu5rEHi zJpjld29t{c{b&v{hRR*O-YoKYm6JzJFp(p89&|TwEQ45CK?C>o8qPR9e}6s+;o1`v zy?q_W;xSVjl*o5xB&2Y-CK(niQ@?*y&47y4BomNWnk4-2n(`PARV4!NTQOn(E-#1t z0F%0(u8fWAw_5_$0dUyW%5Gpw|Af|*mXO*|a~WG;#;-MC0<3dvuYdMvTLv>YxL3f< zx29RpEyEf&>sp(s+)0`Af6)QN$6=As_@lAc<8|;42V1jeMp6>4{(9n$3DX6eL+qp^ zkLArZg%>BV3kLyen-@12`?CUV!J9fDGXh|FWoK^cV+>^aees?EO=*fP4zQU=1Ift* zwHYCiIeId|yDkg&LRp$MR?l4b-{%OHt*2W0o(d&M5BC-h6@g-0e?#AqKI(Q{yH~PJ zMIK{7HXfBY&;TDQ^(HVAyMy*{H;!C9CU$~hASe8>L%^?E&U!?b(EGRZu8-|gq{X3# z-ouilTSM)Q0NfkJ0A4Rp(@D(n*yPiKVCegV+6jPxuNh-0TtYu$?IyqX6Hk5Jc(vuz zxN+30gm1M3t)ID`fB*LL_recSrj$njogioN6&eP!{n|z?AtPDyht+X_%Dk~3hyW(O z_9!$3C?>p@{+c^zzjOu|r5*+>95?PDY6%}3;*jf^(x3fuoS^9zI9Ul^Ow~M~jljpq zx0_v{Gi~z#b1G;zexyo}tjd-*LFbCr_x|NlJbI-3!zTumf-!&|e?{ znE&p@wa4vrawN*@-Ru5yftC3qMG9c%OVrwbjG)KRBK+HZ1LVD;U_JxFb?P$`?e#3r zl{rd_e;xbOV{d#(9dDpPT8ATkM+68tua9*M*Sv-l)S*`dw8PR-Fj z9xy9X9#b!@&nX+&?dghP4^AcCAl&u#=0=#re|A2C%;u^W!-D`m^{4}2klH~?V)gS1 z0-RevmFdA0$9-4;YVHQ;rns#Blz;{rr%dUqu2R9I{(wDT>E>ND`TEkSwfatd7hzYN zs1zmcvrd!06}Jzd9Fj)WQfBXL`kq*}JOGT1Wl@t-c-g=hGxTPDiyIUc_AZU-Hu#A# zf4(EdP=D@o0Ys-tJWzxpC7SB>vBUbYmYS>GJrQl#=rdkQ-!egGFm07$C8AVhJeiksf&H=tCC#f-t+5^Dk@+(Re< zA*UAccvA&9Z7XU5I0UgM0%{w@N%#sphS$&xpS8~S$Ad2r0WR`CvD!;Lxz!xOvCs(R zY0Ke4iEm;XFIj!f9z%JPn8J)`j3awNw~KCUXO1y-g&~>w+-_*{e}i=@OozcHWdMLM zH?Wqrd+(W$>!$AT%9J$d8n`P@$;9qU52kYVD&HLM z1h$>9IWu@LbVeHDe>0}XFIEdF>k7c0C9zzWpG90CseR)fp{3FKu@>kEDM{sS_T@ud zi!RJpLcHjxQcOU|khtkO3C4BP9o9bZLMJqf69$FXwL-?K52lbt!LuY zdKnv$wo1WztKV-GOgdAM&|tV*zXWLDZ`K99Lwn_r_T?4pFNef0*nze$-c7eB^yeO? 
zy$@@LL#!+O_!w`z*(scq0Z9B$gY3x5l!0p4`=KcorSvRgc1$yri*&T3-fU~P<^~^h zu0A&?lQE9sxGAT$vvt zY<*tr?@h`!T*BCz`@o`prNZIU!?VQqSlg}02jC2{*1mQFtz$j9^}eHhcu^&O@b$N7 zYRCKj3VQfh%=%8pVS?Pb7er-ije?;NxC5RLYUy4me+TF)3QoQ$#5WJ%6QOpO&&j|a zXkj4-ex=_me~Yglcw^kobmkjx{(T=_pfr|h|pvEQG?Ft*8BEg_h+N zxU(RP>(rEM6|meFS5;9NRya)>=Ddde+rgk;rKQ41pj9s-Q$xTtTQ3GF90n* zb36s+nxv6&*K4ylRj6P#j_y!I3%b`@%FxC|(Hql%eWQj`TWQOVB6veZgx7S-=}yYv z&=Gm2AS1uw7psJkO|b*F+aBPHC@FjwjO{yUl3mZ?aLrXTl&6r=awR-riTv&4_e;JN ze+XdrK9!HP9B;Tw3@W!Yb}Qxmp`s46%Ts+HCD%TjXJl9xy^+DzJ2^y5kV-+kA9B~< zA3?O>wOA&n=k(d{F%Xr`{N8*R{saZCkrO8Z=$<`(%9qH4SGH4>{WBAu^uZuv1z~9d zy}Ed>@QKAZxG4`Ax$F}d#7x}zB-Zi;e`w%(w(0i$>cZQq;8OIAA8BGSUlLiw1{hzH z!2=#7&hJ3UPZ|MfHIp9iKhil*p6v=2r6e1+t9Y^QH_E$#hfp__nlHyd<5!>%`K(;*)Sn@FBX7(d`ECIXRzf4LQ! zgG73?0SUGL9WzAxgnR*s6Td)v?EFexB7%K#NM23|(cpe(Cf);&vi#QgQ^mJ@1IV5Z z!qc6m)AuNVd12Y}jYzyEa;f6xeNf3;Nl)riyv zT;cBq#1TNgrl@s2>KmtRsDic{fk=2lRBblZ=uMDd>EGsjy8+DiK(QZMKsYe3A5xM` z*)hFFnlia%{Z;^?kU%w?qV7Sx$U@jqD3Jr9q|Y}*Y#M+HnE;~%oxxc)_H;%r*gwFK zxXESnvp#YYDIC+qgQ$Cue`{Uj?=53R^*6@%JKPS%0(1-v|2b|Z>|a1&{jk019exUw zH$+nG`_FN99}OI(3aP!Cs!~~&vT?wWf^fKY9Hf~{={uJYTYLQ;Z^o{r+>e_gA*4PRh6-U+D%$kGV} zmBj~Ewu^kv=!O1-_bV<#?9xA*rn@Fhnk=;U0Lv_sn2cUTldP30?z7JJX+GU;7RMC8?(*eFe*#<)w>IH<&fiv^Hi%-05+k*-?z1(Q2ce@9>qS! z`6>JFAx?1Ye>D=WV1d2IMBm1OK7qmwO9nLM`Z&u>@;R0S|6-)IYqu5%r$FeBBE6#_ z4m_BLbOQFS>k{d5J^@+&bkzK9bQ^w5sBOICBmb%tHW zA@ltL+^IJ6Unm^7`Bg}CD)`ZQI9GTJgclPnI#`G{f1j*Cf8``r9FA~p=7PC!`l*n? zkkizRl-33Q1M@;D2d;mi1wEz=1+T#N)F;x5gg5*e#L8Um&}W6-HCMvAfjgDc&GW{3 zaAK`Y*^_k(*~O_zM+lXR^tbq zulTx(A$zr&jYK_J_E!ahTDuAzPS&Vi+c{Uq8<7eUO9DXx_@-rPnEM)G| z!V%H3w=WU3LC4-AK}s)GYFi5L4y~pRYymOxQ*5j^%2{C4vkc1l5;S#t5)0t1SD@t( z8%PCL;q_H{`{rp;`hXiX92261O#7VD>0pE}6BH5;f7^nf z$std{yk%{lpd<=VTr}gK?2Af8=^S|G;6B>vtR=q#9)kJ&bft(J=!gU}Md(OfNcvl! z$Db3j7@0Zd&^qcD&OwlJdM=G0cLPWPyru#i`1iH)awCd@T-(DPG8 z1zbJe%UY?z41QVk>@7aubhd);A2{j~0=lI;<8!Lr@n2W#NA-o72rR_GJLx6&6>M-b7`6WOj5$ zO&dZ5wX?`z8MQM2!dE>|e__A@TEXLi@8skdyW<(>_xoME59QO(?&qa|DY({NdI|H& zq38J?@B;k_r>GpjKJ*qmR|)JhI;S|}t`QNvGEZhJ> z5Mloy)~rrg9;y?2e^_Y{HaGtU4o-<-6+lS7ZW2#k*Ue=ht5Jf-?|z1-|(&nL45 zx4P?x@lr_h1Q6milQF*r$6wU+VL!5|NsN2ii`*;nI=P2~`z^dV3FG|SpZMCVG#v7L z>)r|O@ehJ!=rS-oARQiBDpZO^hT*44Hg^FlVn%%`;V{Am$2Zn{-b@?ml0o7ucl&)` zVQ_tZBfS;2f4urfVj0f~m55SCe=Im~BoJ6!_NuBre9|Ugd_0kXku>>-z}dfrKV_jt z#GjV>+s-JOE^xI|h~2vSR=vFe#?$$mQf4DjDQbGECXlj)`3e0MvAm-?E4+aEGXg`O zvx@~i7NBu~_DQaW*S&JKjEk83z=5Axg^&Qb^$~2&e;;(`0g!$ANjKC(o_urXU<0W2 z5Gvx>i|r}C*4`$eQ?IaDHAzVU-<4}K&WLbDgM5Z}eaNP=1x>|F^H)A3BoxNt!>E9i z<3XbHi?16%>px)f#@GdiPEq>pVJl#pOfW)!*V^u62?EdO8z?QM1R2YH|1xgmWX zok_(je`?=vJB92@K`kI~UWxll=wti*-LvaujzeYl@KD5Hb})bN+n3lk+&rVKR&K;g zhB89YN<&-TU{*jpwXVZt^|@KQ^huuaiYzeNgEVb*PP8h022=<*`EA|{fB}+$NkYzR z2LkRB1o?Tel^|ToVmKqcim{kV-<*-=q}G zf9L`f8$Aft!#MigSn}WU!}cJr0zM~H_8WQGKtNaN3B<)w*!iJ-@pd<0hdf7?UAQ+u zjRm`dtHc{}z1IWO!)<4>F~NHjNVD@MPHLhpaTnr?N@c244X;H^$IS}70^bfsfz$Pv zY9VvdPlg@q9=gY51JL*>Kx^R@eSm)je}-z4mA^!dohtyDd+Viv3Zohen0W@pr-7!u z20|qeCJk>lOW4aCkQw=S={>i@Q(S6LgY~IRFn|XAs z2ACH%P@-^ZnEB8fQcK2cy3T*I+y+<(w-UM!K%>i8#rhvC_vbbQ5~G8ELFq3*$q6`x zpA>x#9igJ$9rY}iyk(NyWneWXFJn%DLCNBqxthnBn-ft2F^0J1f7IZCI$A49`SO27j-*!#LgO*9A$wy2>?NGQ?bZC0 zD|me9tpb9>Q($&~nO;=RrrSGyPX=QWbj2^bsqj){0IITw$51&(Px{&v18|-j0vtNe zj*sbKB%Lsm^(~NE(V_8_|8Oj41Z1i)jk$}obOHtU@?Qop$~_xSN?Q!&MH+pZVI^F&{n%ZmM0sBTK*bl~iV~(Dy9HXhjrxuP8pAj3{j*ZGdE(D) zf3G9#6|j9@1Nyz+x%}!Df8iC&Y^U#?VM|JpeQ(tyx?oeVeLH@^aO(5E1|%ATANUe% z#5oG7{ersT9uw9Hwgvbk0HhBr?U~D*L-duCri5^UduuqThag?GlD@cUKM_#hCSC6? 
z@ScZGT-{k83ARdJ^JIat{%b&R45Xk$$ptM-V?zn-2LJjL6BzJ9e;5jQ23`wrb_GEO zG%2?d!t~QpWCX@~27Rwk6A;Qcb!CYG<#(AWCd?9P>)@v4m?f!S*rk))s>KhI$I zU+p`Q)kS-m8F6gO170Y18`V@G1 z3uA4|F4Kog%i6TA_0S7{n%-JOUEj$|$*NS7Cc$>U{H=7_e-L={io zX3m=pUAi90fBfdag##G=pAR6^(r8J)@M^i>=qvb zRisyWuPKC+cDfXlK5P)Uvfvt9i*M=&SAo2J-cd-|5n+IN4>{`20q$rOB$o8!B|csQ z9G(>|1)o;Ec?-DI9ZO;5-v;4G21}b05+8VRdsZ+fe?p7#&O-h-inJKq#JJGeoOSmM zvDON}*weqp)4dQuL9B<^3i};RnQI_m0DHR}%xp_k1H!qq9!}aaTnbN#js3FiB!eRdDwg5f|Dy>YPMdx-N z-#PTjrptau2Ym)vsr~sN`TOp{^1(Q5QHINge|$Y@qh>5hxZfV2q+8FU2d+vXXSNLS z1ClytY|cIi43xYHXi~4cb=|n4)?eBN_yG?Sp84gt zfHKZ7DquhjrprHXvCWR;$?@HS+aF*mf51CIXWhgxUGy{T-?#G{Ar=B|0~7$t+ysnb zsKEl>5Wrz02u~p}2d1l({kE{aTFMu2bzd)d)G6Wn(nj`DRO>F<&wE9GDNJq|4$BeLe;@+k zX-ROs^Sh;`Va`P7@{4#3K@I%`DL<1V#=s};p0I9(OBEp1JPa|zF z80(MRdisr@bo@+omH2K~I@ETMe`Pmto7W-j#Nu?`|2{8(^q)BcAOX5iZ&$GVqMdnI zt6BD~2|ncok_BI=3xJ$m3;KECn38)jQ>Nr91kG%@50KHtg}d#8xilH^iQ9syzR$4% ziq+nPoB>5}wanbj34?&3mB8;Xu&lcq<>pU)WJE0>;+Oi*!$zae@0S5*feJ+ z_H6mz2CtTy@i?c;3^vKW8RmT-vily(!Ckj;+DZ86*;%(6z|Plr&*6P^Af_W6)EfOV zovU$eAzeIE=YP>~@7{~|uaimJEh%L>fF-Nvf>I=GZEiZjmQatOuizm}fK~ex(PRig zpQaTMC%6jY_QnlMA9qe!e;+Ut@J6zA;ygZWJAQn(53pC>EJem+EcqY9?zvlos#(k_ zz{z+&iCh%!&P^_{mtagZi;%=G1`5`N>wYg{A9-&A8*e%}+%;LPa@|TWV6G$V z4u~K4Kp5QAh$$w3gW5~LRiTRzg$6FAd(mP9CuhBC?<(ej&j)g4MTN-`dsc z{I!1ezAH$m@Ny}jXW!pt-qE|xIyzO`mxT2ChoZvGZUBD+zvlhNz?S_QT<3v{O0Rl$ zPP{106x6M6i#zyV9NY0>0@|`~z_PO2m?+^ z1nLS$A$NPA&*Ti@PBxQ)pAe$=G)*k`xoR{ti=MN7GsCQVD0yiAgOd|3LuMu2Mle> z%xF#j3wMSjbgnlL+80>;oNuhzk0JMe58!Pt)klV8fCf7vIvcSd7VyH6pFBGXnhn^TAo~}GSVKgCyl|%z2M2X} zk69-;*cKm7Z+}B;*TM~&YgD3eUwU(3aV|-3`jL7l28-!%Sz%ajK^?0*U@N@n)*Z|Q za1>|(VybJiugyrZET?1BWBVcQLG3}^gB_ld|A)on9aUjI_FICwMfuHv&sdpuA9afv zL4}5I)(VjGF7!eP8pGV~UPZ=31~TIgXkyKWSMNz}{eS7YNxjsn=m4+&$|8!%m#`3Y z&78sqB7R^HfRBvjD%l%=yha7Tmvd`Rui;Q5^so z7yI}Kgny~)`JD&+9-LKI{?FJioZmTIPyC0tZ@fVrsGzoQ2*p$r808Te_^zeJYdvZ@ zbUD^c6IG@HfQLEywXk=ZT6Ab!4Q0x=;0`Z7vTMW6*b}Jbz9!dC--}?|WJpir>WTSM ztroW7kqvr9vBYKX(*Tbbj9{(o+mBZxMlPI9?0;@H0WB{-c zLfiYr`LI?QKP=@ERsV!dSbDcNfs$Nu{mmwvJm@6=y{ zWPjZnU}jh;3;LW2T9|-n59Va+0rClBmYMZjB>J{017~3xAj5C7A;5ltm1}2D=Dpj? 
z$Bd{M!b>KU>Y=q?z>|h}fU-# zMvP;dYBda-;5oKJr!>2l8zPKfhD05r{Q#yU-Gkz(VK86?cKN{@YJIH{q?TQ;j-d?oc(1pfH+(PT zCr_W@ot)sti*dA3y@@aE=Q^2E%k43BZo}2|9r=Q+K-OODRnlt)d;Z4(DS1PMI)5Ix z_TwjTLn$G`_$P3R)c5{^FtzccFq`so!R)I~p4bqmhcF!`chKbtOwgP`CuwkTn&8Kn zjH|e?TNuVyEEA?kGxvK`!0O`*O%-q5-JQ3T2&!6-+9RFDTESdm*n|}VvHp$0Y3biImJ1bvIQFMoMx3qnZDXffc}m%d|Z4e$f;ltKkI)2`ohwvBJf z?e>@UZCV|q_dg&$pGV=qM@MH?|NXAF6Ts-op#Ln@$Gy{5AT|1>^Gi`cBUv@x%sy|H zOu)M4F*wj`$g|&>WiPc<${UqC@UQX(nA;Q7|Imh&_^wy=1`T0w5Y#as~=`!lWY$zqLk_0&eu8;3?tl1CW;9sAt?UT7Yao%;-XONuuGhd>dKc1`B;z3$oi+Z= zX=t+dp$%93H#e2`6@LXG4PR9qn8YwMfFS!pLC^8|1)A5l#q+;_uT;E7TxWf}&Ryy& zMp{~$K+QjO&U|3kLb6R0^yqIZ&<*kj&=od2;HE_%89YfP>;{1z3oIDOlcU`jqtSW( zLTD8OjkHq%2V{Kx;Ah#;Op}O+RKrWlyOgx?U-Z=GT~hQi&40bbMpph+M3o(U-p_-} z0w{l3-GKACK9ZoFQwqg;g0S42Ft{cVI%s%=rQk`dYm|l33V=V5!fC;=sn%anvml}0%nJMq zdKHLl)01CI!A@TX)9qY!tUo0d8t1_^VT!+0hV0O|dfRk|mHQ)&4 zfITzZK?Md6)UsqA*iwESo`tTtFz;~Qtg_R>Jsj*48h=J^pnP)h7uCZ>;*A0^Z-=Ej zxEVAzxWhv_hu|UfjL_-hA=#Tpq56e#ZD)D8Tl6_7dsOPxp|B#L2^s2J(-NqZfgI2_ z`2iQ6rv!MS-J!W!z&lLhIDixyI=CnK@r~@a4^G~Cj*L||qsxw>&LXf~E0Do(x1B@ej1ZLGmq!RO}h+$jX74?x$NCrgE+ZU~#uvskQhyAwTal3HjNEmM% zW1q0LLd@QH=h2hG^|^pzJ9}Qz`|!p0op_J1q;9=8e;CM!uq?x*py?>k&TW^eijk~Vg~ z3I%7|$m4ud>R6W06!Xgih5GY#1v5mm&P-rC`IA`o-!v z8|exTP-Cnay9P8{1>QUXjup-2jU2|UVtcwvVO&_P^`6o$7JU}qQ+D{30e^#9Ns&8a zfipzH$e)-ry%j+dETD-wgl_GVICsiEVdrv%ksoKit>o3#ZMQdtoh-6 zFwk|v;jL>sXqsr~SJa{Of`t2zGj0>}2&~TM<+0a@P5@PDSi<}R^tqf!FNp1H29!-Y z_}=>66O$yXn+eHvAXE8}8h`wVMKHcjAn}9H__)t4{pc31HAkCuiq1rNiP1)1f3J(9nN}ncW_P?=XwIqpeJ9KfSSOeykf5qB1+er z1iXL;DB^!eNdkmef`l#Ms5^jXKIB%eabj-rtH=#M^HTFp@aX48e}7{0rPH5-_c6F` zFJH+2`X(hYlcAt6XF^}!4Si^gn%=;eymq4sU`fX6SH9sEH;3jy3|x=1JnppzV^oQQ z1(NioR_Bc+j1;D?bDAEg{`+t^P240LoMK&|8`*_dkcXtEt^$d65*|EWs$9J(q|8Xir>(l6M zeXK7d(Uk8k394@gfaey-4m$8{G7ea|=JQbifVKLaB@dD!FvhFXyTH=T;ksx&#-SN% z6is0b?7z+%(6<~x@;Gyqe(Hx>Nmg9;oh^I;nQ1|J_u?G!6@PS7l!OU5n2@fY3Q1|` z0Kw$;y1`^EBU}x4DpU}jKLrlciMy=&AcFH7lFWJ!v_OA9q3=PDjb^}+zximl+K0Fyz^mrJ>0KQshbsRX#sIxnDeZjjLs$O#qoYGe`SdjY=4?r~t;BY=oMK7{MTwQlZre zZ{eiY&41)Al7vR(ff>IYe(2$CmyartiUFugS0rSR#Q-i01EZ2}I7csE;=v>f^M)TW zFreU(aqd=v_vi~I4+Y$KU^VozOorE1`>N8XG4&WYA&49@4}6bSMi^n*lEs<;AfCQLFXJ$*d{8{)cK6-i=kDx`A1Valifp@y`j-VJ9o-vC?Hk73M5T7bM)hTScBYEIBn zk2k0?IW$_8yVup)be^sZs+P4-ftd1)s(4ytsM4$Hg7NRV4eQp0;d!xYEtwHZ+q$!- ziGROKZ>2EcdbC{oa%e**hRp|z}@j_9iu5F2T zm%-!LxRsFp3&{>ipz^+;0s)C~*O57}NjlYU4WYZeId!;N67`YR+<76+Mnov~?O#$7 ze7I_7@pxiiyy;hr@ciBdh*=tte8Y^0OMjpKMrZktL0OsM19i!6KEDB?-GtDQ9H4g) z5U)PMe)cysa5(LIa_p9_l11yv17uqb9pc?7JPgET9pGpsnK4Oh0mH4jbuvF&2Wtfb zM~686=fpru$iEsgz7WGtVZoQ;q!HIw??2Zf{hY52vb{hg;rH%_EIPRzYDqCZU4NW6 zd22j!eN+TO*hE{~fO2cZ#{@ID>(U;Ndb!}&1ejL81igSc>FE9VY+q8#wAkliRrB4Q zZaZ@r+65EBs73aHG(2T%&auIuy%e9yHnao2DX)V7u3=t7O;UG?EP*C#P!Bx5c(@%=ibXgneobY&$7E_F6F?F9EOY!#>m)g z_TCoD%z5Evc7u+Y3=X|7Sjj>4UrRlkWqHQa%n89Cs)D(XFr?g+~w%9usc);%n#|0wqHwn;Gj(-rOr+#~Qi&!VHSgSB#IZ2S-H5?@%VF?3S`lFquo<;Rf z_7xi$faM%}&%P1&9Pc3CsD*k+i{N~H*F5&N!$Tq24>*ExnLGx~z6KUR`<0UbCwIPw zid4QJ3^u={%Em=5rIE955=a*JwVaQ#zECY=_6zag0%}x%9`uqc*neViFR5hv=6ucr zwn75g00^bbGGE)*g;^^Fe3|3q&-qxM^~obQ!T^-Q2RdS{4={eRv50U>doAN&rhc^m_;%E!42mu?yl;dg|&F2OtyQ07b*?MtWob zSkjZvK|wTAZ-2vc{r6iB+{u4m4*@whObd37<3Bg^hWz7M{+B->OoZNl4s+Z7(2;*% zw0~c>zaN4kO5$^)&g^4Z63ZR1DaW8uMJ5YaYX!egpcK5ndw}D>+IclYIi}GFRgBcs zuXhcGy5QO`n^OZL#;))11zB{;+)FeKJ?xTcuLshPNPjFfjR2tNJ#`Y@%YL{cA7=kS z?d&%{%tnCELRz@%E>qx|u+R3Wo}~e_F!16!frGriwz+=^aFBhiP-d2c?QM9b>v4>{ zFid7Yx}hHYumO*cW<`L@*Z!(r3p5F#sTtGFH1OZGJJepb-l->8#Pj7u8$JjmWVuFDL2! 
z)_cdt8&C-}z!(ke3C>XMZ!#VPe3IPEfK(4@<~%PxMi%0jo)=+=E8UvMzk_;TtjfdB~vh&vEQ0)d4ATOz>I?{7+gadmb7f9$na#IA_yE*lWi zJmkzAIdbHiW02B1w;uEpL}-7P|A(_bTY8r1+61vvar9Pq&<-XsRYgZw6Ch?W!;XVd z%rgjZ_}sm8zu*7;<#M&{?q}zg9;HwMN#@G6uHhFG>QIgTeg5k^tnuI9cP;Dpyf`0t zhJJDDFV4O}-q?oE2^r!g>+gB`|ML0ow}1W-%$5ATw}1EAg1Pnyu?&CAn|X-|Mxohr~Th+h#P)+i>Lbg?Lbj}_<|dZ72@kz$N9_oy#Lz4`tb8V_@`a` z*ACX5UpxN}?cm)1X$ODj_Jw!|_D0YygLXfg#DCfy2VA2Y|9c(Wf3Nv}Uc-fg-~8uw z`Fs7Z|Np1`>-ReRr~N;#|7riv>wnt+^ZNMp1J3>T9D^tYoEP->@0vm!1pR>Z`PVKg zf9D0);;$X7VZZZt{@Ts|oIn59F8=5IZ~wHr{B!>QwA+4T$hd#c|2c1naBo0NO=bXz z4&O~6Mgu_d1oO_X{qfH;`uooMegE#02>0pz{C9gWLf?TP_8;x>f3*ip;D5A-dj@$W z`S{PdBmEoO7<}h%9=QDV3F49WZ`_E#SOD5HxBIVc_KPX)f460zEpSo(qwV+Cmw&g- z|N4IUZ=V_D@7RAOwEsKT-(KyCKzmqQ2talH(;ntdXnTiz3>^DE+P+|HzMucu_Sa|o z-)*0N$36a=>-d6k`o5z7ab1dEzaJ3e{au&4_)q(PT@zR`;Mo6eTmJfO|GO>houME9 zZVUGrW}m-(Cp_~4^Q)O^{4Y22TeCdp1zrDHfYpD0#*P+t{pG5TU(N%{>Qw`= z#DA=LB;;ML>3u%_a{eRqTYOJW3{yEAyZ?{(pauM^1O2_1V1@xrr(eSJ1qGWCzQ6ot z^MBHBpQ|EIkcxxxXevNt4+@xL0nW>pP?u1z;nMC4eaIFAoHB{@OxbXSR|X z807}*=SP1e0(Q=;sN8>j?+W|aUk=BMofNb+pl_EE41&-C=K3mzQsUD(ZysoA0CDI4 z&vYIX-=Vnr`}>!NeYwBP=Z1nGQ}HKv40-1HR~*K!BLlh`P!COP6fpq1@b|e)0B#rR zc;fSy(u7Fv_x*qUtDzF~V?X^okbX`7^2Y!19>Ra%H{{j-{T|YT0i;g<{T|=|JxJdC z`#q4md+=ZS_j{nALwWo^KZktq|J&ypRF|MW^Y8aM6r~^3PYzUIufVjRuHx$&feym2 zxSam(7?d`GFO*6k@2diu;`dGceO-QaAGq=`c7JyoHhqF~O} z%OMnn->?2pe-HdI!#DItRDbWcUwarq`|odKEL{U%!CgC;YvK`*HbQ*9Oe6|GD0O@1bPycYN?o6_YDK^~qm<;=j5A z$hBN=KiB7<`}cPr z>%Z&v`R6|VjeD>}gX1p0`}@7`@3_J5_n{xa zGWz{K{6BpE{QZUT1=sDbKQLYX{r&4t`g;%0#a}=F8BcKDfBk`+7}y5<_49x4x&6DZ z|NZa6Qup5-_n-0M{+&Nm!vE%lU;E$l^fwOw#xGnicpd=!@W0zXXbnICy3TO$uKqu&L;kH3u#m#vKej#i-~FaQSWjRaz|{JOFM^>B z?n{4T<3rh*ya6|NuftG>TdUVVR+KL8zt@vFRl z&nNq@`4;BgFFZ$pOZk=fe)BJ^15m}AuOLDL^*~6q{GKD2SK-HD9fErE_t%LUN|Z2; zp)-&+(tpAi2!U)}P0OW}U~DFIai z>|4-R`gQK79RhWI9q@mYSMXc{*>2(s;2;fyI`AV0UqP5ef1Yza)Ujc*cw?Y@^fZ2* z1NFG$3UTmXNfJ(Sd8c^X)_^E{!jtZ~(_eiW^hQ_+um89I?SJ{dR8- z>s4De$A6I{JM;hVYE*Xr_3yvu*qq{FilV{^vAW zqW|*y5dHRfPW*q@IGQv1^1r4S2bx>|g6cd0l>$q}UqSj;%!U}_|5rT~5DOrq1!U+? 
z3jp2c>)wm+5R^yL>yv;R`6#Ymu-$|o0PbHYf<(fes2}?k?`^~12n7U+!JV5RE&17d z5#cyeXki{Iq6|>9)wg-}dd$F$>^aB7dPLhs+JAF*nlpddnI;tW^D{e{5cT3WPMQMU z{=28dtbo<)MJ}w@_*P<+%B6#Vsg>iUpg$RoAh6cx-#DEIY;UZIL5XE) zVuLpB=#j6TyfyI&Fq$3g!HvNgkK~K2Tj$j{6M}yy#je~yI^|A5{nHEAi=6BF#~0Qv zKR>8b4fwLQU+PP)xWasH1emes289pP<>WyYK>gHCGugGLHx#%Px z>_mT2o7^JM8&0YB(rcONU#s0GUr}XA4GxvcrXvgTmSJ4w5v(^z;xY*_`;Js_NwK-v zNns2+3ta;4;eB72ms2Su=m_06z&`SUyFW`ic*%b|O?mCF9%T0YEfjGb5y;^rR#@uZA@Fp& z+t#swQUTMs_VNWdxR{&Y7tzve+(F%rxV;#%udo+8aR#=f8ujctXB7M0*F8DO`7xzd z(7o1iTKQTxmb(BSr3wA~`!coe`L=(&aw=jI-@SvB0nl{8Rn#^QZ}L>O-tF$L^JIVN z+h_@LE}Duz_MGGWoTH756bWudQ*8dE%%CRQ#Pba0mgZfJ`zo(sashwR+pf)6 zM#wSZ#?E;WN8v5eRMTAR{>8cg`sSwN*b)U(fSF;6C)op$-9FQoboGGm6q}Y z;;qhHBjT~cl}>Pzm$QFdPB;s#s=0|0RqJkV&3VTXA%EqDt}v;dyFGX~p(~6a&pAfX zxgx=?uje4q&M9P+y(3J_(d%W1*K-b-SbNnP7M`3;?ev|g%ch!-+n4YuAyglCsPdS1 zc^97!iTSrdREX}d)rs_Y1#GF_lMsGS*OztBIg3VIJ;p(zUY~#P(^67%xj(ID{bW_A z0AM@xMTjIG8wQFJ;%I8?V}*>njoqBYTfS8%0#-48!sDWh=1#CmrqWI#79H@BsMiY^ zs*P~3>*8KF4g9s?uMdB}Am+J?5T!N@2JBYt?q}|nvHe}SbPrWj!Y&Kz1(`XvJJZTq zmM(AE~VX# zArgpk(O(?T%J%R+9;f+UEQ^pt!lb610V&!X#uTwA(mI89;&fgT zVizLIX>ii%G6(V?efZTjnBOIkD)XGa(LS5iC!@-%b;7{UIy5rMqpog|8 zGBJ~=&!{6b@1dByFwE2)oD+gw<~C2_N2qwYJiFXzz1Rw-8ISfR$V3H%!I$lb`$&NP zMt`k+=56$?(?t$o13(A@k++K;O!!^V!l(Tq6PkZ0;ShLsqBk0fbfYYph*gPQ3#&Ny zeklOHamj=oVeZ)b9guT-)NsrR?c_JM87gD|d7>sCsNb zGrsX{@SWt?=33*$$e`3KpOfHd_bYOjH9o)CbtpmQ=1$qx+B-=tKD*@Q*>4#q`87lV z(j9-@9SiD~8m!q~bqT!Ft|oC*WGQ8wOw=C{emSa1KYIG<-d>Xb!uJ|SFzj&{JeUjRU+Bc1RXHPjJ7NvNLtk9d)jBaBKCxat^Dux-CBpN}j9M<_Ly>&Z3T zO9wqT*)72Ph+Ei+$lI5{wd%& zY2g;mf=RCJuU)q9S+>2(h4w!k|CWEZYG;s}*+@puy;rb!^Z7|KCN2s4wufa)I%Uo0 zAa2EU5i42(q(WcAGZ-Z4u;Q%rhEr5o9y{Zmn(RdZxm_9AmWv&BQDol8=$yv3*B%!V z=e?204p(j1xXHpGV7*c9o5BQZ{orU$(<;k;b@3Vu?L9XYZC6{mr&EvC#&3UZU=%>N zo8(6O$>sM)QBIb8lo?+4eHoC|c0X@3Q){xG9?co>Q97n;Dx%!FH7nAc~y2eYg{ z?2wIszO8M+o8n(%6Vlznuta|a$5@C5bDf+!7=t4aY)SePZ40T-joy9a*839gyZw$o zHPeFOcN5e;>jwR)P{QMFwqXC|SVf<$&n+kU0TixLoxY?`gi#z^8b>S6?|p!%iW&Oa zt8ap}aMx%Z)DC}LdAFeuIMo%n=l|y>kM%;zq_H-Tl@0!8tY$b(v7f(D@4lEdhu(Z0qs#?-2 z?kCd4xzs$O_R19t*iDx9e6hl}Yu|2O)$%#;R#oW<3W`a3Q%GKuLF;cSk7k!)y`W`K zVK+;`5KBmd_)FDY$Anl}+`|y+e6WMS;-giIhs^DE zkF4;NtXsiL0w_jDBjQ^TFFp_d*##4@F8z6CCU&e1@%RK64lsYV2Y(&u3u-h^12bfL z?*YYq-s^9w*XuE9&pttmkvmchC^R_M;Ej5Vt9rZZwKJp>sUVK@re_*#PfaTe`*Fue z`gqouSUe_BUz5#<7B8(PE@~{iq&PDwnm`DiuG04XStzK2BEr%6T_iS$9=OKjN4@d5 z7U;4zq*o$5FfxBiX|5fNIP;n?U~r4pBgMQ|$*?jAWjHHrD?Uf9r6g{c+e418M+8=g zwwBd5+<}BEgn&7ecL2|3R@TRAF%#sj=?Vvdu;cm0pRKjP_GoB!>So*TGmdQ7c`~S> zKbPV;=k*l8C8WygiA&hA9xMKOY-8Q@$b+lERukk-Z1;cr96T`lWiT$4)#<^FLCXYV zEfEfft;blKq#2mI<@MiqD*O@eLdZr6S)1PkGXVMLa3wRT2M1m4ZaEj~RV@@ni8=&` zw%`fWe0*~XU%xvMJljhMW4E2T>Ljv2uIrB{RUlVN^X9jhTZ9<7IUGf-H8l@A(_61M zKrH*Rctd|d9cuwmzw|DR;13CMcw?!luMBW&N|@3zZeCKY z;}>gR%TxDH=d14YdRwL(qzZQ-*kLDg?*wuS#se`6PW=O_th%Q0WDNgnx}fuLo6 zg#lqM2s>~u=aDMc$6eRrx8G=5KN#P)=|*6A^_+i^3kO;&K-fg<=QB&A=uYxSki({B z4uvoAoB-kItT)rB&XUp%fGS9kM0g9-6cR?U(i3XNJX?>bpi&-R)^Co{QS#LrbPsSq zcKEiXj@4~Zu8l0N;Lr=zfd>S#kWUKg^)_)gL^7UGcJfN?(e&|Pb0YtJNAiY>AhR%j zYf^t!acAiOJ=F-hb@%Vih&{yDY!N7bp-q##=HbAS?=b{u$+b?hfl@Jm$gYe>nbd&| z#$pPrC+0xW1oIHE3LZB5AWuI&ypR3d8WczAPz*NEgi{{I-rH<=CAm!c%6K;eW8>z3 z9)zF2ocK{z4)b*&xD7>iL&q!S4%%7U9bKK_$)%39MH~Z;ZSJCgOU76RNTUQC`0?~it z3E@P-Qza&9Zw7x3yKf{M-u1OmhzhP{m@*((E#Dp{T$ z>~BaQ1di2B^#&n;9Ey5g$!KquE^$G!l2rF@LcC7QD>8*W?F50mQo79n%`RmczB{wQ1;;# z^Ld`?i}a9QtCNx|7KoE=zb?;5!W49R8JkwUpKj#zJt5MTE7B0p&Kn)UiBMCcj#fF1 zgYAbp&PSk^gn%_XZW1&|rY71sgHV_oqOaEh8W(PastMIYCliSamab3I$JT!)%wrt# z&i5kP#Epotz`_Xh4bx~W^l}H6U~>w0tV!|oYuTd%v!?Z4^>g_mt^#UABLU%_)$aHQ4E}k#}cr7P&k8m$et<1y&>2_l>D45~Mb{ zV>DmAWPH 
z2I1#3DW|nrptnH3MVY7gGc3k%xQKa_vZFZ)@fdsJgBFoLqw2GjU{a*nzxxKg zJa|5WP5B<)354I*l3)W1eE#`q+Nsk#Yf^>H zW;C*i+t~xsf_}#&hR6uI43%lq_-n%hOu)_o2`;@tT|kY$G6sJEo?Ih66CZ_|C z4kh+USM>*EfY!Hgf=~xZRH~3*4?UZ@%VFsKghj6ri>ZmdO{vvpwKDP#B~aMOb)C1r zgP3AG`M9g$O=iETK4jV$%vlDL*GKr}re%4VZ`}0Ez4vKnd^GT_#OfQf`O74V-96LzHD7Ijvd+lGiS&?~! z$0Tz_NmNP};niz`RP3Ldl_kjfxvy9%M0)_cg;wcdu6-La2nlJ0(5!3V=XP{*bK-Xz zLJ98m>6iH&%onVF=Y5N*$cdIw0atq-0>3k??ahD1{eoNh>;~WFHleyv^*43n9U07+ zfkWUKUk@xK;+Hd0c`m zRoFuP2aKD`a9Ka>OC*FuZkkl=lJVyCCg^{LTs+}E-c{+n*-AcOdNxFZ^CfmPWA-PZ zyqCb=Wyj{ULkd2Sxt-64Kg2M1&7{nfI!NZhGg+kC4)gSy9=8JMv-)g`IVr`JW>V`2 zGe{|8olf5XI2q1LagY!ebixsQe%el3@+q?rDi%vjoL5E?$M*bU)_Ym`Ih1`E2 zsv$iCgdJ9>UOzo9-nZJDuq%%2xc!W4en%Ic0M*Or?JCaGqHOXA~O_1 zZzz!FL0jEdZk;=AHXY!%l<0)HxOM`UO3!-yCbMdwE0 zRC5#PcYl$PXSYyU3-yetUbSSu2lVwE_!q601z&8NAF!)=ogT1GUB3A2yl)2e;77tj zNHBkF=1JMVsl}<0Dfl7p_wKhWdANxnFhOh(C3X~1Y#6zgP-W1Rxnq3ZF^qrK7rKDP zgzEAFnRVHbNkELdu;mxVX%x^b8*WH!j2G{cL9VbAu1J+p1S>?|2ko_IH{S2a)WQ9l zfbsNBVf8N{z}u0&7#GjH=iD$^39H7Cn_wo~m#9QZ;b{Uxs;wpCK5ZH2*n3FK1ka&x z&2ZaDSTxl#+*?bADe~RYF)e>@s=0;T0K27jS!&YpM!~U^_?36dT%v$VuuN1!=xO6~ zvW8rUkg_w%BnBRr66C2DCeF|P~VyxQUcs!A}u$&r6&c-#4ro}?OW zm$ph#^K*t8|8%->o|h0wo)I}n{XxA;_91>;%!0xRLE4dt$wB~YInNMw%Ze0D-{#7loZoWB|+do;MW-QBv* z?Oism1S^vU!%fesQ^yNyuD7pCavY7cP%vAyR#5NMknry#yOSmF9yk4h>k(%Xb?b*O zeI`u=o7|hgbI%NO2Nhwx=KtwCH=P7>iIBHxRRDPuKq}+1GiKiR- zN(tonL`$eVUYvh4?yZ@)+odlw*??HhLN;y6;~dSdh3hX0PDJeOVog$>rkII66X0IgXNSLEq&rs+*5X}XaQx_>G|5c^h8y2*S6a3 zE)x>>#I{pIzKb`yv5}FMpF>z9LXMYu^Zhd|>1t*uFdCyQJ? zWAJ)B8Ia6|d!lqW5<()oVPE8WcB7=Za6cz!tmihWo&!OHs`i7gt#2nfAe~vfEieuR zSkw7&2ikw)K-M`Aiw_99d}-o+0Pjrj2Io29mX$b;xuR-ML z9U-3by#{M24lJC~`<9Q%VU$7cwwF^Ol|Wf(IQBu^!2#PPxO`SSl6SVAm>dt0Rd&<;4?RbP5|-beB5F30}G&L<}X7< z8K9a^H?VooBMUszlK16BVC!9z0GXh>{S|*_vq1Oi4v++D`)&4fs8IZbSh00(?1{LD zRa>BgL_MVfqncy-yp6@pxh_Kx-*&oW2AXC;o{G95-gPU#M!LO1uSFmo_#K(Kbg9n? 
zBERjyWju;szz45hgXA%p%Ih-n?o$I%v|NxQaf6yWHnIDAr9p(p?!Kr8!AjMeDP(^j z5Of-<#nXov@0eE=8$jrT+-|0k>TKc(8>AZvZp@MqpBK_62buI_m#lE}oL1xh?A_w5 zI5SbpHh10)1EqpWUJiuj0s-*UGgZskk`+7Is&tY;iS<&&n+i8@2r}*14AV{OXWQ zHSawY;3HJrqU7A9gTUwacEPyaMMLP zu256m?nJi=goz*qbKZ{XVtY~CvNyuq{gAK-4Ci!Dtad`!Q1;Qa6&3RL#y7YLFeu(U zE+P#hf^?r?zkM-Z{e2=`3vV{1+I97L@}2uiE(%w_bdJPx%=aZNY|m?Rkez?L`%=E1 z*8AATg@DS4_(ev(^BW2*<*fcA(cc_Av7~$`_pl@zeyD0)9^n2Y^2`!w&Dp&bVP|IU zkkSeSE+IfgT7Zh}AaO{ldLGf`&C<+X?%%bvlS3@3@91C`U2K3AlW>{96*d6hvQfE* z`#43Nv_3#H*ui-vUr4YF8bN<))$WjBB7r49ks&^6?{W0BJDURnq%Bkqt>zgD3lCb( z!~0?qz$Y06Ak^>jUzttSsm7IGsREEig9xY z5<#<>nPTpajeBcL5$m#`49jr!8*=YA7GOMLTmV`2lEE&2A*w@ zTLoH@a|R1F*6M~QLL`6a$K4e{ya0>)$mAu;Ji%o-P&BN?+)(y6J_qmdoWev#Mipv(beEDMz%HfhYVIJ<%#f2nr!&Jq%B@ub7(S zbMwH>f|hlJ%_@shBe#h&-j5N;1bM_ecyro@j7U%+PTWC}9#*K;=O82OML~)k0xF6N zy$1_i;_nKT7>9r1sI*U&{cKh8tZ zn-p+Gh1df*hNoAw{F|%*xB$YZqCljs?5+ldF@}8N9y@3xpWL+A5{6GMB%8eh1rl(@ z-JO07A%2csZ{E;ZKRoBr5$N`l7Db zILfVB(IKBCtQ_FHlauS2408|SQr4no(=7%WhUszZNYJNqMsBoKe9%e67ukNTxymxM4MLr#(9nrVJslftS|8>{qzz~m z`a+#>+SK3_xWEWc)XCMLJ~k7_roIJ(px052H&SF6lYngDYjoAHHZz=EME2RS}aB zL<19NP2%8GzqkDpDk6$67@S_xiL>;f5kzL`SQ|tj3GHN!;|7FMwM26*E#^tOrN6^& zJ8GlHNzgYblD`HsTb)CO*7y#eUP^3Rh!~jLgU`OS@o+N*0H-kYCv&(Skb=pQi73L}}sWhW%=y4=Yu7EuqM(>v8m`XBXejLEW~d-G(_UsHo^H+EH9s zi8-vD3Os%0IpdRC8ZU^=_IWoWAztY_Obj1T-wL|OTNs<4w>IK9QodA2ZLEOReifTY zE3~E9GeBw+35D8FhwAdXt7U4#lncWfAzNpEGJZej3Uez0cB@c$TmUfvYtUFQ z+e+I!nIx748s6U#^HO|+DY;+{D0K{JWd|Y>5fuv29`f>Q)3u#Zz+M+@r((xqkivfv z+!&|a%D-~&QS@O3J6HrSxnaq0FwJ^OGdENCBWb#jj8?DghFAY>U$J*aB-D06pWFR^ z?)evwFTv6lcHwvPmt!1~{e+pFa>}8yqBt57>J;71nzz(|_kL%VK zYD6kb7|QfEy6`E!gJ?7tiBg)y#BF*>eGN*3b0HIdx-uWtS2N6V%28aEsX`n8m}hnX?m|kC_7~^p z`gz&}6cBHsNdlxIard3&t`s>BRD1a>F5$21oaNOO-S$uRzW5kw8|>V4%GqBfyWeHe zYIh{Pg>RURAz5r(F@R+<9cGINv(V%1kX(>AzDb)7=>~RKA@SM{;GCxK=G2jYGM4tw zZx*B!=<$I)I8rklEw_i43CxErY?yZz&G0NM$(^q9!1%~`iS4pUUq2!Ujd3{51 z!4Zp_j6vlFY1(wvA*bf+`C+xTzN$S`skCvyTJgm_?E6xyb?{k(9n_!|86K=imqxdl zs+|nG7_#r?%Z=9a(OFlWYpE!BwTcuz%vlw7g2}NB%vL~BscBe1C}qVVG%9<(hwn#< zHMwqcJlb=?iYm!i~PA^fk>wmM^hj zUFdWd1y~{1PY7_ft*Aq>oUqTwohp=*k|%7$5kp@tLLUYjWQpE?eGux8Nuf_L{j!U9 z`aTrA{i;A9Qq4M2J?x0@d2z`bqO9B(*-hcA9*6g?tYJyPwh?!e_O#tQFcd6X*^GwF z?HAJZfx?}%{aqcX1Q6nr5Tg%MeTN0+5I>$?v`nu+IOhRL*6WwkYC#j;vdMG=iBYxV zDS1Ajj)4?_lWwMed`h~-bU6(3y}g3UGt`{)?=q_qvI}}vrdv&R1{HH%M3ylE4!FSL ze>-={f8efu@$^!}C;qTFAl$Zlis48+e(%R^kT+i<=AT2Otetv<%}-xcErD!1DZ4y7 zkg81m@N|JEklM{9A6{Vb9G9zZ5BI89Rae&S67Ct4+r2A)B8kc&e+)oVy{e6R20F%u zjlquTj%02d4^C{}#CujvGU(E>+r!F5$OmEp_W`Yb#$$;^vayyLia~F3;CdTfl5f|` zSFG!mKR&jCUX?B$<*yKMf=dAcg5SOH!KKE|z>W!1yO6^o%@x+WABh-%S!3xXTx2bJ zx8WGJThac1?4=7aI`_sdbH{pUNnD&zY5KeZFk&=14X=4d6yjGhYi@Rdz2fjhkvnye zrkAmC-&gx8T@uWD4i2TiFV{ivOPPZ_EhGtu41}MXCP?@TT9?J~g#10;8V~9`P@Bk!eU}6FPdN6QI|@igpu>;1DH@2RhMzO`5G+M9~&~hb=-rk{q7(@(kGD z3x^9Nqte3%3PgY&-*iz-Je(QX4R?JEQM+KBs1V#Cyb44(hwtpeOf5tr1NA_?a6o}Q zkJpb0)eHoF^U5fEL*pzq(0FASUXpc zH^O6g>9C=**A&Zr%((e);OIPgx3{Cvw(F9AXF!(W#N^!Rbrf`L=u^>L#o_I#EiDp+ z1ry?nw1l3ch7fihU8)6oBgyP_)Cz?}JBebO0tJGJav~F;S0B)Lut)X*JN(<+;`h$u zQw_{GC8@2MEV$(R#U5Iy>_RnCQfSag3ctRZKu4PIuKWKn`NFMy1R{*+FK6Dg$V z8Pdvv33OM{)i)TJSh(P)dpk-J%TQm_XhzubsK8%9&O5HrkUXGti$BiA{$<8T z&re%}Y$JGLL6Ab0A=w4W7bIeTW5-8$L@Iu)b~h>)TFKCt|5fFEg^lk$W?g~@wU3As zr^BQ{kSrc^#x@YrvK<5tbunjIgVqwKNtc*5RZ2($x3bW8ZM5F%gJ?z+8w8Sxkntey7xGF5CIA% zGxVSUPBEZnBJ|lkzW0}b^1@buR0|_=`AtCy>w((E)V%tq3{^_)A}Ki{ocw0UX^4(3 z-Y_^j#BR_b)Hk~&1_+LS)Ww%@I;bhI{`+n{*c&qBG?*{n>6=Sah{qwJe9IxQ*^0gp zlgJ;RuZ~CV;x3$o`MB)9>+l(s@NVLy8l-%C=k>9iP1gh`8#MIQDT7qf4yfS&(!py! 
zYa7#nUW{+ZuStCJ0R8>?wytVfMxQ>H?6Z2PpUtNCfd$2aOzaVrB;Sa8OE}(PzzK4p{u8?_8YF6 z9me$F1-C(5jG3EXW6M_$apH!47z=0apD?J2C>J~*$Rb95Gv*Z#VO*#nYfV2w(anRs zEJyGVTF8fHEpXF+o1G)EPjQ70+#3khK*~0ta?6_KWgDt$8v#9{s2~A3_m4m|?uR4} zXL;!>b2ysekWzv<&*9RnL)lu{c5<*kZC4PuvIfAQlCV%6Z>T9=U@ILRFk;HUYE?4M z2_!}Iv;fXRUL8}JQnPcVH*PjLh(1C$0aOewcf8`7ViT=@Uk*&qD7XIJjV=Y*$hD-( zk)%QE^A1WdS80tCJ@}r+O@AyZ>L@r`*5yURS0GYAXEq_S4GQf@Z@{*|Rm#hpoKywK zcorIA>$yP|&5!Gg#;=6qdQmTpme!q?x@|^&Ade@g@4%XEOO@w@AKxI|>EZ_Qaoc?j z@r(3?y{Dsp_`rkP^*mCw7mmIfa&j)mBNj6tz@mpK8s3G9dBA_et|~##dWgtMn(~FW zRPAFH{Gf4be)B+Rti_H2(x3Y~?iCv&G&XYk5^PF@)%-Ra5$iLmwl^uxtHIC5`35Q3bORl!^awB=nQtTme(EBB%v*YHpn!vsRz~>19}29)Ta|0? z%(q8A&X#Xr$?d_u)j^xqzzO@cccD0a;!i;uuUSTX?gbX9i-C@GW_`UElaW+9E7fa1 z_79qkHx3*cY35Q&&q(r@GI`%$s0fm%l>JSHXNP>fj_X@=F`vWe@IyJ%bPXgBDDz%S z$Ki&58PZ6zS2KaEA;-dzKx9B`tO{`u{5o2`q4Tm{lLqww=R|=YRtwY!SWP;MO^KD!k+sr6b=z3f-k~)#NTa%L#Uc$ z$rgA;QL5w`B){(ggy$w2y~{p>3RmQ5nAyRGGt!q#$?mG+0>>Hq*@w@Y{DM-z2NE!V z#`-GiodtRewMY!_Q`h0}x_e$Kr=20ESiJiTl83Nat`N1q(zgt@`{PQ=PsgGAghdvA zkKgcF6q_(V25aDg5W2EOBUR+N!t5}nMpbdT$HYn_Jhy=QO%RB!a}?WMBv!b)WCQN= z#`XkQo5@aC$^kuyi$(I{Q2S`?f6!4(C0f|o-8(wWJxh;D5Y|H1s;909$T#>p_95!q z0Z9jD_i+Loo3J_oI5Bu`VQ0QRVH){=PCHS&us5NUW7bvx$-cbbwTXV`4%uADKpkMC z1khT^XeW^ZKv;v=(Y@HBfkVWPm|WDYVgLi40NXe3jKLUZWJdgPm~kdz)K{DUZIq13 z8mS<$T|yd(`C`+_MIF+oRDwPxib!y+81Aya?b)4Gi5cS4*VXHRu3rx}lutx|3DiFJ zIk@5G4e$6#{P^b%uww&QEAf1e7vu$%)#$c?{5Bg=0B4*DnEa)(e=S?%5TPOu8+eUq zVtaz%d-N=xra+xQLgGFpR6ITa=jQKIF5R3^_Rgb~9DvmIJduyfum|g4dF4~j^{{Fy zC87ur`37{{1tfDPXQdQuyNz>ypJ6DMJnVgaRu#IW84$bZC}vyZT^&Aq2Y%g!bKL3M zM|N5%C^EoEPI|Ogi>*5*06cu9(AgrI1VfX1`rYnLvyQ-fQ!E+`P6=W3ql92Z$eZne zzLu*v2729q@BLxar|I)zpBQc_TXjo6ZuXshhQQ0BQ|h)J2y^)YU&8=@4WsIbf7v3}A zlM&;=uC!v6DgQ`a)?L8j++0meYuxml7I$*_g4kYSCF6r2@>6JCC`?F=ES23cPFjnK zI=MTym+;V4aLJOCHv^9*0%=V;^AbI6q%kJy@8|n{lM7@^4_4Xn8$*;%kRoQ_a3Kbj zs4ei8%S))}VZ_US;KuEi=6=h;wK$;tKT2K;bg zQ0=AKGlosqi9d1JoZlGW(_;;k3B2f1tH!MP#z)XgeB>m5KKJAH{*bW`pLL{ouKEiV zLb`Tl8B(&(PFlbU66)^gv-V+lMB+rS7>)pG|1Dlu>vV5CIq z&}$X^fxuWS>zeA6gow)u0KN^H%0TRY-3&e&H8rMZN`0yaQc{qf)q}&jMiCITszzJc zBvWCv?=ky-tDc{7i!OZ|$n@l2=}L}i2$Y(&B~ZxwSe;pp$r-cp%-qrp0a^hdxs$g! z%vg=8=r@gfm!t3Fr?l-+sOQnLbG|}ax5I!u2KgX3LCT)9mJpV_Ak-EkDT4*;D<^|M zF=xV6c|5RY1TEa7j!6Lo3dp2g%$x(ms3U#b7Z4zSg!I8y0#qkJ7$X7w=)0YofnYu9 z0J7d*a_OgA8mW0YSWbvX3P_lY9u^cS-Yk&L=>6SwK~Jmp80o>DMY0-QPhGSypqNOf z4W0?gj|IAX$?-tOZBYS(c&`euzr1Ij4m_v?P!z35;82eM?G{Y4%_h&W6o6iLu{ltK zhEnQ(B^k2jynK8X@}ni_1l5+AWl`(|?1>t;n68N|`dIaVYeL)x0wi+nGw@LwlEI+B z$bjtaH;OEB%wglOU}=(Hw+4u&tIjbQ**Iio%T4{$;*g^1*YxssM}G z&quu|Yr8S^dIJ7L*j25yljj2^9oUIywwXeIEd_O^Jh?oSKIc+P%>l!5p#6Pkdfb)G zUOfj9+yTML`JG{2)`<_eyqm;$(O|z`myfkvL(vZ3{k*Tf0@f&wLSE?iH&(~ddEQ}! 
zT1vuPUY7T$C*N}N3S?w_h>zvv^29WXe11U1fqFPw@d(<=RU*-rJ6VlRQ~9`GH?a_Z zknwc!!;VIddTZWhexgG9g?C|OB4GZL>Xrc-Z;Kg4;m@Lnzx0sPDoC>vPiDZ@B6$=E z9}^%j0rSqJ!CrBkC%_xTmcwbSqisf|&vp+x#=g=+S7By5<6&>pNcVB*bl{oW#cgRY zV}0@n$*Q1$W`M3F(ajW8j0Z#_#&m#xAA2nC$6`$zk`JIIqT9gZAk&imbV6dh0a3Sq zk0|urn&YBu2_Z>+lvA<{sfNz?^83CSqn80X=p$lX=G~*J^y({X~ zy??r7cXRgw@g<{KJ!cgPenSF(z>2+s=mEA%=))F*yVFzRiQZp;Oi?QX-x!Vvs|NO^ zV@Edu`?ydCa9OZBP{2xs05wg2raP+&Rh_)f*7Hup90M9zA;6=9?Vfs{QvhPI%2(+n zyjR>=TccXokjp_*Cg2A0|m?qy;E<{X`W!TUY|Q@PCo z#38NgUO@qSOMN`pSKo2JkQ{q9&k>Gv{pfy5$g+?hiVA z85;?;^lsn!_(u0rhZGr-P{3Sa{E&HSf&GX*QW{PsSgxrzCO`Y&Q1a!=`&CJsSADt0 zajsz)E0FZbec}N62HiC^^P&2Oasenk&F#_rKsmCpUMwW}^<}$%KdiDTY1r&oAo2Yk z!Djz>U8$$=0qv&Lau0QRrHR*Nz=4cWeg;E=83iueMZfx$>VL>e1SnEDvCX*^yA<*= zIFU!s05BT^`2@UlyUf)yx}zHmDICn}iYYZeTX0Sjb2X;;A{5p$7SsKqZgzR_7xpN@ zZW$~Mz)vjV4b9bm0KOSQ)`GWg*)t*EK9yfq7%rx5lHcLc-Y^SW-dm48G(mVN`h_Xmsh<;Dx7Q92(E(TOtseW@n6Ch2ZT z+mHK#&-&+oKC&6qKt^G~*&8zT8e{ zdxE#neDas?I~!wv;h+9R$5iYjb&f$lX+GNK3A9G>cU_wiTvmbqX+4~N+7K%#42%>>WAiGLd(Be(U3R}7Lc4H{TSQGjVnGbyIEg$oCRJSj$ z%NBKuDw*{eJ{OM0SKZ(~b&Wit7lqz3JIp})_6kDVmQHE6vJ2qg9q<4FNFDeS;pKBj zotz&6(y=kI!6(je18&J>7%8V#ZxmD_FU0|Wxt9tR$B#~sMppBe4grr#ZpS>tDP!_t zZcDs6OcAs+WxVuL^R4X&%ktH2>8DFm-_Qtbf4tIMbv~ZenhLOqKkmWvHDFwGooI{0 zBAxc-06osYIU;?PAVUtgSg1V!28siP$mDfcqMfX>HFJ9mjLG-6?;e!jWgXb-P<17L z=)g0a3Tcwu)ar4`YdSQ0<`e9@x;7-iFMg5Tw2T!9^n~{Az{a|1GN5W?!#2s-;DG02 ztTNvfr(THdJwJy70&Hx_FMuQl+`@79_rUp_R~{3l_otG)u}!2fR4~MOLox3gfp}5K zWf;U7P+f?;Rnmt0>$yXEMkHREafv<19l$6D_fkR@uHx&RBu1LP-Sa5yZB;Z=Z;Lr=$ecu?Q%a)jrrDKSXBYn1L1 z+H$BKeSSlkWsDhXBXf^GJbrwC=VK`{lRho^5|7#I)lF$p1J2kg1y*QZI1W~nTKS!_Zn+ISZ^m1}@i!YT;6U4dzo3$f@4!fo zqLoCS@jJkt1C|yVdO=38mEe!&gWWV#fA*$^&$g}DkLpp>Q#+-n9?!+Q&3KTNT2pY8 zes;Y5#ki**i&vMK6~=&LUWwjy6sO1~kGeu$2!Cmuw8 zYTOL7**(XC_`G@`Qr78zP?o^xLO$B%_yWoWvIIzZ^jp#)S4f6YKE0v+Qtsb9OKcxX zIG}fs!;TYsgppVQK9y>>lMx{l7U>f}OhCMS=+9%TvzPiA?~u$dV%R` zQWNNm2f4nYrC z6MOel<|vIrTK5)z2mZ020ZYcEyX4UW-PNJyE|6SM#;XOJ6~ewVew!J?YpW~(maV+d z@n+Af_pdmXqdfMX&{};ATQe8W1V#dNiIrE0bIgm^J^RjfD)o1wx@+&$fC~>$Kl?sd z^E~)+D+^Bapvfvaq%ZaO+7j%e!>c)D@u*WS07WNb(#-T5O8Wv`6%^V*rc3nWDnU?2(!AL;;zbGLN4deylJ@j6?d=Yu-?% zcv~Z61^TA(qq3280QtN(D3W7zNXKA8OuDkrGXlS1Wq3jrryydqE__7cpw0uq)YE=S zvRPBlj|0q0{$kbr7!^aLcfAmTKfv7s4V4J030{#7}~Ixp+1hXKhRFCPX_EP_oC?<`^fL`E$q z9AR^R%xEu2z}*YVCE?K{2#k5s7jBDzPZJ*Kw2v$E1IeyX>$rl5&FATe)bz%yl8f}} z21)c6R85epDMp}-iSD>ye6p=Aea=K_*;l%+9gc4cH5n#M_Lp^P8gU1J0aqavC}aSq zu4iALt_z;?Mfq0QUYf<_1A2O29=IDlQWW5SR5YR=xn~oyEgO;I=v@qepRZ2nuP`20_uVz93HANu+Jr$Vve9PH1315DBiyD*+ zmkVi;kbnLAnM3?b^!xAo?~vH3@bCCP*9Hu%g#jcj6s)|3eF#3GKWXsp&mkiC>q8R0BFp#QO&+yAYiREL8YL7Y@Cq~ z%H1OdVxcXsT{iW6Tr)shJ!aS!&+Mt5M#)h&-iB)!=Q?og&C;x8Hbsj@6t+Pe4-;ta=}kx%r^<$8=?iO4Mb>QR2BHyK(^8 zOrHqCe|j9=fFd3T{f8~*G~M2%@!3|d0My%KS?Slh-m?uBeE(b*90*WmU+f4F@c_8( zt}7(nG}u{TK}rLV4U{E+dyJWAQMiU6x1#DIqK{z2NvwNxMSvX98~~?L9w;k3C!urZ z<1D(u*9{|j(MM#h&+Av*A-vJV?+$6SLK4s6eIW!CRDIhn?!ja11QWsyq@9g%wfH8* zPFk(D#NY)ZGoZJ4Up8lKY(r<^8~MWNuz||L2{e6Er7uk zjH$~W_GFmSB?W;6pkXXz|0o{W3*ahNYS|a0$G;K4km3Rm4E3jFyMRpR^D+-aO}-r9 zk_q7%A)nWGx)sWQ>jS#vpfl#zgchfLTc6%iyvaOT;K7@4VSAc+3y6*y1|aLo%;#0 z&5wrMpD_?qsZW}v0ti7D!s6L2G18Xm-3O>DUeJSsI$y|ttZ0)EGdat6gd6b`<+%yg z!-~hQibI315NJy`!13}ZoT0R+lGl!MFNFt0779(LAT^~?^ZUXtu$9F1&AEUk_uDE! 
zRUvsLH$2&piU)TEkR7jT1Y}^G*LDD~QjAp~%RDtB$Z`OpUa)smwCE+Q%x2m_M>V}{ zD@q|R%ghyjS27t{aZUn+rG)VY^4Mjc5Bz4LR7o--&aPfgJ2L2wv5=cZ0_Ye;)6aXA5SrTpz_8~*{ z!bO=gG)C5(5g9%N%j4k)RIP^R)>GDLlnn$_;hxfe5Y)LpUXu__P9+6uY&7q*90L_o zwd%m~f(meHI>YddWK*sEZq;6_hE`r^K1tc@ zZ>+_CmTt$cdib8%Ckv9^IeTDT8Yvveq4H0Wl@OM?wD;n@VEH+n3#tns6X@;g33evF zlOuX%pxD@^TLr?cnyf$bQRox@I6FD%DNK7NOIHG*FEYYLP~2!r$bsH~9C^X`lulKc z_akjg5fX&V3}g#Opyhn}?j?ib3S3NsltyKLI0u}#PR!t(Vh9)0ZjPaQKRs*{?ApV^MXu9>%nsv<@1biFFP?r)!RTsMrekVgkk)*cm} zk6NY5w=9rqaG}3^Az^vLar4;$hX$&Dk<pF1lQmTAJyd$~a9m5uEYx{gK!qqj}f7Rc0bXdx*1W(7per_OxR zm*4|*Wu6T#g`y|6_|5FnMH=PND0evt<2Mwt_m6vJ!Ae%$a{w4MB9O$&_p7si9H1~d zbNwaFx^b0Du^a##XRqH4gxB$VV${r*NDiRUbn0T1rgqsNAF@~d@kRh(^DP>@)UIiG z3ko~b!keF(zWcelD3T;|`UEaJYaYJrS7^2m9#F|Yn`A%s7MRV&9cNqm@P^SFtB*Ol zpg<^qot+>X&Wm}f`|>P`@Imf>?42R;Pe5{A*sv|q+&XFwTK?RI9L%^ET(BtOR&QZg z#Ee!pY3&q<5*o2jIya14EL1hPFwAouc&V~^aDFO;WN{qa55-xI#-0C2w+j6txq9Pd zWXFmC-Os!mN}DTuRTaYV88D^{F#^wEUws+PGY9gAv`2#;o38~3UNO3V$-{jOyA;S) z4=3x5H-%Voo6*SxTBeve!=_!Qch~Lw`*cn-|%`NdlUfU zx+H*-Vyf#3aj`zch63?_wO>IJ9&v>P1bbnvB@AJLS~%`zu(KPey)Dc(RV)1SekjWr zw@|{s)Q9Febv5uO9$iwfp?EU7eJfzk1xfmzxh$M;0qBMHZ3XI+v>;ux07SmIakmYKVI-`$$&i2_c) zeXnC^FrzeLpmJzEtPJ+lAlZ@*P3aCw--TvB1q2_EZ z<2RZ>Fn=b^r$2cb9ZVC*eAu3t-5Y*RrIy%0aTg#YLt7Ev9dA4uVs;(!xL_Xo2w%iIWc#$A zxW{830q}7DurYY=&=>rk5}^Qrw6{N2|EfE8_o-6F$4V=(;cUG{?{9R9Wm~y`3So~Q z06(qe;9@|3lshH3yQA*7@Lzb$Kakg>eP9RpTUxqrmVcw9yWhtu%`)Em0JN%Kf$OLkT-Hkp2(Ik;=(%GgC~s~^cB?r*d8)18so!T?f~R6~p(?Me z)Vp9#bOS8!{F(d50?6dTUVUI?4S?3_E1E|d`$T1b+GLjx=%DIB24gUGnK3xB-AASh zDjb1W8wA&k8n`heknm7FrX5^e#6J+tp5RD9cFs`P9B7XT!KpI(^g`N3Wg@6hic1(>a|~cZ}9{JYGV49vl^%vit$*>9vpi$-&(5%U2`+$t;N%!bt)ZGJp;b|mql^2}kdSGJj}abCNhIQpi!9Swq*T6BY@x@^ zBWuy{LFhLAjSsPL!a6#HEbT~BOir=XU@YiM+90>X17Nb47hpdkccg8VXaWM_4>@?R zydj4(#2(nDLb_N5APD%AFwRG^s(l7Ng`?QnGpMe52rLoQVzr_h5Sn0GaG zbKo8RUaHl#WV?6q%z`FD8FD8F(EboAow-Oa(1@!O$8YD3A0Sz|H;*#GL~0}nsLccM zb-BZ)sndG(bat0YfDe;2FUhop)eQZ$tYe`QW|xqpg`h$pD^Ow|?!2 z{80Als!@vfd=EmoHyYpO_V&2a1ZW11%?dR)5&%D&c}bALexS`ozB~k1;CpsCf#d6M z_%Jsb07YS&&43azxS=-qIwhNXbg;Ghe^K^c3yP~-(%=Jefdm4?rM&kB#2pfUAde6N zWS)LPWY(^Iy3ha5%n2^3q zUDi8whi30M3CEoxFO|{HQR}uo%;Vrf1RhnOK2f%+PX@|H%GSGOC7#dv@h;cs$9Sjp zj@5%5oJI6tc<Q6IsI~Uuus5bHDFQf!3GK=Y2&@$qlaDz+2n0$zK$4luBBHdnn^yPiilwMM-!|h{3biRP5_19TDyX*H5;G=uZR3Wbi z?>mZbMmx1JiA<=1f@AW3n%%9VMY}q+4REHijJCCQS-@;#>w?VmU{ml%?4#c$r;W7tPEb2myVY{S`_97_UCWQj zOG>S!6v;0h3bC|3z+$_F)6MZEZNw%=58 zk$Q;@3~42N&d17aaSo?m|Djb8_WRw*IO?FVBo91Ko{$dT+pG#XBB(^)SYaa&wGV{| z7}Y(1kwW1wzEXLqtBQABsS%$`+X51*WgV_QnczVSwarEuiV8^7cls9Y8VkdO#0He%*3pae6*qbp1LxkT z6Y;)%G<%tg)OvTk`sz&TUBB)KbY5##GH!1x@f1lig5VP{)cM1ho5?b?UA3$hClD^~ zS4ESd!+`VK2Q!vfvUNie%a>*r(_(|Z6qgGELPj&1f(zlDeAL~6)?yO#r9JVWP#oL*d-W$E3a!R=ayfFyEK zs^rZ@O;@>6x3WS|rf>l>O=WK6J5#m=e-5OjH3l?$JA~WiegSWUK;yxgTr}`;5*|B3 z5d1}d$0}7Zu8)4zi=Q+D6KSLao9*wqM57<^3KtCL{@58$F*fex zYt@|`GfG+u3Ing>=U#7+Ev+cT^JQv2!?Ut~j#f)ZO5K-jf^OW;s%wEeu3J#wF2!;{ zfJrW7@%@~PWw?7#ukG4DO0(tPtJIGmuaELw%NLskRom~w&`Ln~tn>N%l0RWgTcSClH+NovJ^4Ya4J zUmh@?gJFPz=0G_y-D}N!CK0;l&%=QPx|RwzL5j{M6-UP*F>Lovvv3^4xX1Pf^a=YJDyIp=y!1Sm$ z7VY(!X}u#92}yh5E=z^mlPCL__s2CkDK+@Hv4QE#80=h;ILGl(fBIzM(auf~g44T1 zV*vVVAlh&mH~{i1+N|Ac2~(6U#AmIw%pNQ!pifRe9Aj*eV@KfkxFfX1L*-jt<9F4FCdk3Xpr-6EoC8wHfY+j`MOj=}JA?Y%g zx307^x!mJwSlXi}k6XNWg>)AO2zC~5DLrni=aarZp2hY3#8BjaZ3de=nF?-t8WaKL zFRXag^u`hlx{z_Is#e=YWmeTT>eJ@k8p#bx<us5MbvI$(ntZcN3aBF#_K~fq_W-WvcCq}NsB49`O&>B~yR0$$5i-ap zoS)moHyq%T55(@^b(1nT>1*Tlltl$i>z8!1ZLu3X0cJ%d2LUb1g{^F>++l&)L%x9C zL(wpQDB&X06nS-0noJWbf1SS{eX>$s3jl>dzc;QaB$F6Htju{4uAEVjRGK1-h^+^fPd21t$c+H|=h@n{qPN+ui4O zk-_%eLV@b`()R=8tiu6aZF*Q`^+aYK_^iU%4H9Qn=d+Fay+ 
zB<29+t!nP6!OXSL0()tkmPhBc0RZ}xq7SmAFn$~c>inV)JH1W2S?{Kp>yxjBJ_O8} zzZqS_mirFkHz18Arplw^qHWYt4aQ7mVo9 zGqVtan2`5^v}BC=F6L$;w=roV>iQUkl!TY?HTp zrfBtqK)%>G>$PR`FCW~S(+TCfw$%#ruE}Q@Jt-^ld`B`0IraM~Euq_WN5deHPzgyy z*=f7veeBNK`Ya^(Xyvr$=X!Dk8X;^x6#_v>uQQjnTc|pvwk1lpSSINHUa!V}q?J6$MM+)B;(mof z;bM7c6`s0X$>NX+P{Cn^1ASGk&*i#j!O5o~$bg#8F22Gl3)1ku@tO4S9jZEC=iPu- zv)%Y;RSwOk`u)A~UZAs85l*P37{+>&s6^lZKS030Wap(s8R?7D6WaDKfa`Jh7-z+iH%kz@e?d7* z^$d!#`Ocjin_)k!&VXKVot)&f`s|2jHEN#^leepjtiw^_)55nohyGtd5YFeG>^J(g zyi39AdW3z|IwnOJU8Qh)3?Z!4Z`5mVYO8l6A3o3i{6HkS%fLjTKF7_KlG^di3G0EU zo%G>oAh@Ewc!Bi`3_{VPrJXx0fBh8sH=NqUd%%#qVov3qG`4~z4;+p=-rj&RB})rB#;pcpfO6DLWazMlM@-K6bd7%fgC<6*LLPmo zKJDwmVxXOn?uQoo?hEDdxVhxt0gXPUoH?S)mSd%O2pwf}Z6z(#t~zCUidrCL!Wx9q742%nE9};43e(vS zBMS|*3#R6}-JMS{<;_VpCIvlnE)k|&=pCK)u?|3R7#4BA%eFX19jKjCMBs1@Xwv7grnI&s(vj!MlB+ zysP?KAYAxR6K(NendH2cgPulyO>tK}-aN10O{xjX_WO$`f z8K*`si29+y6P?mcf4OvR#!7D2PJTtG9Kwb#+&hI2b>^*?Q0%gyq+r;Cmld5k?d05; zwpD~;9GTb`93MLVn4q$}fgoX}5!7-z%huC}GP}hSs_{qznDr2k z6wF7Zy2i*I$wdYQtaHm_2MNV?P!(VbcXW1w>UrKgn`RTkmnwJ-b5h2obGm4 zIy%IH0#9oFzGPY`3`260wc(J^rk@K_%Ep#;Sk3|Pb3NRM;dbj^unlZmakncK7|JO3 zgU8^yrq~q-Dfxvv@tj6o7#pZ7XwYmz9xk7qC2ZFaf48s0G80({sboTR7(j9 z{1q=flsmgSTx`t6{$);mhNd8Y=X`F54T@h9xs@GmvD@2A8e-6U%?PyI+%Wk-p+0-c zvz{c)e=`h@93G+3C>8tX=bFH2@X#xv=M8%SI-a?M$gguN@2Yv=V6cSD9ZQLYDdPGa zPjKM1WfnElW`Bn9sP{V1!gjZ!&o__@c2Hbhz2kNt=8gWi8%SG3%}l9W`|wMJkz3d; z!i4Rci+QZz=v0Rkfj`6`9umP75IzB1IAIr7M zPX00s`jbZ*=`D!##3%}cJoL5CcRiHymG_?B-Rui-wi1RbWao*XrLFAcM1HRF5ndLb zf0eyKTaQmte9ds_S<_5=Aoj)S4Sg>EvV5O#OrA29SzH?>*MJg$K%q9Iczpp*=?j=W zE*Q$;1$r8o`x|;5K&sUH`@|Kt7Q{#lNJ+nb(i95;7w@epMje9!cJe?~H~bt+>muU{6Ki+6$~1mXq>J>l@GeaKfr z8{yUzG|C&{6y0uGF;PxZKxHWggF3Q}{&Kfssq%C>y~;1;ke}V+X=bz*8h%!5&`W}kqm~_r8UNVvXaSapM<v2|*7C}8rw zxn> z`&Q={>Wjw2!=YoOt&L6Y*cIus`nk`_$DWQz7y#yAi`L_ST*!Ut$5(Y4e^=>I)}|V` zhIxN<^_pJ{7jdKR(AT|9sFPkF)xGcXOMEI; z(!|a4!30Zc{X{Ek!SK`+e}~i3Bv!|f7W>^Y@AabJG+{X&-ljUNu8TgG0^7SU_m60N zw%0uNo7Y6ToVsqwy?7~4HvAzMj(fKc{^QJ1oVN+xF0-w}9XyU zZUcY%;&~(#rGc{-be1Ye1PuvjMZ&2&3o&)=cmwVCk48Hke=bBWJ{UNF+}B4!cR|Ad zK_VZ{F*-Axw3Y7FCg%JYrU)Z_{v2-Cz_`d}#B8E~20sI!kcylJj*^N|@F`X}2M<&= zIjhg}Z zzt{bfZ;uywe=(5567Dl}&|M4h_MC?I`Ic>!m94);SjjzOl*Hk+(;w`hg92m@zgGrk z_Stlp-cL}`4CQ=IF5Jefjg1c=;gw3t`tgQu=vB^{HovcphA+0Ig7O&jlU~F2Fwo4h zD&s8$ICJRKI&_9{(-}kIk*qOV=CM~FJ3kEU8;FhWf0cdQ>OC$!)C()R)8J2X6V`EG zTQxI2B*>0~&ofZBbum5m_41gKPQPt0b5ZD_Zr-`bjxm#<>BUS=b>J}x&A;{n^zGhE zGEDY0=N1)t+naFZzIT#>o!oED10YYgJZ<=o@VcynOJt$~aY9MP$PU~6K7@loGiv&! zZ1wXSf1JrIA9KT}+>QQmgZ0Kg3fry8@2-=FhlSJ08~1fhLxs?C6tOp%dR&6LyLc|L*AlpQvzCt8&3J0dYb-82QSYZ6Bn;vz_gFYV>vM@=RsHa$6YjaXPJ}~R_%jt_qRs@3q0;#y*!Ac$ZKpFaOVg?Ep?fck17<8 z<6;v%4h8!Om3^;lr)(&WK4i4y19{f&e-h+i-1Wx^9O9JtmxIJ#t^V{jw#zQL;i{?C z^Lyhxg#+|rQ)3$G&HW(+>(_BAxvg5YTI!!3YLTB=XE<H#FL_mlHaVWsIUTdJRta zFtg~q&lLH~MnRZ@6sgq^jevH84Y1bnUFPsNx2$b788orq*CC+EBl!q)orD%bf8t1o z%bDKwYAk}nR<+JCKv5;`vCkd^Z2Q4!xd>2-qN!vTn2~wk!we3d9WpNqv>2Yq7wrzq zJjQZX=!Lm_;(ASGPh3D}C}viE7b1`#Q&?F;RBxH&?FWfWHia<44aXB8jQgVScOJ8w zhLXIZabUV#z%ZzZ6#ZMg#!?~Yp3Zz1Y)WBcW8e+@sN2V)oTK0QFN zw+pQ4(GaVz@4LNXXPf%;=l_74JVE6An7g;!hErHX zmROxXs?Z#*)YzEzre_Ps)17jXr3v-E$?g|HX>?Ds5k3vagrhxs$$3b@+96s zAtlb$IEoD zF8~8|i!N1Da6ToB;=RJV$O=qRR|M;BT3X>Ox6TEIW~og(Cu-M6z>cp93N&! 
zRk(Rr!oE2Ys$#_&Z9NV$)z~51Owv{pFa0hTZ;>88yth-WKo(egB8Z7ukhQcuTqOd6 zXZtm^e}I&qy1oVGV?66;Qa1(i?2eE3X}t%-U?s%ildo_7K2QmyALROO-4e9U$!Atq z7WTHJ7Rysfz)@hK$7l7cMl1fY5*KJL;*}3gXr6LH zd74|6MtJhNlW*}GPaB`Q@H(4E1YFo@x&w-8e>WNK3mMq9&t%yGiMo*d*yITR`{-Ag;FwIS312PE_WpBKv3EHBJ{JZP_Ga~8GfZe8`rQj`Kb#b}Sd zC$Q=7qsxpp=AlrV4U8usNWSbM*MGI3>fE-5nqF2$B%yS_f&Eg0GCuYY?iraKVxbbN zr;%RT=(dy~=}9Qh<9WR)-`p_8ww)f*e~R|PuB=_UHPHg|I30HO?hsP7acvH#@jWY; zHC?QF@tqw^D*)5w>6gofF*5qVttSYW$V(f_dEzok3FlPZSi>mIwhkovm!Kw-5WvaY?Xu;NLhX{VUu@k!QwtT~<5@dvXf1puq z1VebN$c^uwOy3+1)2&FN^;TXHm(NNnH`=KcRVmUAI@`e%KB;Q%2$a6pDk!k7cF~>W znG3qvN~(6C;|Gb2i8E)cwQSBEw}E8w^61^3XBd|_oP&PP_2V{@G|S(ZwBo0Se%V?J zII-zZ2s+x?$;%{MESv*`T`d=ef7nmF-yXV+;(=a=73Miz?>_vPuR3d$-PFKI&sS{(k<%zZ^z#<(fk%hB_}+;1r>0)@ z=M|>Ix&7SV$KQj^{9Ci`=E>xGJ2iuiG<%)wG^ic@Y+le4&L(Ke@2Rb%eRn*<{?W&p2)}&0W5g-m2#OSeEyDnE^DYUKSC*I zFUr1cg<})fCA|`G2{5p*6!nU;#ka$+1IR!<6UAN}c1wsgUN^lv8BOXGs5&1--p0s)ELo5+ul^;xEp#XorGG>);j7tQEw|ZuB^lp(Kqu!vFMgF4s2tPgW$@ z7sMNWhsH?ocPorOFQ#m2=ZJqpJOK(CchYYl6_>Ai2+CMne}EStJcXc;Nqo2L-hjm1 zWr7MyyP<}wZPwr{ADGr9kMie}Pf9B3&W6y(tEW6@wvcwy>!sqOo|<<_>|_{gc%Z6W z`X>m27HH$wHh2#Qe7HRU4LD?j#mk0 zB^3G~J`-{Ee=OG6e~!{((SJn#`B>D@nzED0AmGluqW3yEeQH`-tQ_u2D${LEJe^E> zl$+P*rEu_YZ21O0gzL3$;MhbR^f8WT;%k>^ju>iRnl4NLeu?m8-9)WBA$STbG4bL> zL!;0I?!J$$FDK8^)lFybf2MHb_@c->Nf@fG{dLE!e|QvR8W9-a>zNY?cKB9q7t|P> zS~%})J@znJCxQulx0PkzcuSqD%V@EL7RkxA#|NiA?@NC`V_AP@7?7m|F9iM zt~QSberegWFtBM<168t>E$~kILg_gZ@ zMED9xA^w~WE9*=@iu>WSVu<}os$1HENm<1pe{`=F4;kc}NoLcqI85j;#?vnI{jOsJ zX?=Zh%WAuNUUCOsoy3cS@K~B}wr>+^*|K{Nnw@b(5!OpKll!R6@*GKrL1QW*&isRU z-^%t??c>MVHNY|d@aGwKZpQS+XkgTjozfU^*x0T~ah>{W5Y<*{Z}gSd*5$L)_MP!W ze-hKz>2o*PXQH!bEwkGj|tn-~doifApm>^XF!a#vsj;PxD!B8j`0`6_YntQ-A@` z=6%T(DAkFk`uy+@6G62*ygx1OcEza+sN8L2w2BS#6Y@q7!q^T8l;~f!zJ*J2=LK>j zoUj!i_bPlspb)i$-3As5=mZh&fmFj_FTd^bYT#jJpG2^sh~@J>xFn0tbQ%#WfBBqs z676^Xt#V(hn{6j2r-o5-eAryf$ER7eU?)n$Iq*lp1;KwLz$L%oLRHXWwRs6VpGro~ zbf9=Im+O!H+N0K*Kz~F$>&$V`2W2R$&XNoa`@^rrt%M?Fkq39d zNApY>(&{pEixdVx-;5i>+8^wNAwcjUgV`R8ZuT?0ILhpc|SdovMV>+BbhD3j`ouye|0!@+Wktb zPx3*#LhNKSJQtFDc^ZKol)>Z9?4>mdCNLEvPK@2G#%gtERtW)5Fj+755CQQtESh(B zdOma|_n%H9qeGu&C8ZB9&%>5FZ(s*?A#}!E6?<{pzSdxZ6c78MO|gewKLv3~WEZLL zqd;J{Y#;lt=OX6{eG;8Ke?AF1XB1n9gE_i&k|~%jU8L?ML6&>txKpzeURoZ%DxRj0 z2mZ4!TqsLBqUEJ| zl5gbd-Q^znMwL&Tm!aqp%2z7-vr&nv2)RV)%Nnd1V@UY4J-7 zcDT1RcO*{0ZMdM8e=Ell9+QNlIu470>h9Wy9&)KU7ia}`u5Dv%y|g&|(SEZui)Ny_J|B7NTBJ{8f7fGOO)@nT0t21z)0x^PA+BczY77GSw43N{HE3>&6q^t})m_L4*z&V+RJz+&-1N*^-coG9L7seJ~nY`z@a0{9FvF7L6O9D+C7aDYO}% zqXJDx%h1OQWvV)u$Qn4s3-`{;3&!beN4{nYx`J)p@S$6N2p_a{4wpBZQ-vKPXezV# zh`~e|*XX{@OH+{(9a?Cr!G&A>_<$=rbhZt(PL7TPf2)JbmF81VF;A^_AISrA@Nq?b z?p?4>j<4$`(q%EFqk*N6Mxqu}Rq9JE*L==VwqRO*_}o(5digY#UH5^#vbx3bf_}7N z@XhPzBWJA!u`{u+xlQ|QsXiBS?LAZC^qt|;;t3baXYaB8xyUd5iCZjIYc6ReNC(z% zCxL3of9~x(D(PaC^y?8j?qT^sCnN-4GEtUFF^XY?a1Kz#>xKboHD^4P4sLyGobZu! 
zNkfBq_|G$}vRr+VL$7%>5s2v8Zng8-nppwoWU(*a&v@g~x60R;vCT7=O&yNmAOd$e z5XgYU@31<0#(c{&bc!9MseO4rLAjf3Y#784Iq$QSId()7UD;#j|>_CvRqfNsK+gowv1E9y}=_Em4J!l%N6d(6x1cflw$$| zf7L!MRZ_tzNBFFrTpn`KR2b*-2eMmZChVEg5u7c|L3ov^wGE^$b?yy4D2?XfH05mU z4X3~)T>pTrobSwyIX~Odbwa8Thx7TOjAugyk)bSKmKWamr7pc+nFWH;1lwY`B_idnfOg)tO9= zx5hed0y9Ob7^K2#tHa64a1=O9`K2P;{H}OE&`Bhi+`6&0!gHf97OfGpPmp~m*B~-V z_182>kgrNp1NikZFx5aarSfD_Ryd2U(g8|i-pdE~;S%PWWFYen+v|!dS7YkLf7Mfw z&o`A*wQ4=W-nx~YtPtR4m_b9vpE|B>hVkZ~oksCUbg;j5h^}1%Qnt)ye29Cog-}SB z1ghh3aQN)-SkRP6@MX+>ZZVJTY%d6t#8q!4~8FoC~F3&9SQH?x$zrD z(x2)2;zf_9Vy>2BA!k<55hc_zf2y{yzpKTj;a8hW(X$L(+COiVgX13poc1gn(3;58 zKri3LyW=j`+!&P+3&Kms7HkA9dakT}<5~=tx%}Pt0AP$qpfO4ZHLB2y005Psl|+xi zp%&Y*(l;EFmd0JEMRqUOqhhfz0>990a)DdL;=0`3Z=VDB3L=%=(8(3sf15S?gdS?B z2~TJBTJ2s``5wdDla%;2h7*y|*N=y&uVK`_lzH>Ee?}EloajVGrLn(>887y$q>gz} z2Kh!lD=>a3;|9H|D9sm2pmXp5W8jv(p{lL-Mx|^KX>UPOdcO2+e=!ky_5{iJ5>|-6 z2H!ZZqRO3X+|YLF_eA>0fA<$f6!biI;WK+=S^|2q(5t9@tbJaDn%pWVvz>U$z4r$e zVu=ZkT=smz*M2&_JQxGqd?t1(k0P`NgR=l)_xqSR%*xDyaZS*N;DJB-f3aPv&DFqN9%0cr8TJuD%>7vG zi%g41@XJ*ienyFF>^__|sBdF7Hh{_k`iX}};NGKtL$s0Fg+mE(ZD?bo9SJ&3Ye77P z?iiNP?prh_JJNDh4XTUdXa9U&8)(J6SNf?=lY-~Vti5!7dQ|p)eD0%wIKc!WzSi`* zWDCdIf9eT3*fgXs+)jxk%k>WLkU*9Fwz?du;P9j`fK6~kJdavh)$WM}pNa-&a1JMJ9j#gP5tuunPWW zbw*2AuSZDN3EZABY6g`ADD9kBp@&qmC$d^8f9#zQok6heh1@GUm5?7#ZBFTxrBmLy z1EJniG|Y64?xY_V17JJ=Z15Myf>_FtUq?2{a-Lw(Ztf~)p)S3hYfd6mw;n@~h$rH} zvVbSoPQUsWfty ze_{oh=SicI!(b*iWQ9U4oO7Y_LzSQ0f2eL(g?U7%PXg7z+o(w;wtLNJSb}& zEN*E%A(Vo>%L_HYrM|eJ7tuPd`A>(j^%vrD#pmI9Viz{0u66)BwHYgAOaEy8$Vl=f5`7sDhrL0&%TKV%zBTK<_B; z^b6s1pP0yiWh1t+ltG6q?fQ>g>QsL%-{+{ecm@tN%A_sP^#``fwY&8mCBea9VM%91 z412R}SVe?J9>YHphfeW&e|O(dLr0vAO$l6#6x(~dPfJiBmhsH$PNbccN88pDxRIeSN3~Oe%cNvHQ)o^~XhD-2d~GmXc;}d8KV^>d zaS2GDhfrWB-g$7|DfaJ{3JtLSCUwfd1wx#fAg0Q7#N-JJ#IY(nNx1p`jJo=>dSo|t?*T^ztiU=4mq9&z52 zgr9MRn&f>&KGt#Yf`a|Sxx$uQfs1SpP0=>=uyxhuRYI{6^VfFa>#|*;rBQ|vW4H-| zN5h>)t0Ob&>93uSfBfR(%l`IJo@b}pK;;lX9dmOlXM4Qu&deL9=4VrrL7z@KS~GYD zo$SmI?>=ZJ_<&mN?)*!%`mE4Ew>R#vt11Tmv)JTM*n5Ezy>DJu_=|^S!({F`zI%dl z^c$;An+&Se=~Pzwq1@Es%~?K15Va0ZoS+$R5@om--$4gOe;%ve`_z0@Mc`qW*_Jf! 
z6%FA000!t2LFG`tBvUlU`<&X# zxbO7fIDPz~fmT|nT$?N-+-u(r&2roB0N_=N zalrjsE(G>9f3SQ`ZK7+*#F0tA8}UhQg;SP785}I9n0+@FL_qP7ov072dfxP~eeL3{ za)TwaC>INj${A%@5 zsSvuKYeJ3-#%)f|)B89NYm(fp9( zA@cMc>B-jMThQ1ox~z(?+xznfU1Aj-9eT|dtDI@&W6o`HSmTJM15Qu>`P-T z%7?W=ovwlUKAo2Umyd&z4yS5J%Zros@V{VRJSNUWcVa*3&tqz}5hk4L=gl9ZyXm%WKA5XsWyg!3f1D_o7cEK+#_*C8+a*+CF4WpjCBNtv z5m9Q}?b@6KDnm;o$$%UZd`OdcY~ExZynMxJyoIVafMq4+>|WY;3;Op6i9);FDklEeiDGB1K9qb<$2Q~6~YhC)0uyvu|JW~cPN|m1AZ6z zfADFyK0{QlT3&-obwX!*6{<3*W7@mAUqiZL#^I$NKXf#ft|cckc)?f@8JYUe53wc<#5HDf{Yv$bpf~T@lzl(LUP={s( zcz!@qxd5{%KB(d}`c^Sk*-c;}Ri(KLk%x=>lB>?IAxiH2+o}@c0+@8OMTb&#f0o(8 zMT;T@lw6}A%QiBf1?aad|9%jgew%>^CW!6<75Ag^B(3`L&X0WG1sRNYVbWUnqG_G- z?d{U25M&>ZkNvY}5n9wwX$@!Zm&4lT6rCTGwPr7XG#lk~dSoVaC*U1@d?ikS^o@KOXgc<21) ze!n6GY<0`M`j0mx8tr5wraK;ehN^PdOl^`&(ASQudsFt0CA4m?Ub!|7;XNnBa)NRN zG>dvwBYaaQD%o$~%F>70?E+D=;vW&n5J)ce%fures+F37wy&r0D-lC?;BDNya>L4=e;S3+n^W>zyV;{~5 zBW?T73!YG6t)HU%s)|)O`dg*61A?`;Ud96aSEg9Q=_FO`_~bsrjM^MF=%|pL_;r0Y z=zG-H>cGYHwkzXwe?vkLzih*+VJjQs#oyl$u{tiQk$N-d>z(24xCmc`2)S4Sf>4WW z(=Q#GxL!cZzc$RxCZPThpWfakCPwyHBs-1aq|4pp*VJhRC{eET`eE7M>CkTYt%hwf(2e^gvyvOOgzv1>x-zM>(| z={xSyIl``&4oXPhG@EF+_Ag1^ED+sWc>%H;d^(jtfh48&1UvL8O8osp6{HR}!h#By zPzwEo9>eKpBHtKoaBhy%t7ahFE2-#JK0MVwwR#jQ5!bA5MZ6yjp#^U$BXEr!1~^5?ppcbe>r~JiJOO`L+sIb4n{v1JDQ6H zIhEq`ARhC>C8Xs0+Zf{>279YlZGRZ9THRJ`h#N(R>cIx$aQiL1aIXh#={LrBsNrt3 zW!9%YWfl2zG|QI=eG*T9V%|d386$6hI|{^ObUm07y~K&zltH^&ptN@EyPK-sj&LuD znYJ1;fA!=&2)s^VZ^9ehJa$QEMTZ{Nr`42O1+iy#qq+-ycG3FERo$;{Ba~O!xr1&! z+?i&`5B&!{+119U3O@GQB(Oj7{b=yFJprAn?++p|duy3U9j@NCpGKHxN zgPO7Lz8;F_l#jDpZJ?zjs)ry9{gC9tu0uq9SgoeDL+y4kxCZyn>$1m*hpr`(moeNt zbPcYTh@g9EA!ekBt>>?tfe{o2a zvq)8yO?855y$5|+PxH}f?3;c%$>6FXW~HF;xh!TY>Wu+-HT9cLaAS3Jn@ETmLyuj5 zwu1pab6YMMhW;NuHUG<|=Joo=rREpafgL{nf6t~qX?$Sj*5nPgzchTUU81k+4EuGN zK`z_@3YuMKipw2VEmdpXI*Y&nFEGnZaL8&Qgf3c(aBI9lpMQp*^$yy2^ZAWaig5zb-It6CYiW50cFi zzkkl>CsB-8+80r@K0u})zhBJ5!gA8rMds@ogD)Fc>)`jVJQzMpc!ifsz%`!XjJ#uwEDA*@L2r3fD23x8=3CpT zGMTBdG7DoAvmjt|BUb@le~B(>c&WjOY^XAQ6El4(LAC3|jOj%j2=GZkMZ|s-(@#!e zB@3yvV29FToSBeF-es?h#jJTt^9ZM0&m2=uN`Z;7#5@(s%cBdy3BKjk4f55HuQFfHjft5a&b}}CXe;0U0^E?u5X+!~P zAUH7qABkynwkJi|fecUIb2hYMk?IywT0%7Yp~$Wv^G?UIPD6R{^I{z6PeQi<6qPgQ zMgR=bi{K;?O9J(i7j2z&84oAp+q6bTPnv=6OCMflI z81HfZT*=Bn^BF$JfBgAmkx48jg=@i-;B)=X&4AB5B$JpqveGA#=+ECz5LsayAhe9Y zLS*rMWmIa@ch-2$1ydwzJl0|z=tR$R`yDI%Y&7m_4#l!Q3g`Ec4BSV}VHK7d2G5v7 zB5Y7ZFBP5)~mf{{7wm^0Pyg3i5k?jsSn=!%CHx zBV*b#s%+tre~ctB7aS1|^XMZob5SfgF^)58y@2^r4kB!4AjrG=S%1It)X($(uX(`M zH?i;V^<5plKmVDFG5Yy=5gkn~slVUHnikz4!1Jz+BKmpWRQP#b{OtQ2^Bm$e#mOm@ zc@O;TgHU9X!_VWzvpAH%JsC*or*cFnfQ%xJ?Fp9oe{6^4Gr;&l>zcv;+>RB~W-xeT z$B!sOIEgJVQLZrv4k!w4X2eCpfC(w3@=jxO2w{a(0m#&}j<2MO-z1#x+zm2m+0+Qs z6b=I!bKqG9q8uqlF_bL^=npR;o##N96Xyf_+0aXfUCwoF>=ls-64n7FKyW)nDEAgj zN>Pq}e+YQfl1GKU%g`dupt~Z9qMW8BpO!2>vudwoB;7Dz6B5E=bh#IV3Vsq0VQ_&wS!fLj2C* zJlDtXKI^gHzGn~4(EML#&+i_dfA_mCT<6dHf6vbaD=##0=2)ZOImVxVub1C>o#zt2 zcfjxCXLv4*E8fq~9OL}^K0Xhi10T%qOJ=hG9afn4R2@~e%hcW}B_CZ81L(Q4LQs|z zOmAHL)cG*kc~cROL55K^q{~&dSFA(;dj^7A3QN1<(0Q0Yju|1*#xkzp81q?|!|W`Q ze@5(B2c7w&BZw)PfU2>&<~`O4TAtIS*ZXANuNh*7#Tv#KB1>ISJ>>*OfT)AQX`b;? 
zv$BTqP0A8%aGt_ zhpx~ws&Jhy=A3ywHMKNGG?ZlGU=mUff4&g8IIx70q`LapGRQe zLx^i2#C^nGouB)NeHt$72=ku)IUj%KCH$H1d9NnF`xJBH%=0I77_Uwqk_(e57(Ytm zoXBupoLLOa_zMEOF{)FBMitYS4eM{8O$pqi0UUn1Gw0xZo5KOLs4RQzOGkhIf4+~I zfir60&Hla9z*f(q-!=DVpGU`e-6%@|Jq*g2m6d?QOlF1?3DtIw$&kdb{#JifXTO3S zc9^_zW{E88wyi#~5(j0I2^EYVCp<=g!6Xw!+%9F{nuj7*k60$}j#%G+Klg<7&0)RI zcgLhm#_+HAG#U!q8^9$l;2=TBe~7_r;q`b9+@V54Z^V83uh-`!PMlv~!FprA?-89H z4>B0O)jz|)p{pwM`5PdA?WP9Q0_7=8-#vK1>mJ4LxO~S3d%?gN?9BV}_j`ZN;O}0Y z&(V3T=KC?9_Y%hMC;s`fH-GNSpFNMe66%IEG;==^Xn=s(MJ?~}_y+fCe`>-x5Z*HA zYh%DzDQF)1SQFSNS3Dud-7R1Z#eAAdNKX(C1TxPG9M@Alb0;KrfW}T59IPOUWZ%JK z)rKEBmhs9~@yflJ!toXNp2&pAu5tczC5s+=T}(SX7*)`WzOqbU^0=9OMEZC<%2^Ft zF|LO_s?JjM!!(BJXE zSYa>!y|c#u5tpPgDpi8S^E96+fxz{hkT)gkqKJmO)KEMLVaXgjSQ=cjp{g;j0R!zB z`V#w-V?iPWod>q}_}#Qq*+#>XjZ6;fA8^ul9lAHfOIFHU9R}5Se|RcjJzyQKjh{Pp zJ~y#H|E|aRT>N{+DytXoa3W9JYKnVmP-fRIfl1N)hj>tBOBN0KypRr8cM#yOoT|J_ z_coFe%u58GXM=tHeIFsi5lk2`&=x^Hfr8}EeyIMO)xUT5&w1aA7POi;2EzJ@gQ>#V zzUUL4P@AEp}=l##PmN_S)+jnvnTy(cP%3gyw znm}BSefq&Zt;>0ze%}|)()st^UvqNZ`^|l(BC`j z&pCm|{GL~sHwjum{R0YYs>Id`h`zDLHI&IOK~1790lL=M6Aw8{MUf>4fu$w{20Zfs zt|oyf9|%6%25XBt9S=DH3L*>%QIh3HP=dBWCyx#M`kkW&P{ed$gzBK4inT*B+L_^n z`CLV-a49BjNgHnRxA}1iiBa6m-ebqm408|_kWoDV&gg!`gD4%1Tj`LVZywMqMQY&6} zQA`X?{@^Sys!Oyqk%XZ6qh^^HStP9*a=gnR9u+~YeZ z>bz!01#xV`@+@&@MKGhwGDcHQ#%G3yF`9E(5ub%bY?3nRz&k-ix;>?E>4w#MYFUJM ze@p?ry)oPEdH&fTy7;PZJrK99%y z^LW4CujiBTO)6{Zed`hSnm)e`A+sv;oY%oaW|^XgbaR+=OO*^C)(s~-t6zy0{NR!5 z2|ik}%g!f4b!-d4l3aucGYMH5OLd zd+Kp2Ex@+W6`Pi}$Qju;3@bOdJ-$1@SeLzwPy~x@!+9{$R}L%fn&=E zuDzpQB233xppk6>MC!~-(0~X@fJKPy_9WP?wJd1F<+>OF1=j_N)_~b%G;y$ikOus1 zb!dfnD9C=6Odjc2wvhxgH*yUas0^%S*9_i8P*^D6yh7^+d~6GJ{!lnPL+^-uRnpr7 zTZONsytJ&^nYq zw^~35xA~;9TxoFI+MC-2tKdBs;$Os$VbqVxbsopcjB1+?c{*e~3jQwp%4DlI192i- znYA8$gd!aRNWV2y(P=6VS(xl01c#GO>Y>#@Y3`=FE?Mhw?T&!#3I)iwT9?owE;6z| z*++QGjsQGPrM&;rZEV~8?fj^6%;A&hpb^i>;25KolN~WR?_tyTOU?fH^*f4R^aJsq zVhW0Gr)r)5WVfhPO5Sryz4{MO#_HQW>N$H~IHL=2ls)upE1z-b=j#1pFssncRq5CK zFFm2MuYZ;7Ud38Hj{SAifu<1GWf>?$6YiX^e+6*QG0E9<#32{-DW1Z4-;IxcvH=n0 z-Ftv`Uem?2dDitdD;o!maxwfhWyw8eirVx) z`PXv;9kNvRoH6uCJpiVqPqr#stBB&g|BHfEz4+9;l;nd~AMN-e2rTLA{46cDQRRPq z1l8Ar&bY!Ru!q)V`>3Kb^;q)t4`~Q)Di#UKf0gvvCluR%d%3o*o;r{v_vQg57&i@KmZ?&``x>KJa%( zabf-+Lv(Xnmd+PF4C^$_cQ0$3N8>MR+2z)R!QSy^UZZJ=QT7C^0#1Ao zaAB;z^_#nK56T(C=SC5@_xZp4=VLd+i?XRbRM)SGUAb%1lJGQ~cyuq>@Y?aRN2W&| zI(p;4vO&4oJK_KY%JF92c57Pe>?RGBkTG5aRE!<5{v$Gqd$p7i8MX^i9)r=A(3O^RBt9SAhs^yi(-^I7(bw&3#f85G&PDr(p*IVuDJqFq+oG#AmKbkFtC0Fe@HN zJZ5#{7@RBTT6ZCO7kh$KHnGJJp!{ z+@CHm71FhX6ax&Lz*qmXpl>yNkzi^q_ju86Qg#5+^|mterB?3X#;}-0{M+mi^7iEf zpB7rw^YdYk5j)>#$?98q=1&H|LcaX^v&a6curR*0QpS+-pr*Jl8KH@Ab(Sc>i(H0z zTMEtN+>hecym)ngyy&=)n-$MrZo%?VLpXy-NH@MDozFsciY?{rwAZLU8{Xh4$Td!V{Un6U zvLj|c_Em>J7M%Os@?No-qc&8t{r2}>XGI7-(m6nf+$m{QeSFTFOkVLa#YmDTL*T(j zh|b4D>FWh}h{xML*>OnFF@#TSVp$D4JUV`w)=z?d9tI}gAWne3(jZoM@nXiBtUAdg zkrl0e$Qw0*;GkQsp07Fs`F+hthBahht{UO8b%IcHpL#;N(ubh1qvdau$jqZ}HpS-* zJ>V^?$)SUjNXxu1CS@@N*pBI=p0WppvA?O={M+tOSe0uVU(@UqHvbEK!g(iEKD)oj zTkII*?iuK5pb!e^eM1QQo9D}+KUf|LWi(= z<<#C%!JT1m202pF*uUg^HZZZVT<27o?3qyBF{Ts_BWO>)f3WTt%oVS|pj?FIs1HBo zU$@=wZdV&Ri%Nop;BjD;ag>POPRu;$9*CNdafy#tBmrxys#eK5y>Iu97TF|M92VLM zuyLe%1MQ>AHXMO%BscOi5i~yim>vY?n!nTNYPKJiQfsi6TvBSd6YEvlO$!3cABKg8 z;ZmvoGB9J>H2gg{Ioes%|ACzT29l}N^;0eea1`q|7j2{z!xx+hTv7wK<>#KHc(nDy zTE!*}fUZPW-Tehsk@rPB9|##eQdA4T~&uJ1UwEx7-z#xi=9 zo#sMGXScn@`vG&04CVAXjDs;HM~oVZ>Qxg8xw%TX{>u zSDd;?Z$U6aYS+y<-2sig)67tR;(!X@u*Og^fK9smMGX$?{vy5e?(AZO!F6O4f#vzf zUt*WkTFD_>)+e;=C96tVHft^_NvFg<;>|)OpSpPp%-l<(2`cSdO0zmpdCwX3Xp;P- zO6U&jN~-gRJ;(hZPi^XG%!d22;r-n&DG0y1!ib*tF}T#cm`E|+N_;c@#%fQwe{47k 
zP<}e2r1YNt5g)DIwl+3rq~3WKOEQ}-eJ6%WF!qY|zu!}Jr`h<%bm6|<$-ODIbgi+P z{a_{uDY2Bo8a8dsuS84@;7&q*X+_WP$^fT2a`1ysz+4SBWFv|+?mbfS?@QgiK7 ze#~7rW&63=K0l2zKy;@)-&VKej|~+NP-R#r%1^i0y(X*d7Wh9;MQ+JhOx(aoHdu_jlxI_E6i@~(*2f$AR48Gy^Z`=Y6H;)%NJ+l`euZ~azI(4+97EB zqP-t}kP@K?C{G|y{vw7^`+^5nok?spRblnli&MV&yuIf<%ec;_23(Il>yh%)`vhjP z!KoQ}xof9{ywu)`S6TrVC>VSa`53Y!H^Se>eR`GMVl6BsK6OR_G|gKq_mh zedDZ^?4Anz?1`uIb&Mp``Q3xTmEJPdb?vA1!plPPZ`)a8A0~e)zi$;{(;7%q=s5Ys zm-Eae;XJADu~5-a>>I#6n$Cy(;;h?Y$IhMAmf7RE-IJw1;M_vm0rcY%-!fjz?l_aq z5|R+Nv75H-5xwkuF(KX^rui5nbJBBOvRdz{D~N3xw^=*=Bzoy?MY5JZh-RG$n@%_> zWh0W**W)F>2?bDuvB0(SVQBZ$3riNR{#P9wM7O(Q()4+iH$Ka zg(6&PwDT5+s$o0Tk!hfFI8iWspXxMYvlSu!R71CU~3+s&M6YYiQPIp(Yq(6T1D^_hr@)9T7E#1d+e# z>NMzcZ&4x>+a&-|B(KbnXG}SmNV)c?AoqQ5ocOJh)d2|j6o$AQy@9K->p3Tr`y&{? zG;`gadim$cQ!~v-y?D7F{9wV3fQ8NH;*Y;`*=GLRKQysS2{*VQ60$yTV7|4|@6sEs zPyOkHy-H_{cjI?~D3&|T_+|5UN78w`QRhm$wJWS5Atr`F#%6d%=QMsi6f|(A?YlxK zmBtWP+~P*q0(lBO6hT2$lUT`k0>4Wg-`r}Lb2S9}4bX@Jh9WZC+2=@HVbQDG=x}%4 zV&p22%I7X}fphIGr1g(iA?pmg_1GY@ulg=bi93Jp#_)Z01En4=Q=$L9|Wah}t9Wn+Yidd;DQv3YQFa!7;Pc)a~M zB_bwQ@sw5Q2XQ>jqP+VrJ|ZhR?*;o~RxyA}11-slYTwgmjQu-F7`+=Vv?Ff2KJ40y z;+nZ&uq_Ki!Ca5M*dOyeW#{649(aX>DtK3IxnLN=ydn02vaEBol9NxbLA_?{5;u%- zrY@6t>!17uKRExMKfY``3~We>V1Z>oUX@fRWXu(xh-R_r;QqB6yvL*domz0Y2w;PO zpTV~h`EbCb<_ro5#0N|)Ypo+Me9HO}%?!dCKyYmY;<7NRE*B9>qnnU-br{{WdfN0- zCy&mX9l;~Sc_UFL^us2GQ^#nH8{%<=#`Gj8w{TC3w*bKemLPOZT*VY&2ytgx2q!dZ zd|7*VRS3F#T;?eR4FhZOw*g0RfV4yt_S`7DL^~L){7A7B&6VVEhSlC!Lekg& z+P&y}oIKbZuO1g~mvH zxFBDYt&?jdSC~)jpB;u>5?7XyZ9}m7f7(m6!%rVc+9-A46st3iv?v<4z6{E==)NEa zWc+6{t+~M#vUm=V`TmIGUp|)nP081)iB2nsXL@xnM?CV1fuz*&ptJfwk)%-8U^{9` z6(++QUZurcXVZH$8pf?R94g7wpHi}wiT2j&ptI<`v6J*!Iop1L(*070#i z>JTd+1C?_Au?tb2H}Q3yAr#D0!U<;>|10+!qttQM{SFH=GVcy`cv6324N?09=w1L$ z+vtZ@D{1nx_>&OQ@ZyOoovQ08vMB|vx`uEbBL9Ug^(q@~`e!1^BrB}~`2&DILQWDK z!%GTHHvTST&17dsd6l{vdwCHpE_Z*%2?&B{d>ArFz7nbz2uq`mq9URo)}#;0+;7NgCc#-7e+_lER3tXK^cCJ4whK7*0bs|(u~Jd=E}k<=XpxH8-kWzRUea8RtPU)`Ul}xx|(C~Z_u~LBo1s*qzh zzG;Nom^h*ods2_;OyzJRGl_EY{~KBGBv1}ARj~C+(n#mbv&zx)N3JU`p}V5lzf zu8#B5AVFI~;%S0B%0?$jKdJyD=A3bxV_e~CO#yug;j21A-O3=4wXDN`z8HN<&^YMI z#hw{vCTdaoi7Y)x4=!B|&MQ3u`}T-llk%`6IL7;enfpNxsrNDv(H=V*bcmCevA_TO z3|r*g7702@O6BRch;dTP{}%nP!JV)?mj5- z@imp!XCL~yiT*n;C`AvF_^#kgW5@NX!jFz(H@r4S`{BD>?7qKwW4HtSpDDYWY~g(d zBHv#N6OJ2u8NUYtFk%4dSO?1*f-qES9BK9@E<*@APNZEjK%J; z`1~xHo7V-1^N~>*uN`lnV!vuN+wf zi`;+op7eLq+5e za-A*K0<(Wdron!+InM=OG9Tewp#RXRUlGWa2hsrW?NHw)a9yWTh+q2nefvN+j6dba zdZelP>e`RmjZ8v!!uM~ACkF_=U0~gX&~*gzptib5yCik2I}(GBNfX<|WsybXrDRCv z12Oo8qB1Ex04@re{LA}eqe)}|5^+QXd7YCZ$%ud(hK6Z=Z2nIRVk&PqGCx`li~W&D z5C#JH6~qsSOBB3x=6W}`>Qo^sXbqk{A?bsD%BqjGRQ1r8*bR?0{&}vDjQPgOr>^*$ zZOk-n!)WL5?yMdh9k16=4hgfXnwR7MG!S_=gC3?Gwpr;h;yJNAIWS@q@bS66%?!}x>+wOxeSP%zS8V7 z!(L1dz)v+);XxCMC;saZHeIE_Kkb_n-&o zc9LgK7EdjfN`10~<@h`MLE8cM9>t6mI(>1#KOA|Jw^q*kK~F(hEY)af7`!(Ia27Z7 zUG?i`lM=tv`V#v<)90vdj)qc=y+kJ|ah^V$9#zIH5bY@-^&&JVF8L2dCgkIPx~B4M zBa2UyyYVUN;p-2g_eBR{>YQ9L)}8q#2D8rY;%#y7J$XqxRaJ}X7-rkJQ=NBD*Q~=? 
zG%x}XstfAylOISK}*FqyMKk<{$D__xH zXHb{14`1-dB_)m0-UhZYT~#wY#Ay6BxfFR+)YBv1fK=MxRf%~b)><|k zs*@%6G}9Jz*2@olo7VQtcxP+HpO~viUSEVXZRZ!Cur+W2gwtGWG1t;zGPa5P;2HE0 zB#S&JRBk-u_fx2;a!9Z1iEjFL(#kF4T07gw)ylV)!N()fIeb_4$7EQINy}d<>q{08ekrCoVR7U4(AZxd52nY;B(g4Q zcRPa`FCR*J&;EHFeSB>;7;S3W-zysV$c*b(FKKr;&q{o1GpCz%z;G(+h0zwB5L2W${m)kUpCC{R zCFn|{SI;TlX(kBWF4p@a-RqVkZ~&EqA~cj1MeEXc0@Qc~z8l7Mnmj~2i&%N(K;!4A zhfL7|1ba-GY)SDe$~(iar1;?LtEGw*&&tg{-S)R!Gs$RHIduPOT1FKlqVV0Mr})*v zv=#zAR})l!!WY63{q@P6H?^t>3mjYW*;n5^@(||q>*1y4SKkyF+IiLXdyg;@4TqfrKBb)1xv!*w{SBtr(u*I~ySt00 ztAAh<+b6CUJG)r{SszwG5Pae`^mZwBm=%We82IIh;~MuwhaHz%40L;#xv8pCz7W$R z=~@LQ?Q27Rd8Ch!zGQgJzO=d4!*?3Y=-M))w;=VlL!|#@nO)rtCdVEb#!P@N6=8(b zNMod-{C*?lA){RwFj@UKt;8C=pHC^j=^zk0CvlnnBAC(|ud(dI(L}|pP>tx`$?5&t8XZ^M1D6x7G&kxSNC6j(TV9PBFE}iZ z#dCzNc>i%{V>cIgDSnwfop#qxE%qDS$lw9^VoKvH3|;Ei~;mpmMyD&|B1_D|y6a8agj%VDKCst%*%#36h6+B(E&E)BR%E5p3+-58AwQcj@*cruzwxq}hhtPV*ib1y5?$ zhEm+|3>Pz&)cM5sJaxt)W@PXSXOuLEWzSU8&hmq6ZExOZ=m?;;DL#Bz4$7UOPeBMr zd-RPulnB9LR(oBDq7NS&5MffSa=FASut)Rv=G6kP-_+UrTv-!iFPzfI0CkcC&-KLa z1?&&nPtT2`Lu&$r`VnXR^9bwcw>%+}oT|hS7@bQ|84o<6-Yt0e^iA!t7W`KJKkMj!z2dVI18v zGxhVDTKs3OvT|)q=gYOjFGfRpSl?&TMq<1D2ZJ6ah`2vb#gx35;mx;-?JbG1DRq?G z>)~fBkv^Zl`jCsd{(-p7yNc@1?5c8dTqcov4GUW%Y4=i!pA`kQ)Ww$==_m92_R zHeBX4;nK)-JIk;}PT51I)D_m5LdHs)Z{|2ZSMc2AJhP>ZrUZSA*V9gp{(Z-U!@gYc zLjl;TG5v{5{x7CruIv*UR<66xUek*jUY~G$UDXX>C1kLgH#D*}l6#}Th$IHZ*21PR zreat!RRrxc^?g4GLT4_lXWl zK;%c8;zlK1L#Rp5BRiT`&2enzLPBdD8i&!E%8^%l`=^d_fc~U4)kmQ{HD3Q>Zi=2 zA-Oy&G`venSm)QAS-$2zyLMuoX;hf#)+h$hB%>glgK&_M3I*HKk zrHJ4aw%n4F4hXmRE#krn7{Hd18hdfh%X39{+w2nAdIIkJxT}vYjst69V2osttcC&{SlQerz#4KD}M7YRs>f=?I~T}gBbV*hV$1ZG*_${>`ra$P#@JUiZvj&joEP~oM}`8}^XDYr?R)#twa*qCu~a%@JVO5kTxFsSbe zjA||2=%Hx2Cd|6OC#=tvWoL@%X-jcQpe`fRuoRTdp5r!(>*=L=Mz2Q@cL~E%yc8%Y zu956icMlk%)tH3UJl>$*x`3OJk8nX}eGu5mi?T40nso_rDCb{UH-HmJi^^l1a@tA!KsK*<$XYi|ADn#n9?v^5L;; z<8WghY0I>Gm-|<>+tG@MH|+hAqsdwMX4f9+^du+sclW-Ya zhw0Ba8mLG0-JO$Wegp+n+9L*1eYiq;Kx@EziyTGQ1wV<3%$Ld5wqwA_qvq7JF>CQ4 zSy+nQ4KrC6kEiwzht}q`%vy3onZF9ejSXy>EWcT!nT7YuFXv14aG@}bhUNt&>Q!m? 
zH-m-`={jl!QXGQ{L=0Jl1o`nTUieicCy8g10>WBphhI(C%`}AlAR8wZI8ZdEK9;S+m*{UmC@UOL+{?)q=>_;QI2Q{tUV$hvagoA$1rnVhcIL$ zmkKfeM_q_BSSD{I>aD{1LM#^ZK-YKr(ay(%(v|i|5Oue7xVOeL1InVC0Q9t7sOb5- z9-yVk^~~z?d-l*F?yi2T(9F96GnSOnlDpZ~WlR}K@a6R;=4l@5K;inM(-NMtJn;hG z>$zi$l8F?99i$Wj6_&N5c0RkV!`~0SDguEIMX}B_(`I7P3*0#6%OP{%gbx)2@$J^p z;?q`!@D9xER01d9CxoldS$-O8O2G2}D3QXcG~Qji#UUd$DYOD;nU%y?IegFJ+c8=p zWOq*nxF$j-E!PZ5Yik=JGj1Mm#ru%J%g-g?yG_@k%KDxfbO3S%LLz@B@KGyGYY8bD`Gzu~v8%PuODiVw*`x|Q% z@lR3__z)6@vk8E1CaWTl2j-BoT8(eeX7U`nwf;<~r$|DM*hXEHg_V>t095!4)dbhq!&DcxpEO1#D8P4B z6MI2cyDqDe5wbL9=5e}m!rCg(RQ*N2u6U927fj3~eviA7CWZTDnNs|-pyiL6m7dYe zjyd?NiE%&0S>lhxFQ-o|d@rP=dytscGmmtn@*aS3zYx*s#FG2ma*D|IR;#5Q2bWYZ zcb_!C0&Av_b-!iG7|M3`q1U_I=;vHnPMA;CtCBZ0|GHNa?NfJPeaXgb&uwvSh)$jj zCsqsxeR+#~fvgX?S>kYV+zQsTvH!$5NW&y!MaLgOGGi#9#0xwdZ1jxXe(e*V>|++| z>6N>e@It~P7qrmQ!*3Z_R1ymlJ!q85+#&$K971#yf1n&GoDQ;d8IONhD{P^EDKfR$ z{ge{&E4L-k;w4`GI(84UnObSVj}=lsRXczAT_}1(v^W7XH2CmrUg|$Mbk#b?Fl3{B zQGBRVpLScmMSdj5@^JH|z1) zbrL2c_q<6mU}urdn<=6yV0@7mg$jc#=d<*k=P>j9GiBM^cdFX{)Q-t|U*;)ptM5^F zy#~^Yk~+G6`X@VcY|z1%-)ZCewUI)QAKp`{ha%nbhNc2G) zro}VJvX#XcwfPK;dy6xMavkQyR|8+N_2Osv8b)2hw$(+dPU0)Yqf8<`L$b27H&E|H ztjWvEXG74K2If~!&=Ga0vjZLkk$>#+o7Um}(#1Kq5UidNCUZp)+!s0i*GxW;E}o#G zr^v_5sCmk;-VEt4{{28~s%9-4a@=FOaa154$J_Whb3fM6z~%OKO!WQe9AIrD^^?!* zzKmY0+xQ~zwym|^q-Rw;S5{!)h(9IIt(@6kIlxZ4i13<3qrHKRv3f!WcDux+VY@1P z)nNmCMxCW!w#_DZaang+LjEfx;;xvjEdw@>a-BjfL11$@!r4TC*)h`qkcHXDPz7>U z+@fRG@n`Ju#MFQZZ=bINJP}v&Zx$4YwCTh%60fk}D(a;K zXRC1P?Wa()DBdK|@$8P{An^)N97^}i=e+h!e^^#Xq|7wsA^Q~9-)B7S@9%Gk$Zfb8 zK>p0mYQmBZJwiIDR#}mshyFL%K zA2aSk8_V;3j9ve!^=U21>>k}+@yVZJ%y}+HQO?;i_Ynd^SMfbLjEtQ6`pR+rD3(!s zI1w*6S(ro}E^u@Wm|t3A<6VIqh$2FWG{EXG+l9T~hL$K^E8xG&KHX>-GR*%2iUGiW zzt~^Cgu|%t-~vPCG{J$TN(GnS<*$^)W0;oeXf7*K#_sh+$y~qr2^zAdsZBX?`_W@2 ze&s70%r-Yn1E|BFL^N@+1iQ*PzNy^^v2`f2s3@yzea@c=Y={SMvcEOy8fUBdJDo@V z36>UFP345e@t$h*0zK{D3daIxcqBy@+X|T^IN1!_?N?+S4bwd%%Ak!Cjbg8PYyVsacD@hp0*;jKj9M}g8sPh&v?FL=drDxV)2e@oT~7Lc1${gJ!Q z*9e4Z-+%n#`!m&&!KhLu?NX>x2WF)$fr}>evbCr;<&@**zGTn4*nKL=Cpg9E1BWg$ zo*xm;>e`NgU#Odo2Ux{HLT*LZugf$n46owab{9nnJV+og+T{UtGr}bJLClu|dLF2G z?;tBt=FaR|i^l9V{G3S4lbkJH%>zM;Q^4}o#&-|e&JpbdT+XV#)oXE@0137-y84a- zEfI$k`9Uz3JPTJpVol-3*2)Owk@kM}2PqAP3NM(J@8b=bv<$(>RE}K8xC9g&Z+1m8StgpUH6?m*oLVz=rx*ke^lWu)f5jV$pJo!JS6;r>OAVor^6PUv z&P69gOOx2$SujojAvNu7nf^7D9Ponk#DMHYfqzIV>4^UIpV?tO<%!*wTh2Gb<53P& z-ZCrvtP;XeaZe(1-hjS44Rkb_o!?j@HUL9uxJ!ZP$^2H);DxyM2}+!UWh`~N{=`AK z#h;JUH(QowF~&FVusogIs5QN0?DTR8oHC!e_ArLKCxm#+>-L&O-10x-7YMLV=NaA4 zMhx_D*HzNB&MOHKRhY9LUnG3jPqn8O4bEC@`t#OG)xh59sdjG=7gYImW7j!@Hcv0z z^3$UEBySTiqLFoZm;zyZ?k65Y<9{s>cC2}&vU`#-?0Xp1+%9u!MpQ71C!Tplds4US zTW~0`4q}rgN%Bpt+z)okJQm4k@9d;rB4tTM_VDALO@)(mf4#R&^&zW^w^z9 zgHhca$E?6_RC6g8x)&Q`fX9PJkIm{?ar=iN?GB$spesSQf;Z{-EWazVGhJKvjMR*^ zGz`%HLPz&J>bvmTBY<6wT}a(H<hom zKUAsrj78|$ss2>RiqAiQxq0L84E51hzIx~WG;T@8uO17IWxcyOk>t>wX=$%Wr~JcT zouRaF_f_3wa*M6!9s$zk9@UZQtkHWldJ9YgG74rwo(<7U14;O;q2~>e z1+S7Foi>Py4*#R4?1Muivdx@CfC3h~`1LWH7Uro|2Gfi^{V}5#Z1Cm)V|i!_jfe~_ z=k`=|o5rM4bl-jkhdkAj4q*WWCfDb6vXJRpANTzWRWWK(rxv_A7SfSt7Hh85)55Vb z?*CB*SlD{70=SW%&=Sx0?M}#7b;B9#E}+v^-0u;=F;Yj_rQZ%PClF*(?pBB6CD)1` zUD5#iSv)&f1itYoGus+$dSFUZ8xJ-4)V#HqMG zSsOCT{gO%o-#hw@LG1;;_E}@&ZDMAmSz^J}sUFH9@jSif$ zE%oV50PbYi5KViB2k6twhbn;yDXh$F+Ka4a9vr`8guDCJ<&XNq*Ns*z_1&_6t3>&{ zJA~Pb*Lo~6=u&&xa$~qC0&tQF2TTK631Q7qA@Iuqses98QS^HL?1U57M)V8viac3Y zjy#e$#CV_zac=BFq@Ei{!q4_A>#c!FuJ1u^L|jKb{O<6c8IzR_Sq;ay~v_?%BSb{KxA2U5ysJE=jqFPznK$0 zZynC)XUIPf>v@~$HnEiCL$@t&q$|nCsG9wVFfEv+X%#;&qycFd(2OM~BAM8j3oN*S zms1V>et}T=u}4$c9xPL${p!yEdgbU9Hr7M-9U-Bu6@}+oHp3tpk;X5w-d<0{ 
zL&R^UWCI%80y0zYZuk;7CymEGNzAuo$hsciuNH9k(7#=@;6(LZu!w3M632S5bRsRu zVM3kR_dt46a-jafb?Osi^nrnq;_g$XddYkJ4ebBPGHalJu?i?73EGwLEbXjC0E^^< zx<%U34cVgp4ZxpFSDrdNqw3}uai!d9F6{upU3pm<*uhULR0+}CzS%Tu3s#KxD z)*m2+*Bx{8cBn0XFwWMzv$6gc|BcOBdfub;?l0F}q?p54@O$=5)2C+W!&7aRJ=Nibdi&l9*@B?bA~BHhwRL zWgd=5BHMaY%{NR}CU`1j*eC|-zScX4o?p~5gYoKwsvNI+b4^Tld6%CgRWxn_#6~R$^ zo!cqBWNIrqUMq!6^<8>r+gk}runCLr_iVmayo|r%XF}WZX5RCB!Q(g%{6_GHk|x3b zU08qUbv$c%bprLU#wHJQOU;}f|6)$dh^hMdM)&u|)cdkIm+4(2{_&d7z(Gw@%c$Vp zh%c=7%<*u40^s7Es#J~KiouoDB3GfLaYVWLfwwg=5DDzCZOQX<6}yO0j_uQwz4Uns zMKlV4!S?@%cs7K19Z$1TwnAnheb$@4Dv?=jYZ<`i_H(X>gzJ5X9~*_@+cKvH>#!HH}6AGQN9OL3)D+__`OHkIq{~E!j2SRH=5|8ExSCX zrL*HO@@Uu+e{`2y)Io0Zy;$XheQrVPFuS-kAm)ZqdW5YlFjKwZClOJ>KUCxnIq4$Ko2;#PC@-`z#6IF~aJci~H?_a1+>}<@ zvp9#l$=uSnWuUHRin}>AJkm8eyQn}^xiopVjP7w{|JsR`@PE=vvJ znsJlUJ^OcFVDNOVdu(ven|2bAkW0nC-Z2>@#dUA7V5ZKo6599`9FhNmtWRPl(K%{$ zr#3VE+w$Ksb8YJTnqfy7&Jox~tymBA$y~kgWQui{|L7EJz@pEvZs>D;nKzoW<6Q?o zG`wi8YE2_=b=^xMF&x~cx=D4H?$VO=o2KlrYbo=w2NmqbUVhi3{r)7etmkq@gR}aJ$l^N~s5w*awHGr`$%?{nmB`MYsz8D>JUupt>sC+zV3J`aq{WzK` z!`|a)&2K-p7Y^=uL3xpcy9Ll416YO;_i60iNbEYitiBP+WaiS6^%2@! z%&?Y^;Tr7!#-)h;IIM~8y!K$d-ZY;m&;3cOQn=oy_2?rtngi_KC>%4$A%u6}oTvzc zBLW+UV<##1<4G!GfOn&z{C}(i0cR_U+wE`R=Uo>|woNA%-NCASh_J5J=%L4~JH}{H zV!rO9e5wCv zFBY1Iv0(FR@6!7ZvxrQ?`=#sAqdsQbN}()|V+xmuq`ZLR061_~qZ}N}R8nMW7{!3q zIx8OIaJM?#myn4guz52o$5#f3YKPPSFE6kxn+3aL8L8b?2tnFk!smNA29hsUYY%wo z_nSG*SB)Ax!OZDG4+{gz0uQ4OiF|HnkMKv)tC>j~Z0D)Bk>T`>N&8_Eha9k>Y`n0z z|8_e&^58lkntrCWjw8(Jc!R@p;qEOceyeW5CI`qm;Ddz+CLp)oVOkTffro^>#b@79 znbNL;{tPmlQ{SQuQV3k@si@pfbg5{a;XuiQD{rI4uc_N0k|##WcW+O^cihr>R@7AT z=06lTW%g-4UB(`PW$&6w!%h+`@&o+}&ikcQRDgsNzIN2>!~UTSzXX?e(!};wZz;vh zn-tU126yB`*&B8Ue>W^?aSS^m`8WpaqE1O?E?;!3`})YUSKOLONzNAM@=!L7jTb@E zG?zIR-n&1wuvWetwbj&C(vVKjmE^d+)StC_W!f%W7Q>lA@5_X^!J#c*_+k2^fAeV4-y?{&BQhdA%GGwInY2<~C6e%G*^N-Q^7Y1w z=1%B_y??LrVXNQm{{dA%s=uCxIOC>G`r{qaAOC+s{)hNQ&WBG1yX4;F9<#$qSsE%=hPD%2hi(<_s4~JCSWOuJ(9UQY8d1k@8SSt zVM%|_HwL{F(r*NrU#v?9eP{GJ?6FTOc(1h=_6x=e`OZ{?o~+0Y$$5@4Cbn0PUjcZM z_%mPivTweKk1asl^6n-0h3g|Z-EZ6f!Abn$p6GJ~ZynSAyMAKTZ{C1E`2W}YM`nI6;=DJRfzmp^O^a)=BiD6AW$mt|19T$4s z_*^RJo-{+9WDvu4;5>l6uQOd3oTxkWBiy(VI_6-h6-D zPIu8AzIN}Y{N`LCSl~C0;+F>Xm+gP!R^Ir6-#h>3P#2LGtp zZMC*RPFyt5^}_*9ik{v$Q&QCR?EYg1|-K_}@>T92;fMw!rM}A2V-5t34iUfXy06v)pSle5~(U?zO3M}Bzab{%^ z>Jr!-$kT#{WfRJxZ}5M~qz2a_B(lLBSUFtDWzw;P7BG0yon~pceZP;yX#{ToJ{Yr);BCYmPNtr3Z5{qBU=m~eL#L56sU@NojO_|0=c^uymg7Y+x-H!Od^2H(#Td?D}!pYQLr z$bc8^=fQ)&{7MFp6Mg$5fGfb)Tu{gF?P^^I6bdZUPIB_3NaR+7fP1$09jfBbDZE818j&j=@f<+)D!K zKI~OnK+A3N{~v$-{8V_ctD0Xm`j|z_BY4l%JJH2?^T2Xne8FGbDDO25Tla&TfBh$} z;k1{lqdpD49NJ~Ti7Y#l_skVQF_u!Lv`_9?_?Dt=qXW+I( zZ~AW@`+svC*1}5e8p7Xzy$BvOwlcV8C@JuB*rpr)%v;FGZVEfPwf@Dtt>JINhSTs7 z`*}kQgn5Gonvyq1IfWM5@S*~{K-L)16Pb7|IfHSc_q@>p!N+4=$Lty=@DE)4raF)^wI&q~68En;* z(OfU23hF4(9mvRm6k8hK^BDBp893XNZ37fmA1DUAhKwQVg(|qy6~ecMJkaZKM2|;W zEll|D$OGM=jsNZ=B-^V&NuwuNC;yS$95^_kCxZX>kdFEM9v*&*IEYj6FZ%g)$k#T? 
zfaHHK7lfyCSPQ`7q4BX10cMdny@2jk2=h+@*p0!x3gw!nOGy2wlvAAfh}O<&mGCub z@q2=IyD%>-y%2Ve9XiZP7{|*Y$9tm9xVej-o>Sq?UF#+BW^iePt(4GUh+H?)7)gIP zdc>)Hv`6$~ILXmHSMsr5H(b4ur_dAQI!=GFt6hMirjj#zx?L=lDZsxSZ>x5Yd7j)6 z?Wq))Idw8ASl*>HV^TbG|AKy?GR(2YZuEJ@A37aks*QJIb@c9U@@t{jlPkm5iT+T`x^X+YsHw*}IYC0dTjh*;7c1&tasz&q$3(TS5!UdmNL3ev!uTA6tSH~qvS|oob zOy+|d%X3*-H4L*riIp7?1Nis1ThCmxFyA~b(>wYWc= z!MJ~^AopLg+XlrX#f0v__iOSz)S$p%vi*2q8{_k+|4~#*-Y2x2tOV{+vR{rWi~WM} zAfLw(j`tHsK9TEBMAsW+BO&45#Oq#95f6du9n^KXDfv9+2O87h-rv6G^XvZ5hm7mD z>=^i+6wj}MgX=&&$n%DP53qj%o~Oa*7X@3T7+=}p{3Y)1>+|5mzA`s;r%O+32} z@a3fR!}58mGvxP277)@Kv0F-t^+Nd8{``+@6`OHb`*r{MCFdkXh;ky{=7Fc^1Pbhq zYqGxvIr+>2T!Z*`L-bbOow#q4+3!2Br}sC3fBSv>ZVvnyO$a*xk^hd=z@R1scd&n;sPU89Q1csAQ-hkC zt*Fy-pimQq+Ea@w^)e{&IeAiRWl=3h0hKJ7@$dT2BYhL^Y&oK4MVNi4@{UWNcnsb#!@-;7}e zV(A!X$Yr6T9N~ZePG^aApAWQBCkJ7f*OAlsWNhHJLETF2#Cy$}wJLir)RaO{#t5Xs zO%%y45T@pyW<%TURp-v|u zW_^q6dE5hEXVCM28n%-|jTH%&Bs@Xm2rS9?9PULlVDEqRIZVMYJ*u7wBrIs8y02@C zjVe1(G((1qbxzSutynO{x{(FKl|+ij-|WI9DDN@(tUS5I@O04el4d|#CpbSF8{dO5 znM4@R+7T^>4v%gm6&ObcpGVj5`}=Ue0O{g$U=b&uYslxwI9C^VPppmrl$^(enTdR# z9&bB%n^-E7$bB?y9s&Gl9Z_BPBFrb|4xh#MvaKM~k47o`&+omF44@Q; z>sle6BNr!YNffym=8n^cWrDFrWgsibePg^MGM14yYT(qQ0Ng@6Jrdg@4yBJ#7D+LSY>r*m!YH_63CP zP_=)GqIJVYR_Yq>=j8LQL$5K5x#jg-hHrsX>3e+{SYLnJ3jzvasuw%1KD$nt3VL|5Afj~xrF)h^*+KN8+AYfkp-FMbJh{|y< z{r7y2(FyA}IL1KOk!Q#K+Wr2=JfhzO2FHJr$bHD2nMGce;r*py5IKpcNn}aHYX{GV zzmGv>7lH@1c3h92nWIyJWmB}kQjeJz4~^mb_nt0p`;K>s;z$-pHO3$h@Awpe*xa zm{ANZRQ0fH0DJ7mRU-3>v8Y`XVduO7tV>SjM9Rr{1vkv+3iAs_l^TBs`vjVrI_5$4 z@P6CBzsCFG`~4d42mSjB#v3^@^8OC*EARIwct3r=-*qi=U*!E8Z2!;v<6M7<#d{Fp zcLrXsFpnzPw*bGWuyvyR*_ZIJy`dYc9s9s*!Zm~W9)YtJ_Jyb*p2(QmjdtR&USP-h z8N{Jg=!Z%P{w5f4u#dY35IUNK^@MeW^(Dei;@_I3v8SLCk1RYHn>+9~*zPW7pU+Qx zi^BJ>yJRkfYJRg5_S7Fbgj5GqX2*xMJvuzOU8lv!iKfwEF5XAc$#+UQ@ z_s@7=e!ow~ciX?OV|YiQ%T;kpGP|}jvdNyz@iTw+G6-g5F6rlL zgiL~;Zu%gZ6+^0k4bwnzw&@|x?~x^Lp|!Pb-U zO%oo^*5O|=F5rLjz#jPHeuynvqF{k=sFC0&5oCMKLT7=We&*9;$huj=2mZbWJ{_^$ z$Z%r(_x-(Le_>6*Zep$0-1dkwMbO|cYr%W2aF4K+6RcJEnt0CN@8SC-yvCXJb#08X z2}+M#6UpFfM<6DnOT7E6t>|Cxa|3e>+oQua^mcaH{ds@i4(#w}?$P;7vW6Jre;C7` zH3(99V&e;3mxprA?F5kSY=W|kRiRCc8<^r;2lkKS{(hah2~cqS%XK7%_?O>D&Iy;C z#{upc?Oj23ci=CkOR0F&K+;LXzKUjD3)Ss!*K&8_TmI!*2iMlOJje6A_X`=D|HtPc zYYpdGLu`M%`XFdJm`?`)?hl%O0!*h{5 zQP5upKMmI*T~DrC<+zT}a`)FUHO7c_i4P~kr@(lKmX@C;+F}{?U&tXi2t_aAE+wgvkIQc0U5!Z!xnPPQT=!4xSq@t%4hxc zbB-t1ro^ZBzpmx^{=2`g#hg0g_qADn&&;oJFY$fm5tc>8xtZXCRiVy-tOu&P5(IrYk0mS3Qt3DVgjqF+B&$7s0nPnIs$H+6YC}f zPX!2Y{*&oE6cADS0a$`0_V6q{hj*<&r%NT=!2oe1rHy>31|A3SNujPPga4=JPwcsc z?@!>pZ|FaMQb$QXYuKw$Ni1x5Zp?<%{>4j|G z_iKzQu%P(zuh)PMpud40VW(5+=&1NPVz~l^ub5lk&!3rU1X+R7Ih1hFxC!tX5|S!$N7Kcb9h~n*DMdjI-o7_cc5%b`n|b3k(5PjsNsH{Sn!q`3R%MD`kg2Ogq> z`3Fum0)&#_Hu3sGp38qg3)o-27aoR4)=P;r5pe&UMP6fE9xP82o=v*He^Azt=MAA} zLe|HY;P3kTA%9=&Q|zQ}kMX0)_oHr>5quuNCQlaRA^rD#jhhma@p+KR#phn6-{8MC zzx})+J`6thwSC_g@Zoqqybj2?9m&7gEf}AEe2=8`(0@M~!FGQQH)fSB?g#UN=f!>F z?^a}-Fprb(@m{8+&*uOAb@31P1>JAt;>kGTbAP=a@8f>J*V&+ig7L(2>g016cg!E^ zFF3kCm%x)pOg&A{vy{CL}|Yc`}M1neYpO2_lN889HYwFEiV001k=`#^9ogV z(wB%w$E|>KPK6LoIz(2`PKgM9=$ z0DIT-B%%>Q-Op1fc1o%3kmO7*pQnf zM-R6lH@hNs7+P6J$Re}W1Sb&`1ECLyC(Ve|J8pjtxnz^nEpZ*;8wGaLs!@CwAf8#U zM7Nxf-4M`G!!(a`O?bu1oI2z^&lxUz^&IfWK1*7sz zt_^=z+-n`R&rNDMYq<~t#kzdH^X2%(`@5hF=)U_dI8t8~n*Z&8`=9@ZLv=f?@Bh>P zc)vaN_wKU)AOGk7>wiB~=llP9Sa0|L=jW$WUUvVXul@b%|1j72`M>qQuJ2vB|Bo~I zeEI%Ov(MA){vQ*X{%`ed`yY>6`9JN}_w|2&kTt@uQqzwfA-BfB7E==C7=D)F_1jC^UunoE4|x|yK@{nVX2o^5{; z-!d)eDuIi>^!WANvAmm|UiMlZXr*H4Zgw=!^L$=Z%&}t9_61%tdgyx%ozwpMKAvy# z!BfAkZSFOgK4g8rJ}Z7*4pvNno`_K6f>6}8rUSa`Rjbb;o3)4If&m5n#s@pyh^a<2 
zRlVe7a9y*EV^FnI=XZ8Mu^zJxb)uf~iQ%=eS1w5i2# zx4oSwo(s|k2L;d6@h+l}BQstUwjkZqB)m%MHPA0XYwg0}ISQ#=g8xO7xgI{&-R_B<3t$JDN1^zm*J4`Ew%0b8Om>YH3!8PkGp=lDY}GNX z^Bci^+FsEX3xR+`KVh;lAedBG6F%|Mbw_|$X+nj?N(eBk@VSa zjw$ixAwxhtucr`Y{uQ--SWSNl*|F8>9zos#gxCxEwYe_tep9Z_iF@4DD_>EWlg6H! zDOA%lc5CvCH(n37uT8U>#Z0sqP~~j|yqh*kER$}AIrCf%X$apR!_=ZzOlwh^KB)4` zJuWBLI9naFxwaV#gJxCu!FYfa#_YVKcBB1hcZ=o+r#xP;Bv}1u=-_{~oU&Q^ye~bs zu3vFA*e(u3i3(=dn|nIcxhv1--em0&PGa?{t+$iYa2_w$ipF;~v#7dme3@-f0EAuz zvpv|fp(Vets&ib+0uXdM)knq8rDBFM7E{yZyN`cCpCL@-&O2Eq2ARFpkk~Gc5YdY1p$I>%E2&9Ln#kD*wG)WGZ z&&N4jWoxZnkDr6Xg|c75d3<52sMD^CP9L54>!ah#5riK?J+^Kry7ClfTwJG{N3vWU{ZaO6vf*NS2zNuN?7YjJu2Qv4>gj5^ zm>rFhuPN!cn0JZ37d7-+9P5%A#qHT2oWl8Fad_;c#2v9kQ7xJEObQlzCz@=;q7@ne zx}FP;8wluKm79OuVrF-Fi>BFSHkj}S`youH;rV!IN0cZPmqis<)U^oBDf?=gratTe z#Wm&|0;L8rPp(v5au#JZs$YZct<;pMyF4bR(dZ;U)5&x)ZuSp`E0+6hwb{3umH6z& z7Il&445jnldYvby^(Z)Xgv;wIX73u^1GcK3gkA1U^3VI`0GZm-za!Df(p zsa!^NS9;~O9t2t(A8UIFZ0Po}t&fV|g4SFX*V{Hr`Ta9)!`Wl2MTdd7vciHQeJO^< z*vcd_Cc3{7`Tcpmkd`xZuz5c0_P%fjgM)K-u7V|xZ_Cjnln>o=u*#?Nt_+R=JY%t{OfT`sk_HYpY5SZnRnKb8x4x{`bSn&CU52BfL7*jsA+ANuz(sooiKMtAK)6%w@ zUoU^-lwX`7_0Lo*Fu%X}bv&R?uZy(2O^W&)JU!!3nv2VL%xPP(d%*&Oghz38{t-C? zP1Pn$00lASsUE|{fv)0{xHcopnnI*@zsfiHg5@l|<=4a3+#5m}T#x33+3yZ3SPptp z8{TT`T&fG5ytoTEy_ju=iuqaPW~IXF)46|>h=4>NZ#&7GY$GI|vX^ZvZ&?x?PrDnzySH9DZ>Cpz!sN6iSTWaB-t8SK@7CSR3v{Gn}+QGbuLwROOw^V53t=w52MXA1qmIL5Jx-t*&Kwi?LF;^hvQC_{MzFxszi<(7! zJ7vnrjO+axS+Y^N(neVx-hwcBslI=FYKP)LooKd_dG>w372KgZ<1Tu{LQUD5pX?FT z`_F29iB8a^09nsvpZSl7Zy`o_Twq8K32(I1YGqVj3r~MjFVh*_9*_%c!q5Zs6MQL`dUf4A+4FgPuj&f&0;gt~ zuF&A%6q)YK28ixjPb}F83PHt%51&@f(o3cU3!AaaxFBOG4KxJv*Jfz7)5ZLLsX!Na zq7`iuKQ8B6W}jR8B(7ev4W7K%J6=`CduY&GNu;G-d4XoD0cTu``vHGp>ZqC7owY!< zd7BNdE3Mjs>f(NaXw5EEh7T$n7sp45s8-($uU`3BiRT;QO(ro88℞!y3@@U}V?9 zvgpjFMJHDFjJ5H=&EQ_;n|8*doOWzRVJ8?$aeMCBdNSTCNHJZdQi_QL?YVmGhJuau~%6+A+ z5$%0$Bmn3(zQ6d9HJYsS<}{T`KVCA~o!eL2W~!L31es7T4GyMJHQ8J)(FReBd9|ET z#aKQo`h0%zw7PKlsou(tw_7vJsw}qiZ9WJ4Du}`_(@PoO$}xY}b+)FK!xdVzv{ByW z{@w0s&(EgtW3)19vsP=gYN%zjTgV%R#8-jmb~NMO&b~d>N5LQ(8-AXpS2dn`px@n$ znY%r!{ku`08-eD_iTOINvc~1}U@xZHOyZ5*?6Pi>Es{UEAswU1!a7R>Lp)o`W8rT6 zf~D+nvz$J&I$D1YxI)<&%l1ID1R}_2QQIWU#-|qe!C^FO_Lr4OC40qqU51y%zPOD> zt-4<|8wGPTkH{s#a*Q=yx=JUm& z8OAeTsQk2IOAwh*^%g(}I=t&&EXuLsxMyBp@o-66LC zQ{`n^cg=rv<-uB@(CdcVi~h2Pxm2z;@jjX^fC}CeXII)MAP`&XQ*(Jaw){cPuDcbe z@|@wmO(k*L2qRu)%jZ%}&qi6E9%H9{MB|mb8IQyjy99bJl5Tc3-!Xg7PR4r%F4_u- z4e4rA&O+%X(}lTM4d&BVRt!**y&W5A)-^OW373D+1%YqXPlmo=hw=a-b>Gm zhWEo+9~uI5Kt(&Jh-~lem}CR~Zz;EFw^= zwoqA;410UG^s8}zSh??k4e@C+Nw}(`khdKOdl=eIv!(rJSj6g_${i(wuOuBFNiZ#= zb=ZH2o*`T-4S)t&^vw1Z=zav_wIfwnN7&(Y}sX6uJtuI59ZKS|1A*`d>; zDy;FY8lAL-`SR<*rEYDk^P^Z!Z=;EQT_3hv?{xNpp-VaX;O_C$Ia*Pj4L>~GcH!!L zyKJ1r;BMC@v)p+0G+!?!lY?};x|&r^)7^inc2H&RMkhlHrq^;BA5KEIAKAeI-C(2J z(t0M(A$2vTcJk8hOvP{M_2ZbV&wRF9KCZiSsN&Rx-0ZcpM$>{e46BwWim@4V=H(XA zg#$jCZjo6`vKdV--Q7I4c|q|?ORb}k99*xfIlZ5QeK)197m9BV;pAZWYr#v-?L~j~ zwu9lyTkIh|vKibemp7EVdrh=UFkN;>>7KuIq;k^X>)>gdNBml>Q1VZzBDlK)Yr6A8 zu_}r@-t)2H)AV&g52mUzbDE^(cPBl*J=(AhccCSsvbL{{p}ez`@WzUxOuS|0lz$W= z*n;JUxsxRg!Ck;KVlj;GEd!_13&(%e$%?X*G>lt)4n@vYwey^vip8NmGHpeBO1)BY zcOmWbBR93swPbbR{wq&NciZh8R4bkswkgvdX=TIB2D98kD;CseOnp0%p4Sd!OcRL$ zd*He~2fD=MC4e^*UC0F+2-G?pPbPK64b~u$Vb_mguGlJL=+<_%*g=zKmu!DM>y(`8 z=V=h~n`XJKlHjxo=vZ0MYY0_?K_3Yov03kS>^<{F;bQa>*ZKIUN=L!elO5XhXnR8s zk4IU^yS*d7n={uW6I0(7ZraU`}bl{8-uMd zEC-QhKQHuUBvP7bE9MS)1U;|7MFXk+c6~GX5#2GT*C||2YY7U!yt11D%zkMLZj>Cf zNni@Q?mq62X*+Esem6Y1()qMj$20X*dz4f!A2-R*FK3f3?J4@t%HYOt)aG)IGVv{Yn%RGTk};P}EFQx#)Ri9T ziS4ZUoG+Kt-O80VD15}l$sCIpHr~x2+}IU6SOq#gfo(_=quaFbq?5l5z@p;)5%jZ?t&8neF 
zkKDa>r~KM+eTjb&UjTE>ggDmX>#Yh}=aSV{ww7Pk4CM2xW2--pyEUiZH}Yu(CQ4r% zjpSV`Crw2Iim%xh(GK-;m>0wP>OL;jVLj)X~bsUc_fW4;-X?M5`>3oa4<_ z&iEK^ZK3bYb*JAg?sD#S{v3Uqi(H=LC<9yvilWBeJRg5*P$^zGc7&dRV;QyOL^MEg zbBK-w6J3=9l8R8)UlpJQ5g&v7q;(g@O z$@6ez*%Mh7riwCEpYh&3tu8BL+KlcBvs-l|ZLvw1Q=Pna=SwzTZSyJ%yFxpiC*>V3 z^1v>yduxAqf6edGX?yV&h&U!^3UT7fH)Ve{S(!0~b_&avJ>j%IbVraTMZ;BmmB%Gm z;;s6Sg{4Oy=hw}cE||PKo`lGmCYz!2h~w=|g2q11SZn#ZvD*^9TMqS0iK?RHbe~a@ zA4iY2yDYMqKPd|FF+_(g0_6CH!=5km#jBaA7(ESYwo~_7$d>yHhjqEQ`b|ccy6h)Ot?jqFr%_$O=2VZhz+bV&EXkvdZpuNt7fa2MR+Ow%R2%K% zjm>|kha-KBq6g0Xq)ofbcc;02^Fdu##Z)Z6j1g#-&*&E26kXlMqMezLtv+W^(4rpW zjWf{8a^JZ3#j=_6+U-GQI)B@E!(S8YPHDoORB8UP!utIn@-fhZ)y+5I$*rQ(R;J;@roj8Tiz8 z4~erwVQKk&aNKc~9M$__X550Av0HyW@8PRSviWVD-=~}9b0*WIDO~oCuKbigGC`6Hsk5(7HI4F^M=OnEq~xOj%z^AL z6n473&8+4T8`0=|+N&da2Oh8&PN%2OuPT^7@iq;vV`Tr+Ya^#+=^E1qw=I7afnJs8jpx!uSl?*G_936n*5~oM!J z=v3#LI^F5QYdRDrBYJ=21avwSI|QLwrLO>MwW!08J!Kcu+_dO8KkwryniGzj0pIC) zaY%>N?z*oG7~}O|4UL9tvvi&ZbrM`fd4f$0HIAHmwub@WyQQ=K*0!e?Dw`DC*@Mf0sw z(AoUtZ<494FhaXGP+f4dT3e&5pD~u!k}YnhWn(Ok*0~I-#xC}iHMxMk$%b@nzQ58% zrVKlYfrPGbzsa_69oA_Zp4(uWU@dX^qf}yW;O~c^{vWOGC)!+xx5$}z<9s-}-N7BZ zfJDjg(7J=HOe}w-APxs6x|}EMPT$0V{p1IXV}=2=n$&Dko-XlYG+#bkV_-a$ZE&Gy zub_I+2h;Jv!>L|n+NVd5dsou2+AhIqqZ;>}eNR(2ydT&3I>0k1`YiUFrR^Ulqpll4 zuE6O$ZKMqO`Mf&px9VvOft7;UTZ8iHTTni1i{++3kTrkXu#a3UcSmQ-gLHGeM8CAt z1>jTiub2CCcx*E)RN=MFgtE3lRDW#|`hhX`h%`>jZo^&7nK5ccuZP_ochvw-FBx1O z&!2Pr!Gl#UEYCHIk>BLY$s=xa2pt6O&>svgtvZ~&u7gHL>^D0xBYi(9&Cw<~-#K`y z;o)AVXaau@SK2YLZuwf9_^+{V4jXB5x-o&U_U3YSat7@(Ih0-e8Xni{-O?J4%GaIe z7FT6+J54<()ZLRO@T?=nfn{g=6XIb}$7rWlC($VR*9iE%9YjpN>3SR4a zl0(vU8fbho&w~TS7{vnWOY`+YydNH{o2>I2S8;#QtYlV)E}!Q1#JQ@wU{fDX6N^f& zBO+(iro|4V>xcWYIm>jrhd+}>=6v6r26+a9n(M4IV)^&Mq zp-g|~i>50;u=L`soH3k+x|_t^#V?`ud@q$NR2^anUF;T-=noW{UBHk@`*ACK-d(Z| zOSw*3Eel5*6kyNpUAeiN{ro_u_X}h@7Gq0BW69iLZqLR1yyO`^*Hkov4vTcOxSE;X z&PPo-3Bysl45?~kH_2f0;G*4Jtn^LiM}vR#zISJn&}6FJzL){luhX>G`u;reoYQf? 
zymskg5$mikX_m|nx5w0TBLRJvdw$ljTh~mXuoxbOtMGa@_VZ#F>Dg0vZ&{h8tEI}@ zuR)Mi%!o%pJ>nI52NncXIF8)I?!~`My^Cj~-Bg2?Cj4&~QRuQ*Fjzn%^ysmtaMpiT zEe2!m`pf$xy&g>am`+oq7#HQ_wP}4dy(i~Vz0fVn_hZ)3yq*|dhZ zrW{3{=jA53RF5ObNTx53V_xerjc$(@BJcr);mC&0?QHtliQi53br_~mu@ap$o@CC`@<=s~~Yd?5#rLf=*Q;cO8d7na&!L7Rt6D3UH;%E}0_U0cJir`sxD-{b5m zkBmXd8{HwiFyNF>U*ci$dRQuziL1SG7kz4cWlDiU3n{*W_C&?PY5x${gX4cQIH;@a z-bKv#0FqH*dZ|N0oqJes7t@lj$Pfm~xv|cZYI#a*rd`iFdN|r_+ISZSgVp4?ANb*( zOLs-Q@i}Wp-)&O%60Tu%y=>HoJyC7$$G63O-=PSs%_-9{y$Vtptw!nAV4$$K$7Y}= zExTfFyY*nYo9;&y1gWx-NaTN4ToCICt5&NPi|aL))dS5wuangp#2VY>RRt$}O0~`U z$eF?GGCiM*C})zi7)?@be=2UXWjWV{Q{^VxeRW$-tZEz`c~ui)wJk=uyQ(h?vyWE$ z(d@C@E+3C*aU6RuPW z?oQnaq&crE2nbNlKA>!^SPoz})ATZZ=%J$6z>zqixN0tAbQ@oGJ2&#R>qMq`cb`uO z)&Uv|lhcgp%yRZTxCVc|H+raXBcelys-1_s0L|w)E4HsG_u99OGgjyHE+CSpNDQ*dfS)I z*eHY#Cr>?>M*EnWbexo)FR3}!t&Hkaf6YkAJazN6T8szB&2)+k@2O~pt2;PiZ#g)`Hf3k7e0nODrwVi%lC^*!(7+M>5x-(hCfZBoh3Srqi^d%<%Q*Iy zv-2FK+mkwh)F*!h;kdInl@9h$+wZl4o(`Z(d+pFwH(9XI{;=;)bjUdo!`hcFS$a9& z$=c2>^WB{GWgSkhGo8=f1Y}_2IECaJSO!~NUp$jyNgdrGqnqdSo-43RWcc&uY$MDr zH_SGbDE7XyE-HVsSIv&TL~2)B*5-6%Htu#r_zTnJ%R7H>j!UG`w6XK5*L5VH?~mD| zYo`aT-W_HK%}v71oyy?|Pc{!fdKCJ&mbtMc-fZrn!FI?kM(cSwIHM(SO4~<>=&E(* zp@b7v=%V0|(IN^=AR)Xg%FC>3Tj|NJlRKJpCH`~v!gkr=zde5Ou$X<3EO zii$&VevyB(1r>~*&f$RQ$&}|Sre%fy6T=HI2eLBrJmx=Bd!GCcL%tE-V?$C%VUu||+AJ)686dA{( zZa%K}-Pl;yZsMBkbLecdU7pqBZRl?c0a9Jh-5kUIVhXqUxnMv}uwraTbbSuF4sX+ne5JSl7mHS1Y_yPD(kGye_jnh7_#=K$WJJFUkQAj{c8X$GGq;J** zteY#TTnubtWp>?Ci?rD@a%2D+7SFvoE-RKgipETQh+yv-_WmoN$NmXr$rZvMO3r z!}Bh?=~W2i_gY4f7s}D`5b_Aj3TMkSOZisDTScLhN(OJ%%e_SXSTGgs?2EGU9(xcx4c8tW1>Vx@M5t8hcHez}Vk6&Uo|WEay;kB>)e?15&FKMT94>=tJqu2w!8ULi zaQXoJ4Pm&Gvr?r&Cl?L00i)8K>|=|50D+}WLW;hW`o^D(?X`1RCro?EsqlYcdd%ns zZRtfbC~|XbEY{_CdCR-a;AIx>-CSx_{R;1#WnWLL=~{eFwS#X2pocRp5Zgu4WImzT ztUEVHeb&W;YEz#c!;(Fx-}WS?Fb-@LMRB3Sf#hB9kN-^P}|z4++@ zrqM-#8KbeMlCjM$PVqKvi_0EikiSI1=yH-zFYtOOVpyt%RO@&2bk(QDxf?&L7=;nDdyU$}ZmGRKsb4BHa%BOOwS~0riWKi~9*1*OTPW@phn^A#H~OU00~TPnyuLVt4L6+K?Mrq(En5Gz6Og1Bma*~x z9wRg?buZq>X7yBp?ov;=Gd)7}bhuR(P@T44v*~Isj^uyher}y{w?29IaUz_lN#<|N znKxP;8H7fU+wgIP;Bt1EjyK9eo%!8Dd4P0Q9L#f6IW6Xq9;x$TF^gKO5HfDYhnps@ zd9OtPO+d20%2kzjs@T!o&DmP(RZ%`F2e|MZH<=CUAkUj(oF{w5S*i<{ zy(cL(9qzi>WV#vS*3n zQrusg9fLYED|fR2v4QT z`Rit#^)T&@jB`d)`Orzsgr`J`pFL5HCw&gCK)f|1bjI+C{ZcNl1hU>Y3 zUV1O~4&rFW%0MybCSSac!FBdk?YOVnrP{3Fu&dQY1uyJi#+q{ zU{}GJ@`k&A$xS*p{P?(r=J!5eQlDxUH$S`1aToWtlje4$%nfH02M;snsUTPcn?wH4 zI?*nK(_mo_0mz$67Q+*nZ55;WwFbyhJx#Kz7)$75&us3-Fyp}>KH6+)q z8d7%Z;jv^+A|Tr5LmLm&`Z3*v%LN8;H-XUFLh@0U|+O!@Gk%Rf}HcDqF zg^S%%-tBk*qU&+F68z|L5Sxv_WS)g0t{d6Mo#(k1LEI$fBPyEP={)qiirFBa22MXQ z$gO&R+EsRx1H?1ShQa13StaNzuzs_vvqn8l4B)(Q{6d$@hIe0;iCZ80zVEncfZ~9})9qNvY-2S`W+dYdDlqBhQZyc{e=XO(>?yyJ}T` zR^Gs-ul_@&oziMm=AKd`;i2T51?`B%9<@c9l087ps@G;YVh5A+{z}~sTVpf#r_)`a z-%*>Cgy(qLV0O*5x*dS*PGb*`3al^Deaznny6YE$;FKnGpEB0a68 zkj&g0#m}YJ^?|W_66z8#V8q}8pA6_y08xr&*Xak*GN*r8+WBNSPS~3@goVL4oI>&i;mEAavojdq37zL*He8()hHGfUFv; z+xc^}zbyAOfL8NH9{Ao)N|lf~jDv8N=15WlLP9g=2Ze&&!;H2(qYg`TyS*HfNi#Xi z)zrVN>%#`ZdsqIkTJTa_UmuJpE8O#Se~c4OP;BpzQq0bzF5nlrPH|RWS0?nHpR1Yg zOj|m?@l)$X%M`N;xKfyZ2xtSk#kce%!S%FugGk=U4w`wg(^VMf(_3QCla7y8(~YK8 z`VB19WedRtLer6Y)TPc!R5tI_fIl?zIbbt})3)=$6~XHa0(tN@gJQEP)B&}Z{MmF+ zEQgQgrRj>{XVhl69sG@2j;<r{aa)D@%P`zM(AtRl_nzLc_oZhcYzJoJiojrHZ-gyw2I@NY*7xd|Td)gLMPAHX` zTtLJX+RG>c3@Cqg;_4o3_Y4jV|8+$K{TfJHK1l_p1>R-N>>NtvalY)@VQicjw@wbo zyO-CM)E=af7hZHe8?e*a@VQ@b=dO%{ry1|B&S{lyCmU&hc}4%?OddINNTkCy*-yAQ!E;(0 zm&0~r-yR+vW^BiuoB8^(IdYLaO3UcZlt%0nujkG)&w~|ApLASR*66gp0+$QSM)8u) zi|K_v`zv>UPCJL?XuxSGp`30?I4}g{j!I{hk0@7BHuuSPXCIZR_X1{bIyQk1$G$)A 
z+>o9I%6LqV0`$imuIuH#j;ej8-B00Vem3N>F)!E0@Sx8IG;np!bGkQV?aq~S^qL$q z{_?D?F);3lfx7iXtft`+N}#X8nQpq(Xum&6t7biaPNmJoKAyNmr5|qX`sq{qbW=Az z0<-mWe+t&Ifl~c->s;g7+osz<)Z^s%IyqYMgck34Rq5Hv5SIpD%=zur2Nv~yxjIZW z5!HuxJ-J@QZNh~Rx6Jr>eIN}Ia}IPbYESpPa)=&3N@t?p_ktxi$H z&rElJW@o-}+Lgg7uuAXHIR0?wuAlK(-&47tKg>-o1sh1c&ThvUo3E(B&Ggq#ba3tt zw5|xu{&}#h90&kii8F7yeeUzsH3v-D3@@QuZ;`4GPn&YyMrAF|&USl9SHhswqNChR z{mCUf(h#0hns!tg%hAC{-UP+y5b4@(genbx6!JAYqi3f)qY9u_(czfS%iBu>Va!Z* zo_9e8;0f*X*TZ;`FHu}No!!SOkT0Y9X3Sqq6y24xU0m)g^Ypo~^Qhf@-2!2@Rwwk} zdE4vdG*kD^75sm7vphMK|J=gE0(-E|meeddZ-(GPT-QSy*x$fiW&0W}To;h9Yh$T@ z)(VZShR$;~xPV<_{eRPSZcU1!YZm@16LU2aOayYqnb;Q!0-~H0Io*MP$XNsge*LWM zxt@w0wY$2jv$HZ4_}+J|=Ls!Apd_^iGhG~oor>l6o7?06Y)W)HOWb^Y2q^2!35a})gKB;88+!tKMU67aN!DRirU>+xIA!qsGd9-@() z${4lQ;Oy0lDaH022t*Oiv_Uqr@^txhJ8E&N)O?lw2c!TW?M{X}Ey!^t?- zexi5Hk|>ao{5~m^Pqyxox5t`)p31dP5tlls9I^Yos_QdRme}Sl@WQ-z?E26F_i6Ka z$4N{)BJ6|&vfSLs4X?c6VsQiH5#8fAo)S9AZH4Tc9g^2dN(PThrBh#dYxCEN$UOAGqqnaln0C(JTYbTAyAKU3`Xfr_Js@$(SbF8#gBc^KOGkB zJn0bRX-Qa@D8@zdM+LZaDaY@j;Z5)zm=&)2Zn8|$@}B!{c6%_F8=#+1dY0U zd?L8I8N>cD{!<8}davIWXn0B4_3oK$c{M4BI2KXK=e@UsbKO;fMpw;p0rOb%Xha$X z%(*84>8w3&9~uyeY>dpP??({SXT{v@yEc#bUZfy=<0Wiij<>&jU! z9u<-^<7dwBAn5g}SYakZMnU-We8`@!{=+Z}`+acw(|x@xk}O!HCmT0{51I-?>N9xK zKo!mICGF1Sls*XbFp#pWnOh8t4j|RjKDDTgk)(5FFdydT_4RvydM?269cfI7Bqae} ziY|%U;Zm`W@B2(l?PeK-{fHhTCTfZzI_ze9Uk9_J3Mv$9dDP)W@%Z@`{)C}djh3Qz zyxq#F__VPrZc6R77}pD(TWUn@O4@%#s8?)~Is8>y0<<0+;j1P5bR&EtV{C%9ukK}3x>U}oqTNGHu(+r z@}Qe}S~3mfCmoLLY3lZ1yuGt7vfMEqOWu<97FMO@(V4pG+ygKu!FJsDQ)R2pN2Q}L z`N2j$L)evng}ao`B>2%TJ{NqU_IAlaGc(I`-(5bh-S)K9U57CwrbATA*eoG?V4i*% zUu}evXsXi=vtWyCpIdO->7U#!Xzhd^;?>1x{`Rf@8(rbMDNf>2oIw`69AhJRp_#GF zI|fH;8@AxB{+6V9RGPV*mE3hQ^V7WIJ;p(&_!o+Q$G9jh_0orI4N|ySXfRB^4yZ79 zjg@URxfrc}*oQOstUAVcF@d>kqe|Zq_U=XcdO@{*quC^hpC3Eb3S@p1l_jJT&Y*pi z;yb#T~Grf=^tHqlnGcms(Up)rqi=!|4^un$b`g6}S>EbX zD2^&&2=eWich0TqZjUp3wX04(l^YZBEnIgN`dvvu@S4sd>bd)lJ5G_T(>6xf?T^Qs zaI_5V8KlcvcS2bzG!6jVN=hH0=f4Plo5+CgaBBAB+;eEuKYUR0&N7#A;l_6_A{X|f zKMPK7e)uTU*%acak6rnwH57Af%ImH`Dv`bvpM+{IWZB%nhv;;#1C8);LQdutw8Y_j z?_Oo^E)nBVFLzpgqkDebuRrs&J3%!AjE)f8bC`RdiaoDWd)q(#*s^85b7DJx{&H%d ztS3%-+r~%N!uRr=?gRuhYb;s`VnV!IUKKP~un8tonb6W7y$*OSV6!PVXF+=xJ=#=v z?PLxul^xYxy~q8%8%zTaOCgix=l$&TXCW=|;AAiD!i$lC=gb(DIX2iA6z=ppC*u2E zEPg&;dX$?FQip$?Gr+-Ry274+CP!!{%EptCPIgwk7d(DGnHyW~2K6dG0(6)MfCeFz z9@=UI%Vk!KFPnRL@|=$6j{3H{PmHU;5FOuTaPpdB`m(p@7;1Ou<+zs>iMjRCxL2aH z4zj3Qux$~LxBaIq=up}1g!!pe+ z6xkA%jQRQP=zQ%{eY_Ro#ncWZwc+t_{4vhz+2%w}_j~@$LAuC@&!_fbH0Nz4@CSa088|$GyxzY#z*Qw&} za!iWY>{h{+(rfAwHcP(tmOb^<#}c|Y%;+$DPshxB@4`BA!)hG>G@wEupN z3`#gt{wDFnDysrVU-r%Iv_0{syoUewuKXvD z?#6DHN-2K@-98n>nWCq$@f*fDVs!AdW`Z;yAHxy>Je`l1;hwQ4icwY$8SW3qZSPcl zbIudKrknW2-TyUzI^WO$ZsmQmg{4S`R_$@07lQBLl6U?!PJv!z?a}yhqqiiAmv(rz z!&H;Oh#nkQ`GaN|p7!16xz1<${Mkkth}!vj5wqpeDTW!KU#6p48(W(f&c~Huhxcb) zESX-8C?M#sZkgEB;)us@6`JSVhmSy!vF0D*`;~5PFTXv1Vp`jSc`Vz9oX-B33ZA>7 zhR@9xQ+oCMX>{fTX<#I4 zR$^Lp8{jPC#D;-uRTrAvE$7ot(9k~JJ;jB1xt_Ft2tRBux|_dd;#VZjKO-dyJISN^ zFwA(BAJYt&2NEd=$#=s2=XD9)Vo{_6ud3wBtYVV8c}9*k#VtBfRQowsZF_Jc7o!ba zI4hEi_)k2vZVbgnYRCu$URiq;sq zoyY5ckqI1LAQBs>%g45-o}}L;zpI^lC~i}?<4k1V8EtKueS1c0+Go`wd{^2_7rLx? 
zboeFL{VL8%#T`4vrH*)O_5kY(G0LHik}uMpo4mM2<&t)-qgqo1e;8;p}6tTf`@--bUSkoFc|TBO}&X(#rF zcJVeJ>!wbwjjNL^u~X8kxCde|K=I^xcfZkL6@_}2pcR70GLRX&MJDjkrO~HwYrIo` z`QvyO7-)F%Balv=`sqFTcz?m$|Pvx1sFC~bkl-T#VKbm@QD9q-5Ai*2u zFZZUOYq_nG_MYCU`_GMwzkuYeTd)Ivk)Z50LaHq&mnx|bx>UqT%5&Enj<3j|;`^nF zJp^{2fra(U?N;3u07(lcZ|BXBEBYu7H;I5XH1)zaFw0wnwQM`4{GFT%QJkwf38-_e z4Jp19!(nnneP8A8`a~dGXs*2uiFc%Y1J?%=OCBJWk9z9o@jG@osr^qm-=>JNf&> z`q(!c)KjU!7(RRGK2;j~gLJQdb4?Z)XwXTT{=|TrvDS9dcpHz-KfYX;uunuyQ=TtJ z$%zj91ghWpC@jZ%z*X6AC6e%N~J89^; zn%?d6UwC$0K4#+MM>t?fe-!C+NEp0eTs#TSLkp$<;-hRtyM-O97b2_5*^TmF{rD9J zJVG4HolqXDOn&^LFzVlb;njfGYswXZ0c)rh?eaIrr!g|*^L|g*a#+xG9ys;g!@V1W z*{Nc_Wn-g2?0mA5k}s^@8TK32^8G7RZlA6)f{Vb<+1mJNOBl}?CpP9PnWJv|DE9~o&!f0uF$+#xDb$XSLCU-4=Rd?83Om2UB{Q|^%f1gQBuA}`q|lmvvFvDtsgXw z7$e{i6{qO^vC|lTn?|W?^ORMeL=Kf%Me6|}6Mtp)XjA!t7ENj~vHC-#`29mNoT%4_ zanIBB_%tWsU1qzfzh4s1B!>(4>)YjXb1tb1a~*~xx)h#kh1=~;r_sjncM&E7^Qm3i ziO=wQKh|l3C-Ob@F~~OB$L*kDn`vP#T@4*hdg)gXA5+PHFq^kR+I)8+4>s(p@q2{U zZr?l;bp7zjX5RcJJ&+$Z4$C{Q zi(?1qD<(L9x!isDPTl3)6*;vYZ2;KN4-#L3#VNtrJQ9Q)k=DF5k|CIjlQ~7^iNhn8 zE#S2}t;hdPNCxZ-JTv-3VBs3#LrmwEZ!UXWih?emM2cHa_ED|Z0@BcO@`jN)>`&Ms zvF9U)fDOV)rNGj-<4LoOo50w-Ju3DR2q2{PC6VE@j;`iz$*S`6M`Jv84>%t?u zNbuk<*8*pM;>9cG=VIH`){T4V(Zr6aLwZgY-!Yot-?0R`-j7PwIzk8`ik?GLNL)~W zA`e@CeYz?RMclFu7SW?zGt-a^h}+=#@O9tQ)G6J780?EQ?+kw8Xl}>5&$0yT>En6272G%~OWO2f9H$Q%aacyn2{aAIMki zSiuSkbJA^wc!v~M-Gz`uoi2t$;)5V-sMN~e{!!`N#%Hxrt8D!PU9d#)oK!J4dr;MX zHvuf|r8xBW8p2Jo>~3LNz5MX|E=98!AcR<=tcj5=1joy z217jK;iroGt8ipMo57LRRxS#Ty`@S%@d8Fu?6VCY`*BSWrRV+qb&7HBH^NK3R(RWX z%QhEGE^W?XLGu0MvlUBoQ}%momSi!1P@lhO2?>tw64ENg@eY>IBO1|HUmXsQRMYP9 z71sqhMis)|YHEK_YDaLoH=}tiVUprRw9hPiKz#DkD<=@%oE3KU`$e5;l=wD32F}YO zjR%$|_Rh%T1MVp46l3(`b)WB>*WX{Z!yBv!dcV#41&5}A%uE8 zR;|?5+SZZ#0li6XM4*M&&gq#8KGsdPbY3;|Itec?)4vhOuig;Sh7p{{uE{Z z`g-T@BkYxXR4UhEE~q>wp1pnaUv9hzi|UDMo^--G#O(VM4_@1sj(m2tM8q?xo@@4C zUU=*IiZN1OuAG~DI?bab|KW>O2ID44FPG_(tv#dRZIH51n91lS)<&>@f0+KBK03x* z{(Rk=gZEte7UcWKOQPl58uVVx>jc=Z^C{C1_g+?j794hmtiA354>g7yXv3x|aZ z;r3OgRV@c{P@QF^E^D0(aZi>4-k&ru8Dk#IuGRj$s z@0rM%OSCr4Wu}Bd87cgLW5%?pm=N1%p!-tVd<_4cRl=rJiF+*^=<=ArjZ z`fPi%agx>osf_`Dy>3NSPxh^mGs4!A#vWxwzN*~twaBKRp}=NxQFa5KQeWQNI*0G? 
zQ?C);2Xnb9SY7iNdv7UD$Q)b~76e-$D%N{=RxQaE-rLV~oz1!A`ID z@xzw7#PO-tXP6jt9JQkL+tepk+Z>TYR7ZbqOs{Ici(E*n)gLJXX6be$9UcY5)+&5;fymY2Ck{7>Ewu^b$Hr;SaqsMeQS+TGk=xoQHj#qLRLR{ zq^`PH&8zs!l0AQ~LSvt^JE~wT+Ko&ab?G}>y3}um=(51xRgF*P&A*FzxW|&nXE4;ER`m(cXJL2YJ&wHL0~} z#5b>h7M@&QfKMh3RrHX;Epw|B2FY!0N18G;SkofFBSwA%ZNPoNzSMwUT{yF#;`CRE zcS2)cTL98s^0J*rgeTw;QqFKZKiC^-Q_kjh!%>o4Q2;pWP34k!+^2Rz!)M)jhyu=U zqKWtKUX362=4%DlJu!ff-dqJg+_}WVPl6=UHDyl_)1pTre)0R4mF>^O(KQ3+_`9e%3tJBA zKJuuvP66IxhK+U%Bcz?|9vEDx`74n@pIt^(;Su?o9p08GWK&v_K-y>T8J(}p90`Q# zL(ui+N9^UD28t|HG$;0XTJ~|;x*dOi#j~wmoDky;HUAia=#e>AZ<-V(=23NfzNOix zl^v526(QI9o4WVsN9XP_xuOx+$LYtSWs=hFLj91sH~N^<EzCu$A$+-l7rz8;5 z>36BR_aV`PY^avyCg{MGHNR=_H1)Fz->Yb=SsiBU8)c*dRKAHiWa@+QH~#lLq7G1A z!Sa&V!0ol&X$^>pY!~fkv9=1$^;F>9F$hhv#(^dT3%7mvdAxP`r?~QmT{d9+mFI(W zSDEuZ*L(fzc;Al^nb`o0?ROG?^K#YX9E2*@y>BhETLVdnb3_w^7*T={IAe#L2%dcs zWO5Hs`U!~}u4)ue30Jh{@uj5Rj|#iC`x|ZG_Jfs#Cc4oF$@3jqL3x)|X8wm<14uFd zQ7+xd#03*3jH*T~Q+>0=^Vv+?ET>DW(iE&D;P;R9^1RI9ta+s^y0-9tevbR^ul$=I zmYg+=oNy&}4!#+=SUG0E@mRSxg;QtMk-)lar4C)^8Nsy9@@EAjE}<>G*|8s%L)5YR z=^lcoXTNPJPG-3g2_>R|4L6|l4eVlUXnGyXQIZ?aw?y8+V!tTXKELe%9MTRhS-5A( zzUN0f*x~R_kpKm^@0*-|Z0ofeC{Wso4=Gr1Zsjel5j*b<4;iYQa^|2=mO-=e@G-E&CCFh!XSq^RB@Pjr;?5 zW_0T`tmEZfmN1LPHh`p_|DF2Zt|>oNAn+9b!zE&u?#Q|YN2lh@3S?tmks0>dsPU*L zs;Y^^epO-{JCf5CUGoj-;FjMv;tmzGfjSPw&y3gVmlnD{2j8E@{pzRBA{A$DvM3qc z>-+PSLQ=F4=XYIyB0vna&UnroK5Cd#3)7rXbKCIrN4#_g^mEnv>!^ zbW#9t=1kTV5Q_@vs5F1Mt(uF+{j)re0jk=ArW2gCiae)2rQ1TACACh?nIF!BH2<^) zA)_WeJg@U5LT)AxHtd&RHyT)|r%r=1&RQDv%FQ{kzCQ5u z&gw@E7TmCuCO`{mOS70_)}bdWpXsC<40NosdX%F zXDN4@ZnzniZ7T9c^nJrLMzZ&xzkyO_BHA8ZffM&6YTA~^cq)v{i=XomMI}2ainVI+ z>N;J2@Bt2r02#M-Km@5MsPa*$RU42cl5dMIZMCCcBv>hamkePUs(!q9u<3Tkw%do) zkH=_8{wzmHB{2nqo_q4eiyYf-EQN91R!48(#r#~!+%wcj3*ehDK9&|R1i5<#9)#-b z7yIUCyD>h7X#{7m;J?K46U;1p`ukqs53Or|TfY4Hh|hcW%_KyXi_4QPx=BgQo!U`F zNGtsFHAu`GZ5_4ePcAsVNG8gruYE}Ck+jB}65Zl1&JWkPaZ9a&`7P6LcvO=cP&hSc zq~`7f6~-^?%&7bv2reLOON~qkW2j|si&(2E>jh`=Qvw-1+&9{ z5+!(sAq_Yr`z*drXi=v^{nd)AcH?8-nUOw*%9*yR>%knp=AHsKQIm;*KjS5h+_|j* zddB4^o<1b5r0-CgSN3hH`OTbY-RhRX?9@|vq1f_(>$l1kd*(UZ82<4KU+GJ7!GmJ% z*3J|j{LMy@e`jN|{n?qzr!J!(Ay5y0B7ba!O-S3$-pXG!S^V?5g|}0>tC{7wm4xJ( zA_dBF=;+?hrA51Kj$E4`h_V5y-;(B}*&AT$`~aZLiZksK=!{FU@JOD=rA;@Hdo)V8 z>&Itas^*m5>5}ZGK1obBc|1SW=kfcXPWLo9^@x7blZBRIIXjd4{&eX?nJ=P$arf$e zI|T7>`hbN2Lt-em<-2~VU`yYE!DU+p_nYzbqKDkz*rkY#vL(nt$0N)BIc&-UZ9eD; z4z5+S%tL5SbkJbV=(y)gB`}zkEsyW5+MPolJ@M17F%lbkwt;sKJ5zPezw{NCPIxH* zsRNh(b&f~(RlG00!Lmv`aHouaqp7}7;t2_DU?TlTAxU#+73-KvCj+7A*XQzHc1|VR zNZ!Zy!9fZJNVvOv+)-Hu3H6l&Kz z`bsbRFWDHqvwe3n269Gu;fsf{$gX|>oVSNT-5K;~Rfkq*(=}7croSBUjW6)|tW$?) 
z5!59N!P)PX1stoKGWYk7btbCqCv!t)JWke*8(K@_hE2qVFdrVMWw`Kf(4jA2GkM$g z<(+vQ(KAPNxxrvJyIfa)E_^uj4{I3fn{zv0gAlEfN80Eb8RJYwN14ZvH6WUICSaCv z43f<_ql$H~Cc!P*M_jv>UA)?GA2-}@H$E@I{)tPcQg5kll40v%vNg#scg5t(%)T*M zaUO%32lbIG>2D;R=nWkZxY!5>YL_0-`|KP+l7H%fRf3WWmfOUCwfa5n19k5c9`!IK zR7xpBjXf#{h%iKQ?RhQCuifEtaY5spxlY&LfTtA#Ew@@dj_d;#sVKY%-HZ5&04;^S zk&SwaYkVtmBom{dB5&XJd%sX`$v(t3L?PfantL-R4;@4~TofH#{y}~itGWA2o+4sC zt?U@iP-|QVG7BbuE<0ky^m@ooW(&5|Z>#e+(4f>p+%85TfH*m3k@%>c6!Mb@V&~cbSkn&}I}P4m@lZ`aDMFh1$PY&U$_y-}%wo1pVCjrK!UrV0OsjTbtM zCOCXGN^9s*qNE~}Ftp86L!N_o#NL3PPMh}mnxAo7GPhSLb)m*1^7a$%Xf%JTc<)n| z`_-Sv3K2km;_AtQSK;Zhfs#kgoBjU%9zV&B#u8>hPGX?(PLIrvi?1LE-mb5*e0Mmu zXWhO_HF60tvTU1K zT5Z2M;+yS_+he@~Yf1plh+tij7pC=#=Z3+u)azV-qnWm-cNmQ(FFQo^j=eYGMDdmr zMep$%l)<-HiR(qb@b#|wipkz^0H4-zmu%COkl!jgG#gTs{rO!JLhdK@)V@dZ4Cm|z z!Sr*$1M0)3#S3qpOt`Mu|ToNpm+M&hQFB-`X)sbV@A(cR;l;LAo!uxnJ z*T9y4(GXp~`Y6S$(0}&c#%{8$e>dYda<|_o=ar~s=8>l`yjTSt^n&%INoh9x2}jhQK3oOG_GoQVaQNUL{cLe00!| ztJDv|u?Z{QZ5YT{ZmXT>)N)yFuLpTye<=qkVQ#E=TIVV#uJ|i|eK9Jrg?X*N(tD3) z+&y=VKivUOg0IFX2PdfK4%ak8>K>9XzjyUFU473gna*SP=k}|T)uo^)%NdmD#Z%gU zZd#vYuFj1?1#L>zRC-)%|3O*si#xYTI0jSc1Nf4aw@$n2gm3v(f6Km`{ z11fh_#+;M;^!pcUWN;x*Uc3Dy3L~ao z6!IyNew(X5@TQ2cAKR(P>w*?o^b}g%H6%i6S;cp-=%-J__W8Vq+VC`V>Sa}bj`0W_ zsMh1z5B5`(?}{PUq~fu-Wdx>s`9Y98^W(iZb|rNK5AZHglv7CN@B~QB$R3)tQa2mq z{_YR?K8Tm*xTEbRr;UmAC(M~#tVIuFgH6#%g?-C~gp3SaL8%mDr~mA%*?q{=--)#l zoR9E=ZK0TrJHOo!Fq1(1OzNM1a=dFa(CVJ}5Q6D9H|1UTx!7QSsi$1+_)Qa%U#n9t z{AP2;^7+768n3(b2oH1dvtbwTsfZ)KS@_G@?GszC+6IUb)2bkulCuHN&K4VJ@dUz z-0+}Qf*Qt`7-RfnASQMWJg<3Bk9G4zx^EFEyV;2Ua2=Kvj~o198OwC)NUt;>_$`-8 z2`(ttsU7g_LJuE-+N|$?QkCpQw|A&E%v>lKlM!(!3mi3fYRSVJE#Cb-nC058$mw_~ zarfc-C|t++iV-=);EDl~5x_&}!;ENI*dxRIVwvPa`-jskK*$PsG*=r$!h6gGF>66! zc$P;oSIBJoY9gc~=+UiBjHY{V z!|4dNIySqe0#D>c`o%d3|8_5YkHo9oH|-XYOy*10ReEA~suSe=@Q`;XrY!yi2)Z@a zEI7N@cDC78;P>Qyv|=zWUtx4#EA?GH4W21zI~w*cY|z-7cG=ruA7j01T-ELu7P2Vj zW6ZWh^1EkmhTV*R(opO$2io|D)!owv&^EIk-#pqMhALcGpwR#3w`P;Q#?!V*-s^M= z%1OFJ2LNK?bpV5&j~2>zNS-l$jEW?;8vqb?iBv$;TBhjmFK0?itdqrn>Cknz8r93H_=!UA|ks> ztG;Mg{2K;+rF{u96Ti^cDN6RrWjaSZ7J<_qq>|O|Fd**Qex21cNAuu9b-G*c^Q+87 z!YN1{$tU+~ax3YJ@#tuHDLP~Sl0!wt);TsgFe`oyv?M_TKGHj=$j9}k;}6aY2${_1 zK%TCHbWL`D)#_o-2(}gjRwNZm?6y!1jazklc0De1ne3Uk0#JI_Iv$DQ6Itu&X`m)% z$QO0EbwGfe%{R<-o>MdR=n0M-Q9OTK*j^x3SM;{YWf(b{dCvBZrz}8yNUs|SE8H{l zaqsMjzrG2u8-gP*pQ44lD$^oc*1>P^WN*M{;(b+pvgC2XyC{EwKf zhL;GUes0{)4xP+cH-5j}p#+l7v$djpA_ghLM1+x0NUg&kKe$Vr@e<*xU)WYW4oJ6G z)V3Xj*CnnAd1a%mo>GtS< zeF9ff*IUYIZ|s&pJV;E8QUhb*Wz6V|uA;$t zv5tEddfww%S}e|BJb*6pOTBprUE#52LoVr*4;dawog_Xsr<(ey&Z3^(Duar2I^Tcj zQNkzGF4@zjcKe+$m*OAJKL_enj4j~t?t+Zt5cErhQ?a(N@iIHz*U5bDTdgL4G-c?% zs+KR?b+SO0=M;gt_S4law;SS2QzOs5B4I$ps9daSjM#t=E{T`4FzaizY=CWTJd@wk z6!-|Q9a-TrjP3cR?%$haZ(A#|qR2iES%{`7jx0?5U{h+PdInfx{Ql)!X@Ege=HcNN zzYNz1y=EPz0Dz!>*pMHs^z;0G+IRO<3wN_2(oLA0uja$imF%8ms-yZ#D_7uRw5oxBJa$GM@qG(Z0S>ftG{1Xpp}zz zDt*8(?RB=Xbloi0h%q`)W!3aD-XYZ8mm>LGa#jsPX@dEj-W+>wonK$NYsuc2gb+cv znC2%C-qV5G5IBL0w!SUAJ)Gh_qfRk;#2?QV;?t&2(P_1*8Q`6*t>{p7f5bIN0xiH+ zUs~+;GWYa=2g31fe{yYqu5;O^Q289|A%jPA_jawLyqnEovB{me;>7MLwxGK z-VN>BuH4N72*|?@=SHczlY->Ns2qMW8;&Y1!$@}+TrKK((b++qcRdk7#2XvTizAd% zwM}U!ivVUKHkfT(k3!$2I6&=5~B&8`u|2!Ownspxe=Z@6mHVJez4MAIaO; z|DHd>014t}U&gk#VorE`9bXOUzoh(!)S~-v9Y81S291fLII^Mcd3si)xsstwr50%5 z(%&P7Xz;29QQbq4@RS5E&Mj5Gy5jAY;x3kLoAzyyBE~CNJK4>gaSYo(Fx_v{w8KS$ zcc}RSJI%m8L8$`!dBN)>|Y0PyKuMYH{AShMaAKiR$Oy(#+2aiv01vz_6Dn&mo& zFe~nPE-qo8a_$HniOY@Jmu@T=B2^n%JBo4#nc%N`2|rJN=-M`;l)JkVV)NQ7#jkxZ zH5%Dm7#)PoJ?D0DD6t%xzNFkws6C;6y`N&u*|mr2Nd-snU>brLlioGKo=Vw+YbNj2pyWF`zk2*NT|g|7voh&dFDP3>EIwhed&LD0`C+LOG=q9@21VCi7j;ZG4<~gzMTiJ=i8F_ 
z<+uu8-MjIW?rTTcHpVk#j!@wx+}yy${EGzzmND*slf;Cp!@OA?@3OS!6sBD$1DWpn zjtIkAlNtOo0^kEbPTJ{B?A`E)w?Uf z`x22L!b15MVlpR(ub=*x{N7i7%9T9hlwfk{RK1d zEALq#D>P#LEwZsXK5tqBVD>T-i0?pSmfymE{edeQM8}82X66#~HS!U7?)F|B0O*3s zco6a|ykdl!+z7=JCF+%-=hsZ~oH;E^6Jf`_wApNOeqd z@r(x;C~CTA&Nr;f-k_i;V2WMe;m<3i6-{dxsaXi`@O=sI@QpcJ-p` zJrrMNGLXBc25}jGK_MJOV5_-*Ihfc_F)Aj9fxd@oA1u4m{v@Wo7auu?q*xeWpr;H! zzH%;ilFVhK9_@RJ_*~*l&6=nsCab;9BLdGlTkb1*0@Sbw;vshkE$3Y&a_;A*#lcT=ph1x ztKrdYmX^e}RR^R#P-D&^tAs?j)^>Ln`>d*2f%jZ;o4&;EEi~Hw+}F3Be!Z?7>S}d~ z=KlSG78Jg2nSDP~PiTvAevuNzq|R5_gDN~Jqc$;5i{u*b{-}%xvvL%#d}5pwT`wQQ zh3@LCtDNFoE+*ZI0i1$=FkE9GM}<;yzQ9?tsmR=Nhef8EdjL;Bu)psl9-=>urLG6h ze$&_cxq0%3D@wSUQ`ctt^@gcaF7x;N_~|{r%udu9#nRp{xHeMSgGdT+Pjkj?u0QKZ z-5~S^2nsG{CG8eNzb=R13~+_ zsCZDpT#8@{5 z`Qb+!5KsPlbsupKeqehaxn@X@dLG%GCrsEV=u=%@!{nip`36ujI% z66Kby>@0uMk29QjI4EsFY#ytOtR0Vw+5MW{T4d=N1`GA+zdjwD zEcR11AIO(z{*KH%*vwb0_U9{X969-L*vsTL6{!?7xQcVK!%_R-Q8cE%Hr)2K0z)PO z4wELI9E&jZs)g0H;tt@ z&OStsPrW9xcN7w?Hh^R_#@5D1O^i#B8{XcY5MIH>?U9%u+fby3t4_#vei7mq&eP#F%BZXbm#yV`K5yjfIp9)Np&@&1;&{*v7e zp63P>snw=+xae!0In^Rvp|3yp+Hz8)ZnT$1ZXP4L2(yehgyQr{ci{hjvG3kS*xrDGwRadgHX<$_Em~5_AqU?#l92z@f7xt7K}V8 z{fQT(zY+Z>U)grWN*f^I+J~ThRh!z}?yR}rBQD$8`S%(F7N%NZ1cazJ@rUkU9batp zy!aZWMPxC5zU#m&h^m%ZOeS_X$SwG=d&)vAE!u0W{idR<#qncoDX(zMwxs^DZ zI`vJ+ue8&6MI%HC6(jnwht$R!8`Y_-&CNVIOiod@tA$4R(Bg^(|6U1(wzY_si7yW& zcKH{dMe^qap!Gqr!e8V1d6I(Ur{ipNsP1t{Fl-tEGwZp5MVjF!TosV z+M5M+pN`U%AG|-$Yr|SZNP7She4>MDHf(SML0w9wOvO7(kN|7n34>?~pXX=^7SY8= zS_>07!XWsh&Q=>^2cRenM11wyS18O1MKUQezLMU#kp0bg_|Q8&)dqwtXbh;Ee@vc< zgOXj>jyvr1>-t*`A>A11?I&a7`MZY4V@n5}C|dyEm~jzw#s&NhgPevT{V`i&;)V8E5A$e|sM;?dkWgfBomb<=xQ4_30GgEi>j{PUOp&yS=1Sj83p zkFN-RDEKoSj;~v?zX1C)h5>%5R(~H%QeXI)avFw64J519q};+9QN=t{`&{k2;ag!S5xe`{`vgAf4^V)=fD5Yzwbo!!bN|7`M@WuUhw&2 z%Jg~Wx3=#hU#aks(6Yx58QuKDfsT|^4%7nw?1bN6;-|ym@AElG4oBIpp8KmEiL|?(jkJmH7MoQUwx?@f08%`}g(J&i{M|n!lf} zdi?|b0mP^8^z&X|4EZ*{Yy%t^b+hR&Kv&D2IjvDCTSj&2ISX>uzlGuRRwYRdi?sGI17?9=p})1})L*4KF+VX=%P+woE=!i6X?do&gko1Xp#QLQ$ z_@K~-SDYiv2CE$Zz)&jU)@V1^@plh@SB4P+b0h!U-a2OE)+gwX<`n`<@vW-kFJa!e2g-tpofuA#AtyBz5?FKZ|`6 z=2wVo`TgWT-7dUpvAl(B#O)*RB+h%8Bav3#y9Z$z8qWii%*d@gcWh0$;&+J+NP00t zXSe6-uFrYWfAAEKsJYI%10=E<5f~yk$UiTurDY8J9RLcLb zS-XXYqv0GH(*|@4ou?hzfamVF(Xv`(yVDSR2iYl+Yc754=Ld10#;0K)1$1X$l+AI! z<{hJ&f3-a_Vfxb~RXC-4aoiR%-*D#KN3S{Tv^S-?O%vKiAO6;JL@^nrK9qX2qWgFM zmFp}x5bt*#VwGY%_jf5n#18#`i;76fAq@c~0~rhz@8uI`B@ z4=80|DfNqRB5~^F^3|`~3h7R3`!EZli4hH!LF96+c zD}8;Da0`O`ZhbtCzlHh<2e8a{>t2<~H#CooC|qwQI8<1=XAXpJXf3FcG zf0jA-sWo>K>5^9)+KzDQXO2c|H@$;9)Q<|_IqGgNqPog)+QCOTmaL7w=Bba33&hT3 z6*L&R;dPtSTlZ+PDyYg^aL!mhKB`7w zr7Zk7==FaK=+$QiclaEA*+W5mKeln$f1y(1T?pL%|DH00nn4u4v>*ZAwrS7H#AC~% zE=W&}yfi$Mz1>ssd?XJ*hL&pyDfpzNe;GQ3pBGm(GgyHH$L#6EyE?B1)**!MYW=tq zxx*PS`*n|3t^6k64^Mqs@)iO5V!x!Z7_UE;TXF{Z*|P`G@)efydTSn-nvp{@e=XL_ zMpzFLIwLnf*DZ51bY~tAB3s&(tr_rgwR|fdG4c@E$mD1eB4Es??Zz(Yx+T9${ z_orz3Ph==NY#*QFFZxPcL>2CnRHQKnxF{5F8V9m+Juj9{zGi2d)n~+_&a%c~9gqce zvBa)x?2xP)#x|HQm0PL0Y z5bQ~Ut*CZRN6x-a2Z3QasMRQkBPg<7Z1Gqf1`+ot9ac` zio17wu#Av`3zk6P%9nrWBSSJfvlJViYro&eH9npo8v34BfGa8c3Go{i#oYSY>ZSb z`;y-b{&?jU*NJX7y9`O z8vPphy~1Id0#~%IT-$x4u?eQMwHCr{2tXgm=|+2w7p^#h zS=KMtxbiM)tIKj&^ncPUQ315YE%G@jSPY|Llh%!=&%b*24Ntebx#vQ3*x&bP^$o)x zPmfuqQur#HB)xasna+(Ho}gcXQEv&wdHp_`MpuUMe+^$iZ%3M{kzq^1@YCy&3ntR|ruIc~; zAklje2E98#5PkKXXSnJW=Q9uMMQEu?A{-2I?i?PG(zA2L&E&qv)z6K`;6j0b1-`s! zUg%x>f9R{rEuEKlwtEPXx;tUaSL>)L1U2Fu^44`O;lV30XHw@?n>%^GpCtwn-cTk) zs&I}q?n3g*uKvB%L)*e}1kGUR9J&kM&(&tG_3!uLlhdm7D2sK#D(;16!6wvU)UQM! 
zjgaVLSiatNJUr%r@G3>iPV@SvPN=i^-i~$le;S^nc_Ga*bVCjDtMS1V5$3%njX2jh#@w}w< zs+Z#XjMxw#SK8EFi61is5unu$V=d57TKg&aCN9^X72hNdSWYDJZ^h|kI9<>fw?B8{ ze@z?SHdIve&B=H6-jjI^MNeT}Z|GQD{Q%n%C%=o{HPh4%6LuJRO1koA$ar6V<4mA! z-srjit*!T=?oVpoQ7fLk+7L6CPu?X@_Jvwr(^t(ABoH91M4kn{bPTXTt_BLbEa zf6?rZY^%K+BJ*R%k5v)P#EL2GBJhOfj+J>XKz`DmPVVk@aqak^3q9%e&xe;|P~ z_*oaKSAFH?hK-c$e@>{TYlKo#*r{V@CwX|2Pc zGjL3+oNX7nOOD!4oPjqg{R+32_dO{SnFjo8Q_Xy;(Ne{wI}Rdw3>zU=I0Ncuf zB5I}xCbXhURsyNN(9EB(hL6rTG00h|o(^EdRi7arxzLY<^b^t}@X@04D)NjZkhK$c zXd%z#??SS_H{7d$F@Y7auYjy3Dy+N7GcQ^@!6$yzy>mYaigWX{C65rVf1p~ZJM#5S zn#;%#yFSL8c2&i@$CTv?Y0dmYli{U9*h>yq^y4F^m}RUrTe|;V+`;_3xFdp6((`yL zjuonqA+94J5lrvp!|GVRqg0ED)E~sYdhAKZ-kol@S~v*kSG=9*H%-vK4beG+`7tX> zw)lV%Zret8jTN`P-8i2{e@9>9-&c9Lk|aRc)}w8p1#wX>Dds+K7ppQ28^#DxsY}B} zJ&V7*u3P39J*Ojag)Q~3oa*H=?(e%hMT|Mq?>|pG7de}FECqLum)2|C1^r23WufC( z8FHoBHpTfX%2%cGYMes!k>(L_tv9udAI~TjqF=$^uqz?=M>V^Ff1j3+VzF~iSJcM$ zq$$vva6Ws6e`72e!qexwz1z<@&3y!9Yt8&JYA7>ZMcznQzoQ%REvnloqT1`3^VHNg zx$xMf?u0^djq7xNoo{*KA4fUmp0{%s?kyluUBMT#{$_!l7|{T$6-#jMBUsra`hrD5 za#O4GlI@<&W@xZ~f32da(d-5E96aYbf>@lskz?qCet}9eKI+mH5X%EM#J3&Saysb% zim9X_*d{!D4X6YGUNQK}m#$9iIJCEq;4CG)$GT4+?-uh0A0Y|gT)v>9j-;T^Jo#ND zd@13K*9BbT$0l1|hqKvz`vy)lp}bU@CrhxYOC0 z$d)S*ogZ--zWPggXY-@H_XjXc+@2nb37DB%y6?wF(buF5+a5ca(hxBcc+!yW`L z(ZQa~CrIV2e~9c1X~>U8J*R`b8~&m#$>tsG>ou$f(t)MC`xAWV@#*ZJXM}dT0|C zoudD;OZHZ3?6L=Iaya?wOP#YkPG?R2h>Ufr=Iq_K z5nIbox6yYC1^tnHo{|u~zx$`d4ZpMZfr#-!ubz$CUU~J8XjL5 z!;smTe^xU^8j33Cw(S+4R#QR6oo?T6*h{6Ce0otADZYBZ%Y6sbl3_K#M)iG`>m$r^ zUxa86hi%WT5^xoF6TIq%`N_w2%CHdJKwyU+8f8|s{ale|W0#jD{lY{yzx&$x0P0r+ z62UNcsljS6?_w*F8ou2jnzAh@`waAwNSsJSe+K`ulLyfAFD&||7`QQv7rq!1ns;o@ zJZd;DcC%TerGJQ|xsnlbg@}jmIr}HNd>zdSZbZE3rK8iRFxPe*QKRSZxK>m)$)vSe zfxs1MTQ(?$IF*!IOV=Vljy@ynM;6E$?NFkKNMC)QH5qS!s< zf3NxcxYwCX5#QOq-{a+UqvM)g+w1r60Pw96r>IVOU!aEDOSZ_1cP$frq4RNvtI$rE zu2~MRCPx*g^<0<`kvx6!yc|I~uQBx9OS54cxVd+h1z@l{ekmh&OqXKKr(auhjP+(P z3ZI+K)M&tn4O}PFp1x@v-Nohe#gO-pf2rN+v#NKl{V9*rFx{%;fqT^9cey=nEJj`b z^I1=4Mlq$n+`3QK;nx2_F#U7ZW^aZCe*rLk~;mel!Ak@z*5BZEo$=23>IDdk*KUU}0WqQ8nS(3+JP@B-j-_drhAZvX_g_7=n>YyvIt zdU)nXQq^hBOJi-%{nhh6#&ld4Cdr*A!F>dClFQ`1!1Q<;A)s^9=kEQRQ_KtcTDd0F z78@7qJL24^>4x@JcN|I+RK?2=varMF#AAk^6)H`N3ro8XiUfc#0l`E_f31tjHz1=i z-reqc+ngm%`;K-MtFPm@odu8)Qw_Y`+w0Rc*ct6?A$X}j?_9Fh`N;+suUNsyA3bi3 zU$2K@)0U=_onDGesA%oz@$V-{n;n0l^>Cfft@^Dw<4%X@k4UOVhE7Q! 
z=*b)Rqi~Ni|E-Pdu;bjr%cH0F0|x26J<$CCx-+UoHts;IwPeeLe+U<=ro{1mdfeQ< zSan>VtcSAx(o+C0hCLt4^QY$5+ugBgrCM&I>EDYo^B-i71GZ5hMgY{bX}iYmCz8MB zdSnqSY@XD*@iu+i;T?`kbi?#y1DWYQ$d>pEpeim>AZo)UGAmcZf48)L6I5yWI-p{= z!_R*ym;>y0Cg1XXfBXCLLQb|8j%L=a!z`@Puja;P2s2%kqLVm?sR)`V`yU!k@s+k( zyZH_xwVj>gGPPpzY^oJ27kT)c7Hc0CbJaC?-%#JoYP%%H^xS-0-%{Qm${4MT^0*}{ zK+ShS^G}#b%k@0eGm3XLefUnnfy6<3cO8Ep;s@X1zJ44Sf8p0$rYswNWemXoECB&Z z6q+c)UvSsVSst&f_`NQnEkDA=`K4!d(zq?%^W zP$sWG7zY99O6$3XBHvKTemQS+v(GiD@z-XbK`4T(MxKHJ7|#aryKJ9@zwNCo13Zkj zzZJW~6dJA*fAXc7%FyJYO_yjfUCkb;ojl=)cUNA|!%fbeo_K#>oKyFz9UDce`0I2` zIsRu^xRu&en{PAR)@eAC5h2=i@etK1^-p6^JSmAqWzTO(h{AwDPd_vfOg5Ud9suxV zqBM!jh#B#t=V_8KAHlzd#7H8^Z+gpyhZTCD-wIBMf8Z+}7uw_Pnq=n6IHNq3;KBYi z44xAe8_(tqs`66c<6mIdcS|{CKWHuOjnWaA^^SX;g}={0(+*AH+c(a}_7mybS0btx zJ1mTRiIR6e@`Dh{J|rYI3ezE)AsASXLcyh6hjg92FjE6c#_=}jtUeu z>FBm;e;886yb#>B3RARQG>R9V0%MSg!55l=Aiq1*p@P{V!yH;r04>0Gomc#A%OR;l zF2ji!hTQj$OU6KM(8HQu+%n~z=hfxdYbBDFGT@JnXM0zqHwOze!dv*BV(4kG9h1GnWOXVv&)X@{*Rq^RDUzQmUjL<6D(if2B z?fY(qt+^I&M?G%g_H0J`f%Wxy74UEm^T8UQ&9;NZ-dbP6wNQQ?LocfIx=6T>#I>>z z`RdM->89z5o@jbc0}Fh&w@rPN-L4VWf3~&zVesZB85XY{URKq1+V2qmP+2vbf!>$= zZ6a?uF3kP&FqPBkD86vl9Ts~KjE+3+Og)^?{>2smz?fU5v?B^qsYdru^&vFpR)xUf z8H@<+?J<|I9P&#IwP5=K{+@MzN0HSPKEmw%#~B76X=VLUX=JLbgRC89pn1w$fAZd` z{e;ntbHolzp?>N1Q37_i9MnKMs7}mta;qC0EO6>|FHPjTL=@Jeo*V>xRaNh)(A6fu zvO$IOQI%BL?TnknF1=U?NIufm@pyAR5WN+c-uLbsP7#lV5^Q!fqvI=9>Erafo>M+b zd8ew5jtgYu`(YGU>}j;qJNkA{f29WDJdg6Vpa}hJ95F0h(lCeqI=Z55Q$;gWcx^sj z^QQTFO89xi$q_IwK7RpeKJ&j~1e$wnhR%7E;!WHwtg&8yIVc3*{wu zN(OK0y03Q(BBpwZ^5*GFf9CrdV=U$R!*KKYy!BR<*stqj1#Mgo2|Obj#V07tYsQ0O zyTf!?+KpwkkAcyEf(Y;R^wSn3y*elc$pKNnmxP?bJi{2Cp}&1e_$Mb-`g$tI=At2 zGKG;$pBNZZjrdwATo*mYSR4RHzOp+ayVVCR`S)c!RRqV{;gv4b&z1c4-?2%-u$8+L zx7bR%98Ur#c&5&$xNG@c2X$3eHU1jK7@153r&S}lxQA1HeL}9o9mV4-4EAMv-np94 z<`~H}#j{d_?TE@nf82@=^u}4ZP*^!Omx?Z0A%O)V{;2Cjk|yAGje1(eqZ3hdi9^Oq zD{P!e1awml>Au(XHyrOgS;v@=I%=M7oPXRwqNXesl{%UTq&&)U9qz8{no$umh{ECf zw9fp-6Dy;celU@TF^3vy!2O=z!M((2Y!J#CPI6b+#qc1Jf3phwhr5bJr~F_(xDvBo zP>#top4;}?1HkhsCwFL_75ui1$P&8X$%mCc`)^ICLZM#qM(!-FIJD8?O6ksi0exyA zU$HO@_wa#2^Bt48k;?@w|C4yBf>S7mYKH>j@A4L$>Ug?j-POJw&EJ8Uc>C%o_a>$r zFt0J&&7K0Yf2b@h4ZBSUnAR;9fh=ph(QWKE`NJ1f)`}T(@T3O0re0K=WWF9dQS*D6 zuXa7$nf!-ChEy)Yj@-KUeIZoa5|=pTFiI9_naPrTe5C%DAXwq$;$bl81K8Firp46@ zXXgD;Q=gY>G{cG#Zxsc7;&tTUN&aXNVZVP*DKC^Hf4U_rcuJLYIXA)W8rsqZOO_|! z9HGwyxW1*&bf37WbXyh(czcIut^K9BI=`PxU@CvbQci8jUrR_OrxOgSUeSojFr3sb zqA5J~&rv^IeMYlP{xSlp*K|fwU-OSp2Dka7%_IFyp#Zvle>wDhN)$h6a9NFmrkp%y z=#Fnde=i?r4e0K1L>l!Yy&i%cSjGKwyCmZPl=EMk?k84Ol_^K%dxxf7>l$0Wyiw#! 
z-R?8+*>F*k5Em`5sr2%i(fMQR zzFLaKXH<)Y@4_;>TknCg@2JoJW$R4VRYlJze|$j<=mcRXpn!BLN;d-mX%y)Zu0Cs@ zK{81_m8X)*$;qjG-0=V3x7Pbs+oL|Ly0BuQT^~G4Nuhay2-j9@uFMy%Sj>{^<{98B z?cMdPiEcN@KG`$8%FJW?QWIH4N9fwg@JrN?*kk^1+=S2NvO9g@>}DIOw)SQ_Fqil0 ze?G3|_0aH)AAY#o4MOWFI@Lbo&-#AV4W*Zo zW%$6cl@!eRf?vL29kNlvaWLh#+(=X2U4BDIt)~z992K(;PUQF*Cr`o6 z=++>>JrtOk;w;iPQ8w-GB#eY=-zflv4pn7 z0?gsV0Wf&puYRY!5!%8&5_fD*Tm$MAyU|3p4g{nx=5m2(gblxt7e**(-Xx{M`=%byi`vz4<#O34K0A8tH?B)5jl+I9# zXH-gYH6bFVUA^-G-RSE&=$A(wpE#e&=iEa=>v_}Q@>aR4^-LWn#*_d% zd(~(fg=ft2y>q_Cjn2}y0X@JEmTR0ao%aJ*Y4m-#L8h=rvOO)Sa4f>u0Sy#H6eZX7 zX8Iv?X{&+rkdQ}Bc9J6he?nyMgBeO&D?VH$$5}&YeDw4_xa<)Qa7+R9?|V0iN@Ny~ zE9!6#7Wv;})ZcHOa=+%4kRC|~_{;C9fq%Q2oQHVo@b=`Z=ta=ujswFzZDS600KxeV zNQB+*PJsAD?+MN;lu94Y7ku0rzwx3HUFsfo?fFE1Z|EQ1hSX2;e}g2u5Miv-)6-q$ zaljGce3@MpvqClpe}rhiF5siPA`sZ*S&e+CQ6>vqgYip^(1EKvxcnY4d+Sc8>mlHj zy;7&^!~;ARC`#q_7*ZNzX9^;)lZVgGV%#LHJ-dCiQsgx%kRZkYYse27nbP-S&`e@Ca7e5&LEqW=hu!*gy- z?F1^LeK+M}Yu(>hVDx|4<8^WvaV4L>Zi|$cfA8%leuC@?plO25$ZYN5fdUo9{(Q+Tqo?}Mcd`9S?8EC%7Lk`NZq4dS z3FO}~M<=Vhgw(=TcFvkLvhsHNS#}Zc1nFe4jPMNUw#nMZeDt zu1e|p)!BnXZnq$BIscNvOo5P@o~23Cs%QN>elFIr#cxTqa?67rVPv<%;!enf=Cr|k z`wHY+e@cX9`uxrVmEA^Vi2*Ku6!5@WRTLNk?@?dor?qjZzV_SvD8*+WESAe;!x6XMi;gdeo~)ONZZ@FL!jg0MXCB z)#nVOB|}93WY28yTNw04w|qHfOn~~UaA6xrTGFJBXb?`_kuKx97C!)1svivD#kR^s z2>I5WanurJiDF)?kBaHHFfemCKCNf*%LM=W+F%8B>M_{7)E9Y#?+)~vZ<;)YcH)b@ ze>CQ7PIGVR4FdNU{e@yTSH)lRl!@jD5M|#V3_vauR&ZZWtfKeV_4GXBMOqJJW))EO zwCejOCmM6{Z+7kTA3Q#j_KLz6dVPjP>rB)~;LVu@AOR6H zyZ)3iir-bE6v+FXVN`Obw%6M$T1)PA-YTJA&bDzlw_R{cLKhlu-6Y>TYEWhKfJ<iqt=&iKEbngfY$EWQfF@?o@XlW$5cI$N7S?!E}deMPXm# zhWd1x9;~Go>G|5HbCqM??0lBrv>D1I=M>|}%C-_}_ZckoC#+vTYH#tr%kz|~L$0_m z`#mIi^kXx#6oSN?suH%%@Vig0q=BI1zE zUvp*Ea<=;-Vh;dt(o$Fo7pdwMx{LZ$3S+E1q}A!3ja`s}@NxX1d%uNPUrEmCtYaP} z7{0|Ml0yhh_nCgw2B#^7oF;u-s^bPobLLep%0)_hW`1s{Y&sks=PaEae}AN!!Wu;F zHl9D5*KZV@6Rd4Z_#Dfx^o%5m0!uixo#S!ezP}CuO#eN=raH$YdG}Sju_aH{3lsil zccahu$D8+4XvE5sBzcLB?x^v6$p{Co?!difk4tyw?08>bSmO3sh1nAeR_5oqKb-4J z@nK#RRLZ;OIeV8oClEAge_ulwC%&4_AXzay=C)NqlOBGzn53jiaLh%B+Iv7wCVZ<` z`k~;)xBFUJv9Ct*F+EfkCfMM9JeuqmQ#$ceQQ?a6eJ}z7cB>z@iHjE$qws@#-Cir?Tv>10XY!~WMlZxkjf4yIf=ex3 zSz$dhoLJXdlbRDX2D47;ywbH>NqVzb#n}~aaN?Hace8oJ@)S>^OUk@noFkC7RXx8mS1wzYpoYnVVB!uB2T;de+jK{dcI$2`w@F<&6+qV&`6hq z7koTN$N{Gy3KFCDj^H2J_}?QtB$6)*v*BSOjsP3r#p8J8b9I%zCaR-a$*DYdXhXDh zta3)A>WeW7Y^!Wxc`^0oB`?Yaq+wntki?rUvVE_Bb+PCz$3u?Z6 zH{6--e|Pz7JoccHb#9`&`>);o&MdsW>-$!HUGBNDmrDuw0yNP z;(*lNKRY;O60lSBG7FE>=>}C2rw%YNK5`rlf7r!W{_s&9^TMDSShoF+(fIUccExJ1XScDSqV?Ba1?L8$l%6R`5=?VTG( zf4b2U%?{7QuT0aZKGMXm+?ccs@8?>sxsmF>EjJ?`gX#98fC)GEFL<8^3$f^n3I#vc z9zv(*d3twzlU9Dded@z@lg^?!JZ7#b-}IIG<3rNf+i9~?6W<3g>0M#Ck8Ry5K_0wI zSNO)=cJAK!Xe@n0h*_qkr@XG70fxQ-e|~+z@B0&awy#|YT$GylZh6prKW|hWAql%2 z{W086N({~2x8uHWtd7mWQx-nkl)=n6o-WDtm+)`l1-&P!1+Y8al7M1r=TT+k_t2^y zA?FEX=eGbAi}h6!Exuj9A1yE3E0OKDgOh2uGX__wX~saO?IZ1F!Cq;6yk`O{e~OND zG0>{=J8Mr8;?=_mB$}HzneQj4--w=BugA4(wN0n{>QEV`rbKCSu5N>W){5Vy8+n@J z+F0q{QK?+XvObL`Y ze=xSXpsp#rp46h}g4+{rvz{iq->F<>3#W`k$;_uIW_|-fgZL@{$3NxfJYJ?1c*`i> z05d%L3Idvk{<2M_00=^VGg#i(V@K{tz=sl?wwjSpD=XVdwz~$uX$;Dof4E1xm<3;op2l?B*bockTJizur|LV^x+r!Aq@0X}li^=bc@$COxeh3ZwbL>vJ$^>d^ z)7I#I$lWKRKBqdfJmdNbL12%|*Y8Gvk;;`=;Vd`Zc?rg_9lFd+e}95pvIh!;Hs-Nx zLt0~iE(N3QDM5AJ%2UdHv8G5`<+jjicOG}g3!VM7=5?zzUXdBZwmQfqE#ZiQ*|}b) zC-b!xxp26?#V$YtvH{ttZ)CcMw&TikGTZe?i z>~@l#?CG{D>X2RJC8a|;p6wp zFOmldc6vsMPxN#3UIt$?ZIz#Hd>T)syUcsvh}=fNoITV=4V+l)x@JAp^m}Fq#(kA@IJPq*L$V71NR%tnkxg0}S_QF?OO)^PhJ zs%Bhk2nM%vHEev&Y!}V`t}!o(XK1)Zi#wP&??Ke|xp5ywmjD>x^Wdbnr~4~&#AF9* z)z{p!e^_}lD2023Zob9}3q5#lK#@4>O)&C}rNTVi+;lNGRlTP2&XVl2KUA~bQ}sPE 
z4!@rSOvZYbaS$`THa8vzp;-4DF4~TVH*m%$u0UQKy3sFu#VhIf>4x`cny+x%Ax1iV zi7!Ua{=uV6l~;?xm3dGH4VzNu-V1(HoS`P|E)3cc;q^PqXX3?xh$jtb&ch|V$@9NpFQBIk ze-Lmab-B|SjvovoRL}FiueJJ&B^ve_E)Y7G;9{1)ndso^e`o!KhigPP7gaiOqcFRf4FygbYJI34{UTY{Igt7df44`y8>rV090uiqQHsMxe=lfV z-i~XNzf9{8pZE#oIW}M?K3eyIDnd8BvE1%o!dVOmW)nXAr z;ybgt+UZr~((YG%%>3y*59#F?so$0#ekTTQ_*uz#;T&Sxn$TBYh)P3P0q_5Mx2kRM z>M0cK4rZf1!kZb~_LE_7*}M@~zTgQWnzl|d-j2`OLe=={&Ur7I1 z6-bf;JhjWYJNaBJd6tUV;t2#qc?M{ntRL6e+GQ z&LY{W)*#^uOvGLX^2Jra_9~}>>lpBjU!f=QzcZo10mTWyVKNaL7Zl;>n)@FlixkrJ zobsm{+5ZmtZuik;W5VqhJ?kDH5-bMHXzogk70dd~3k9#%K zbNeo4q)~KV+P(Opf2OJ-fOx%h(({;&)8z9>=&DZzKcc9e3l8dWdqJIg5d81qb~~aJ z`JG?yN9G(m?O6XLck;cP-BH3Z4pqUa<9g@a@8LBo_~_<@dQLE-FvH!~iTta&V3>!> zY5xNV9hJI0l7h*_q>Dj+ zBAPo;t!f2|z+)d;DQVZBcJ6CwZ12Rj7l}QTetTkLZRt*a&y12oVA;_i!M1A)BToXo zYCW*#*m%%%B!B1X2KM^LB;}Py`sdDvX5+8Ia~3m*OtlB&(C=Pi3t-_F=Yo^w^YfB^ zAKR}ag?5CCf2q7r2JBlui`lV=PnD;qOj}^T4u4`Kxk%lU4Z(g(tN+n&ynNha8UhI?4hwe*Ce-cPzGh@~eqP zYBrFVx#k_R-bd_~Jy7&Lqm`YnG8dU#KAiE~>{v+xe`q5B@?0GUv9E4>B>NL(>Kjt>+w&Tt25PbvR7euz+;4V%jt(37=a;ebQ@-Px9lyOCj4jmyxR>FD zvQ_2&9+L=UrKrcOfPBGqJ61z-maKfZNUk>MXAqXPK+M&l ztb4V?X9_hhyl6PRdS_*_h|%Ro94PNF7JzJZ{`PaV>|UrLCadp4q`3+ak&DV&uw{wr z5?`KO$pzYA8?TH@okUDGMbD+FI}lv`WE6Y!q=H`G*!au5J!jN-GrrO^6SY#*ZUGEi ze|SS?GhJTEgzWCkYUqXLBAbo6EAQ)AqWb<*C9d-}>L+l`qTHc%E7^g1E}e%+qw(bu zsQvAU>IY&=^Orh~$0K@nEd1WGfp|cy9?s2F9F4Vw(I~sQ^+>!Q{#rfKPj}~7rx38H z_3rQ=p-RxuJRL%ze}hw$p*g@GU7;ELe=@ny%lc5l?q?Yh(Ge$}^>pzqcL80Gp|?MI z=}RPf`ZT)X2!^IOerZDtcd|~+Ha+9U>Om(1)A#4yk(?E4cOg0ND_lVD#>gWZ{@wPP zpuPv;BZZRyU0Yth=W)j*>H&9G>;OGL!oRm)iMc2g2dJ=VU_pyin&njnn3LOq7gvt* z>whA7lRrVFq4<2uj|T;}iy3U?&3eA?_m#z~+s4_^p79=^>Pywo{*bm^*f6IP2OE(Prt%V|0Pl{KNq{{2seUFx(pbJnz(t z$ zaRd;=HIQ}j*4<`IQ8?Vc^7g^ahMXl1E>+CboU+;9ub!6!Pb$+(db|b4851` zAS6ofp^nVDy&AZo>WL?t+NI~Z$u7j}ZUU&N*pI`dZ0 z))}=&B*T(4M2;6}a&P?L1%LfqZWtx1={LW>5{SMf`2=tfTP>xT z78OF7=kq0#<(qCuy+d8Vkl(kvuGJUq%&603Vg&j|0=L6J zHGsnXIH@l8)ry4Z6n{jufEuu+S`mMaVE|3(y4|qln{K05{lRl&RhRN}DNTbggtN<6 zzq7l6Ws;X-(i`*2nETB}bMg)r>9ia!uNDYqS%2W1tA(PK@XwF#+y-?SRwn}r=Nj=> zWXdPUudPW9>)2pAu$7U)K0C2_yP3_cl0P2^WRHUe2H}s{On(;R;P`}=GY*5z?`;&! zYgFrYM+b@1Mr1seQK1AGcr?f1H(p(F?;6|ksV~kJFWe^NS1-eQ1d1oi&{_GoI~p&| zyU*T(xf{2`Ott%V*K zp56a8VHcmz$bZ5hY!ND7*lsLPLuZc#94?M20=s`L`&}aoF1vX8a3d^AmXIjsGp=4# zZCKv!%N+K3|5a)+a=rcsNH>)zr&I^r*}$_boZ}LN6D{@5A6l(YEJ$Li_fWnx%)Gkn z-CUchAavw@;dF5jVM6m4iGE7 zUp`KMetOx3IF)))t+)569)&2OXMgxDI=~^@ha;dXEv+YW_r)`HdVQv^Kd_xqqspQd zoV?rybbk#mWF%a#M%JBgYH&48D7=nrBarOvasK5@yvr-PDNOu^15U4o-osQ>>|`7w zN!~w9LlyYQv`kFO2!=3nJC4jL9)wD)-H$Z}IeVZ{uuZZQx<^m#PjR*vf==3vb*cAd z%&Yyc9VXszIcfQnhYa?Z z1+RXljs4or4fO8!eSqPLU0r|o@6=QVWNWzB-E=OW%4`4<<~fnm%WIPx`NWpfuqjT( zH(_Qt-S6-_muGLyFfB?iX-2x&@%d7IY{cr6x@|4p;n7UUUj-Kc;bI$fYzvxpzdZ{M zynlxB;;qfx$@rV>4YT#q{9Lu*9J_2%&Cz2CFv>4)Li}x3j=h?*Fx{6{+!_*4vv}5W zFFmffMGJs>-}Jfh1b_K)?Peh45%q^w@lGU;D*H&6XK&nHw2v4ay%0^*RIvD!JN5CL zJ2SD!83b2H_*y_zd_@v>%R8yb%+Erl>VE_7^$nvxuU1plViMl9J1kibR=Ib}#HWgB zUzSZdVFuhIoe_Xk^Zc(Z(RxFb=(>1_`b2YgE`OYN z$TI8)H&gF@uX@BPBjC0jzg1lLHBMs2IYGH<<&lGvMTbh2?2nHSK?q=? 
z`0rE0mny@m#_Af$(D8~qUY1YR&^f4&jtRfns}$4Iu+$I^-u6L*q|45DQF;yL7995a z00fBaq+jEk64r990LfJOU0Y( zJWekaX=4NtuvO)}qf7NQ;=O~83d>PY?|OAZDCf-9Q;#Qhb3Lp?R*?^1WQ$SOrPsvW z%S#)9K6tculO;=Xw|0(rp34nRtDUH#$!VuT6@iy!}A5fHyB~b;0E841k?b?d~()A2;QDf8$K$h0FVBE(M3& zJ5YO%tVDGiF#~-_cL2aZbbly~@71W^1Dl!tR7~p~lSc6y!DV?=uhCF9`%&rdRPLZJ zJ%5SIN(V|vA?~R9k8=yXPxsly(5KYa`}-(1Ji3mUVi(xRfKue|Bz`D<6#JO0^AkIp zeXJijTGC;W=h9y@orEU#=j35`8yx-pA=$(Kn(BDqVjnRVvUeWoj=lZ_#6tI69)FI`F zY~bbRJ*n&Pt_b4l7UG}0^jBE@y~7~bxHstJ7BYWyAM`)K6=^U1~uKM z(Xk(X1FJ&-yPNEBs!H(%I=M;HI?!DoH>_84mY3lCv+nmq{o;G;()1UTzTTnX`@p-n zwQh+o0THB;|Ag(h9B%=AX69ZvD>bff!Uq}Fb1+#fQl~ORb8PKZUkCMN|pM|sC@^ubUtD``&Cz0Y z;@PWzNv9q9quGwX1z9DPL?V30HvI8rpTLn|Q7L_0kNeZcGqga=K(6MZ1H5T2UM1tp z2&~V4Yed+h_4gj>xiDA%#U?Txo3C3u%D=3R%HLaOc7Lh21P|W1$jUurn}?43JLIku z^ts;Yxtl*Y?p{QUj@Y|iyg$cgTpm=tZCdfB#yqXAjUo)F4a+BLKfZAdjyA($^0SEz^%hg4p4Lpn3gD%&Hu(U>NmW zxeB}KYJXIiPd#~cSmTe&t6s_Szcsu|PoX`6BAO@|(>c%$HW0EI8?d!QiJsON_PQnb z7gY<*SXtg(v;z}~h5JiQ^}wH)OC_Pvp2hv0AX7y!F+KH^cf;%T{mb;t`xM)YP3Kl& zA*9|gA^*Nxm2~OM_ct=!HEE^jdfh94ZE5EG%zsPaCLK6oH=02Dy?^HEA%C-p7LXVB z*93u~$8_NRUO)SDa6>OX*3Y1d(po^ZryGU|(fLp(r|EFnx(PWk>4ZWf%bX%18!p^C zFI7Il#2kIt`FmiTR|)%A9L)U{mchMYyu^obW1kLwF+kznKPhS}_3v|>cKg{m%R0|x zrGFfm?A68HJAn$~>nre^Cic8QPyiqigxtKEgP* z+B3()*kEFDa^Y$Y*++xoE$z%1;0eq`!GF!YdOu#5{T+O;g7RQgEOw>1cWfHRo$$x^ zVsCuVCO%Z%{q?C%2I)uJD*I*#r)ZJSA26_#S5j=-R%cJQ$9HQhC}WZmD5mN1RPes{ z=-zT`V+?|<^^ z5EICp*~`RBgLqb>HI=ih%$z;*+>Tm6`iPLcpV@{4)2%0f<^8zz1REq4)YotR0e(ZK zj)-5O+jTD@M#$;+2k4xmDGUTD2>S0&0JH?=qq2F7}PzZg1Q; z3Ie$@?NykY!==g580-6X%7%MY9Dhe}D5s1$R6bVvqJM1D#I)g)c}?%>z`Eu4C2=nw z1^kRzvABg0GhrO(6FcZGmHQg5fZZu|53*%1-ifaBa`H$7TRMlrR@uj}Vv6Ip$2z zdU#U}|H-$DLFBcWGA?l4zWyS!6Dj$3@b!t3<8{>$$1#{It34J_e1ADUm$l+q+4mB2 z?^X*xJzvF7+Pax^9G#bbfTZl#oh}is;?a=hWWf|Kx)#^l=WP&(oP4N#08|3T4S?>o z45_6T?A&@k*GiYw4$TD_h_qXO_3)twfI5{@cA_Vb1v{f5ZFh5rXZ%8OcJupi2cBd& zj_5zvvIL68{894m@_$^TQXj)6{E(%eTQ4^MTD@$0e#{O>jfr&qLbdsZ1UTw~Bz#zV zLV4vMOVyCP#$bKegAW1uK2=lYmc;5&(!5fz7oZ{7I{Vx{*iwOyQaM!gnMT6X9vj>R zyc1PJjioZC{ve|#p7&r|KE7PQfOwfwZXfXpCxsKEUytLPh<`Y>QmFs~s3t&WfVX`+ zQi_7Elc#;Iae5T7n9y!oRK8MD4%x3`W#||O`1j(djZu16af^?9)aZWGtH zP!gwAXxF>F4}WJPl;O<>4VIN00n0v&fhygrKKoy(9Fc+Jl{$GxxM;WU5h57TVLo5H z6CePN5(9kShPrOc%$|H{K#6IhpK-1ON118%#O@RGM`!f8Dxnh0f)yyRAo!sj?y@6; zj&X0~{pvIPC0Qn|1_65;2Z}xnr?1x>@jh&D(%}o47k@?MwyxsLii(G(da@4#YKV~7 zpZxX>uzY!&u@B9B7^e4g^C}g~@u=GCOD;Q1JlvzV`jt8oOMu&PXQ}4{xm>1MsC(hu z(G@no7l!WHA!Cu~@yVZv^C3R2zN)g@G^xCd^nVTU6GZR9UE788G44G)D#HU83aEQ<-c<&Uf9p@3>s{9dSwd9Z4TG@pi^v zcZqO0^B?0#FG(MO5RE-OwnEd&cz>^8 z4RlI(HY<_>Hcw6Ot>!EEcllL2_ID5!^7f!#v}y!XD)i1%#~>-G)X5_8BAo+*6>;hr5S~mrIs;gJ+5_=? zN$m5983)s}pnYK*4)s38pZy?Y3zzW4;L6Ks{8k}1y>@5;e`sX|x&A|Q{rk6EHvGu_ z1sSi&{k(ffJpI=ppKE3tefAZHz4KpHaHKX@BItIkQzqP$Mz99>v{pSWX@AGqY9@w7Nt0REmWsj4=hAkdMVS zww(c9pj-}QA!Sb8bBaO_R(*%sX6f<}%%a2M8y-+0q|SYD!vAy@h4Qu6L#U~? 
[GIT binary patch payload (base85-encoded data); not human-readable, lines mangled by extraction and omitted here]